
Python numpy.minimum Function Code Examples


This article collects and summarizes typical usage examples of Python's numpy.minimum function. If you are struggling with questions such as how exactly to use minimum in Python, how to call it, or what it looks like in practice, the hand-picked code examples below may help.



Twenty code examples of the minimum function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; these ratings help the site recommend better Python code examples.
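
Before diving into the project excerpts, here is a minimal standalone sketch (array values invented purely for illustration) of what numpy.minimum does: it compares two arrays, or an array and a scalar, element-wise with broadcasting, returns the smaller value at each position, propagates NaN, and can write into an existing array via its out argument.

import numpy as np

a = np.array([1.0, 4.0, 2.5, np.nan])
b = np.array([3.0, 2.0, 2.5, 1.0])

print(np.minimum(a, b))    # [1.  2.  2.5 nan] -- element-wise minimum; NaN propagates
print(np.minimum(a, 2.0))  # [1. 2. 2. nan]    -- the scalar is broadcast against the array

# The optional third argument (out) stores the result in place,
# a pattern several of the examples below rely on.
buf = np.empty_like(b)
np.minimum(a, b, out=buf)
print(buf)                 # same values as np.minimum(a, b), without a new allocation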

Example 1: __PlotWeightedAtlas

    def __PlotWeightedAtlas(self, sortedfunctiontuples, weightdict):
        # initialize the data
        median = self.__Curves[sortedfunctiontuples[0][0]]
        minimum = np.Inf + np.zeros(len(median))
        maximum = -np.Inf + np.zeros(len(median))
        outliers = []
        cumulativeWeight = weightdict[sortedfunctiontuples[0][0]]
        i = 0
        # determine the functions in inter quartile range (IQR)
        while cumulativeWeight < .5:
            maximum = np.maximum(
                maximum, self.__Curves[sortedfunctiontuples[i][0]])
            minimum = np.minimum(
                minimum, self.__Curves[sortedfunctiontuples[i][0]])
            cumulativeWeight += weightdict[sortedfunctiontuples[i][0]]
            i += 1

        upperfence = maximum[:]
        lowerfence = minimum[:]
        fences = AtlasMath.BandDepth.GenerateFences(minimum, maximum, median)
        # Determine the functions in the 99.3% confidence interval
        while cumulativeWeight < .993:
            upperfence = np.maximum(
                upperfence, self.__Curves[sortedfunctiontuples[i][0]])
            lowerfence = np.minimum(
                lowerfence, self.__Curves[sortedfunctiontuples[i][0]])
            cumulativeWeight += weightdict[sortedfunctiontuples[i][0]]
            i += 1
        fences = [
            np.minimum(upperfence, fences[0]), np.maximum(lowerfence, fences[1])]
        # the left over functions are outliers
        for pair in sortedfunctiontuples[i:]:
            outliers.append(self.__Curves[pair[0]])
        self.__PlotLines(median, maximum, minimum, outliers, fences)
        return
Developer: KitwareMedical | Project: AtlasBuilder | Lines: 35 | Source file: AtlasBuilder.py


Example 2: process_chunk

    def process_chunk(self, t0, t1, intensity, weights, pp_intensity, pp_weights):
        # Loop over intensity/weights in chunks of size v1_chunk
        for ichunk in xrange(0, self.nt_chunk, self.v1_chunk):
            for frequency in xrange(self.nfreq):
                # Calculate the v1 for each frequency
                self.v1_tmp[frequency] =  self._v1(intensity[frequency, ichunk:ichunk+self.v1_chunk], weights[frequency, ichunk:ichunk+self.v1_chunk])

            # Once v1s have been calculated for each frequency, update the weights and running variance
            non_zero_v1 = self.v1_tmp != 0
            zero_v1 = np.logical_not(non_zero_v1)

            # For nonzero (successful) v1s, increase the weights (if possible) and update the running variance
            self.running_weights[non_zero_v1] = np.minimum(2.0, self.running_weights[non_zero_v1] + self.w_clamp)
            self.v1_tmp[non_zero_v1] = np.minimum((1-self.var_weight) * self.running_var[non_zero_v1] + self.var_weight * self.v1_tmp[non_zero_v1], 
                                                  self.running_var[non_zero_v1] + self.var_clamp_add + self.running_var[non_zero_v1] * self.var_clamp_mult)
            self.v1_tmp[non_zero_v1] = np.maximum(self.v1_tmp[non_zero_v1], self.running_var[non_zero_v1] - self.var_clamp_add - self.running_var[non_zero_v1] * self.var_clamp_mult)
            self.running_var[non_zero_v1] = self.v1_tmp[non_zero_v1]

            # For unsuccessful v1s, decrease the weights (if possible) and do not modify the running variance 
            self.running_weights[zero_v1] = np.maximum(0, self.running_weights[zero_v1] - self.w_clamp)
            
            # Mask fill!
            intensity_valid = (weights[:, ichunk:ichunk+self.v1_chunk] > self.w_cutoff)
            rand_intensity = np.random.standard_normal(size=intensity[:, ichunk:ichunk+self.v1_chunk].shape)
            for (ifreq,v) in enumerate(self.running_var):
                if v > 0.0:
                    rand_intensity[ifreq, :] *= v**0.5
            intensity[:, ichunk:ichunk+self.v1_chunk] = np.where(intensity_valid, intensity[:, ichunk:ichunk+self.v1_chunk], rand_intensity)
            weights[:, ichunk:ichunk+self.v1_chunk] = np.repeat(self.running_weights, self.v1_chunk).reshape(self.nfreq, self.v1_chunk)
Developer: kmsmith137 | Project: rf_pipelines | Lines: 29 | Source file: online_mask_filler.py


Example 3: util_analysis

def util_analysis(arrps):

    cpu_u, cpu_u_orig, dead_miss_orig, dead_miss = [], [], [], []
    for ix, arrp in enumerate(arrps):

        original_deadline_misses = len(arrp[(arrp['abs_deadline'] - arrp['abs_start'] < arrp['original_duration'])])*100.0/len(arrp)
        deadline_misses = len(arrp[arrp['miss']==1])*100.0/len(arrp)

        sum_my_offload_task_dur = sum(arrps[(ix+1)%3]['dur_offload'])

        exp_dur = arrp['abs_end'][-1] - arrp['abs_start'][0]
        original_cpu_util = sum(np.minimum(arrp['original_duration'], arrp['abs_deadline'] - arrp['abs_start']))*100.0/exp_dur
        cpu_util = (sum_my_offload_task_dur + sum(np.minimum(arrp['duration'], arrp['abs_deadline'] - arrp['abs_start'])))*100.0/exp_dur

        print 'CPU %d, Util %2.2f --> %2.2f, Deadline miss %2.5f --> %2.5f'%(ix, original_cpu_util, cpu_util, original_deadline_misses, deadline_misses)

        cpu_u_orig.append(original_cpu_util)
        cpu_u.append(cpu_util)
        dead_miss_orig.append(original_deadline_misses)
        dead_miss.append(deadline_misses)


    result = [cpu_u, cpu_u_orig, dead_miss_orig, dead_miss]

    return result
Developer: gkchai | Project: garud | Lines: 25 | Source file: gd_stats.py


Example 4: reflective_transformation

def reflective_transformation(y, lb, ub):
    """Compute reflective transformation and its gradient."""
    if in_bounds(y, lb, ub):
        return y, np.ones_like(y)

    lb_finite = np.isfinite(lb)
    ub_finite = np.isfinite(ub)

    x = y.copy()
    g_negative = np.zeros_like(y, dtype=bool)

    mask = lb_finite & ~ub_finite
    x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])
    g_negative[mask] = y[mask] < lb[mask]

    mask = ~lb_finite & ub_finite
    x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])
    g_negative[mask] = y[mask] > ub[mask]

    mask = lb_finite & ub_finite
    d = ub - lb
    t = np.remainder(y[mask] - lb[mask], 2 * d[mask])
    x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)
    g_negative[mask] = t > d[mask]

    g = np.ones_like(y)
    g[g_negative] = -1

    return x, g
Developer: MechCoder | Project: scipy | Lines: 29 | Source file: common.py


Example 5: compute_overlap

def compute_overlap(a, b):
    """
    Parameters
    ----------
    a: (N, 4) ndarray of float
    b: (K, 4) ndarray of float
    Returns
    -------
    overlaps: (N, K) ndarray of overlap between boxes and query_boxes
    """
    area = (b[:, 2] - b[:, 0] + 1) * (b[:, 3] - b[:, 1] + 1)

    iw = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0]) + 1
    ih = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1]) + 1

    iw = np.maximum(iw, 0)
    ih = np.maximum(ih, 0)

    ua = np.expand_dims((a[:, 2] - a[:, 0] + 1) * (a[:, 3] - a[:, 1] + 1), axis=1) + area - iw * ih

    ua = np.maximum(ua, np.finfo(float).eps)

    intersection = iw * ih

    return intersection / ua
Developer: jodiexyx | Project: rob599 | Lines: 25 | Source file: anchors.py
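
A quick way to see what compute_overlap returns, assuming the function above is in scope and using toy box coordinates chosen only for this sketch: one 10x10 query box against an identical box and a half-shifted one gives IoU values of 1.0 and 25 / (100 + 100 - 25) ≈ 0.143.

import numpy as np

a = np.array([[0, 0, 9, 9]], dtype=float)                  # one 10x10 box (the +1 convention above)
b = np.array([[0, 0, 9, 9], [5, 5, 14, 14]], dtype=float)  # the same box, plus a partially overlapping one

print(compute_overlap(a, b))   # roughly [[1.0, 0.1428...]]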


Example 6: nms2d

def nms2d(boxes, overlap=0.3):
    """Compute the nms given a set of scored boxes,
    as numpy array with 5 columns <x1> <y1> <x2> <y2> <score>
    return the indices of the tubelets to keep
    """

    if boxes.size == 0:
        return np.array([],dtype=np.int32)

    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]

    scores = boxes[:, 4]
    areas = (x2-x1+1) * (y2-y1+1)
    I = np.argsort(scores)
    indices = np.zeros(scores.shape, dtype=np.int32)

    counter = 0
    while I.size > 0:
        i = I[-1]
        indices[counter] = i
        counter += 1

        xx1 = np.maximum(x1[i],x1[I[:-1]])
        yy1 = np.maximum(y1[i],y1[I[:-1]])
        xx2 = np.minimum(x2[i],x2[I[:-1]])
        yy2 = np.minimum(y2[i],y2[I[:-1]])

        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        iou = inter / (areas[i] + areas[I[:-1]] - inter)
        I = I[np.where(iou <= overlap)[0]]

    return indices[:counter]
Developer: yuta1125tp | Project: Action-Tubelet-Detection-in-AVA | Lines: 35 | Source file: ACT_utils.py


Example 7: clip_upper

def clip_upper(arr,upper_bound):
    """
    In-place, one-sided version of numpy.clip().

    i.e. numpy.clip(arr,a_max=upper_bound,out=arr) if it existed.
    """
    minimum(arr,upper_bound,arr)
Developer: ioam | Project: svn-history | Lines: 7 | Source file: arrayutil.py
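
A short usage sketch for clip_upper, assuming the function above is imported from its module (which imports numpy's minimum unqualified); the array values are made up. Because np.minimum's third positional argument is the output array, the clip happens in place and nothing new is allocated.

import numpy as np

arr = np.array([1.0, 5.0, 3.0])
clip_upper(arr, 2.0)   # equivalent to np.minimum(arr, 2.0, out=arr)
print(arr)             # [1. 2. 2.]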


Example 8: connect_extrema

def connect_extrema(im_pos, target, markers, visualize=False):
	'''
	im_pos : XYZ positions of each point in image formation (n x m x 3)
	'''
	height, width,_ = im_pos.shape
	centroid = np.array(target)

	im_pos = np.ascontiguousarray(im_pos.astype(np.int16))
	cost_map = np.ascontiguousarray(np.zeros([height, width], dtype=np.uint16))

	extrema = dgn.geodesic_map_MPI(cost_map, im_pos, np.array(centroid, dtype=np.int16), 1, 1)
	cost_map = extrema[-1]

	trails = []
	for m in markers:
		trail = dgn.geodesic_trail(cost_map.copy()+(32000*(im_pos[:,:,2]==0)).astype(np.uint16), np.array(m, dtype=np.int16))
		trails += [trail.copy()]
	if visualize:
		cost_map = deepcopy(cost_map)
		circ = circle(markers[0][0],markers[0][1], 5)
		circ = np.array([np.minimum(circ[0], height-1), np.minimum(circ[1], width-1)])
		circ = np.array([np.maximum(circ[0], 0), np.maximum(circ[1], 0)])
		cost_map[circ[0], circ[1]] = 0
		for i,t in enumerate(trails[1:]):
			# embed()
			cost_map[t[:,0], t[:,1]] = 0
			circ = circle(markers[i+1][0],markers[i+1][1], 5)
			circ = np.array([np.minimum(circ[0], height-1), np.minimum(circ[1], width-1)])
			circ = np.array([np.maximum(circ[0], 0), np.maximum(circ[1], 0)])
			cost_map[circ[0], circ[1]] = 0
		return trails, cost_map
	else:
		return trails
Developer: MerDane | Project: pyKinectTools | Lines: 33 | Source file: GeodesicSkeleton.py


Example 9: test_get_output_for

    def test_get_output_for(self, ParametricRectifierLayer, init_alpha):
        input_shape = (3, 3, 28, 28)
        # random input tensor
        input = np.random.randn(*input_shape).astype(theano.config.floatX)

        # default: alphas shared only along 2nd axis
        layer = ParametricRectifierLayer(input_shape, alpha=init_alpha)
        alpha_v = layer.alpha.get_value()
        expected = np.maximum(input, 0) + np.minimum(input, 0) * \
            alpha_v[None, :, None, None]
        assert np.allclose(layer.get_output_for(input).eval(), expected)

        # scalar alpha
        layer = ParametricRectifierLayer(input_shape, alpha=init_alpha,
                                         shared_axes='all')
        alpha_v = layer.alpha.get_value()
        expected = np.maximum(input, 0) + np.minimum(input, 0) * alpha_v
        assert np.allclose(layer.get_output_for(input).eval(), expected)

        # alphas shared over the 1st axis
        layer = ParametricRectifierLayer(input_shape, alpha=init_alpha,
                                         shared_axes=0)
        alpha_v = layer.alpha.get_value()
        expected = np.maximum(input, 0) + np.minimum(input, 0) * \
            alpha_v[None, :, :, :]
        assert np.allclose(layer.get_output_for(input).eval(), expected)

        # alphas shared over the 1st and 4th axes
        layer = ParametricRectifierLayer(input_shape, shared_axes=(0, 3),
                                         alpha=init_alpha)
        alpha_v = layer.alpha.get_value()
        expected = np.maximum(input, 0) + np.minimum(input, 0) * \
            alpha_v[None, :, :, None]
        assert np.allclose(layer.get_output_for(input).eval(), expected)
Developer: colinfang | Project: Lasagne | Lines: 34 | Source file: test_special.py
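
The expected values in the test above all follow the PReLU formula f(x) = maximum(x, 0) + alpha * minimum(x, 0); here is a tiny standalone sketch with made-up numbers showing how np.minimum supplies the negative branch.

import numpy as np

x = np.array([-2.0, -0.5, 1.0])
alpha = 0.1
prelu = np.maximum(x, 0) + alpha * np.minimum(x, 0)
print(prelu)   # [-0.2  -0.05  1.  ] -- negative inputs are scaled by alpha, positive ones pass through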


Example 10: onmouse

def onmouse(event, x, y, flags, param):
    global selection, drag_start, tracking_state, show_backproj, down_x, down_y, selcFrame
    global mouseX, mouseY, trackBoxShow
    x, y = np.int16([x, y]) #[sic] BUG
    mouseX = x
    mouseY = y
    if event == cv2.EVENT_LBUTTONDOWN:
        down_x = x
        down_y = y
        drag_start = (x, y)
        tracking_state = 0
        trackBoxShow = True
    if event == cv2.EVENT_LBUTTONUP:
        trackBoxShow = False
    if drag_start:
        if flags & cv2.EVENT_FLAG_LBUTTON:
            h, w = selcFrame.shape[:2]
            xo, yo = drag_start
            x0, y0 = np.maximum(0, np.minimum([xo, yo], [x, y]))
            x1, y1 = np.minimum([w, h], np.maximum([xo, yo], [x, y]))
            selection = None
            if x1-x0 > 0 and y1-y0 > 0:
                selection = (x0, y0, x1, y1)
        else:
            drag_start = None
            if selection is not None:
                tracking_state = 1
Developer: Ladvien | Project: Overlord | Lines: 27 | Source file: overlord.py


Example 11: __getitem__

    def __getitem__(self, index):

        if self._representation == 'mv':
            representation_idx = 1
        elif self._representation == 'residual':
            representation_idx = 2
        else:
            representation_idx = 0


        if self._is_train:
            video_path, label, num_frames = random.choice(self._video_list)
        else:
            video_path, label, num_frames = self._video_list[index]

        frames = []
        for seg in range(self._num_segments):

            if self._is_train:
                gop_index, gop_pos = self._get_train_frame_index(num_frames, seg)
            else:
                gop_index, gop_pos = self._get_test_frame_index(num_frames, seg)

            img = load(video_path, gop_index, gop_pos,
                       representation_idx, self._accumulate)

            if img is None:
                print('Error: loading video %s failed.' % video_path)
                img = np.zeros((256, 256, 2)) if self._representation == 'mv' else np.zeros((256, 256, 3))
            else:
                if self._representation == 'mv':
                    img = clip_and_scale(img, 20)
                    img += 128
                    img = (np.minimum(np.maximum(img, 0), 255)).astype(np.uint8)
                elif self._representation == 'residual':
                    img += 128
                    img = (np.minimum(np.maximum(img, 0), 255)).astype(np.uint8)

            if self._representation == 'iframe':
                img = color_aug(img)

                # BGR to RGB. (PyTorch uses RGB according to doc.)
                img = img[..., ::-1]

            frames.append(img)

        frames = self._transform(frames)

        frames = np.array(frames)
        frames = np.transpose(frames, (0, 3, 1, 2))
        input = torch.from_numpy(frames).float() / 255.0

        if self._representation == 'iframe':
            input = (input - self._input_mean) / self._input_std
        elif self._representation == 'residual':
            input = (input - 0.5) / self._input_std
        elif self._representation == 'mv':
            input = (input - 0.5)

        return input, label
Developer: baiyancheng20 | Project: pytorch-coviar | Lines: 60 | Source file: dataset.py


Example 12: calculate_coeff_proratisation

    def calculate_coeff_proratisation(self, info_ind, trim_wage_regime, trim_wage_all):
        ''' Compute the proration coefficient '''
        
        def _assurance_corrigee(trim_regime, agem):
            ''' 
            Two types of corrections:
            - the 1948-1982 correction
            - determination of the corrected insurance duration introduced by the Boulin reform
            (increase when retiring after age 65), from 1983 onward'''
            P = reduce(getattr, self.param_name.split('.'), self.P)
            
            if P.prorat.dispositif == 1:
                correction = (P.prorat.n_trim - trim_regime)/2
                return trim_regime + correction
            elif P.prorat.dispositif == 2:
                age_taux_plein = P.decote.age_null
                trim_majo = divide(agem - age_taux_plein, 3)*(agem > age_taux_plein)
                elig_majo = (trim_regime < P.prorat.n_trim)
                correction = trim_regime*P.tx_maj*trim_majo*elig_majo
                return trim_regime + correction
            else:
                return trim_regime

        P =  reduce(getattr, self.param_name.split('.'), self.P)
        trim_regime = trim_wage_regime['trimesters']['regime'].sum(1) 
        trim_regime_maj = sum(trim_wage_regime['maj'].values())
        agem = info_ind['agem']
        trim_regime = trim_regime_maj + trim_regime  # _assurance_corrigee(trim_regime, agem) 
        # phase-in provision for the Boulin law (only applies between 72 and 74):
        if P.prorat.application_plaf == 1:
            trim_regime = minimum(trim_regime, P.prorat.plaf) 
        CP = minimum(1, divide(trim_regime, P.prorat.n_trim))
        return CP
Developer: simonrabate | Project: Til-Pension | Lines: 33 | Source file: regime_prive.py


Example 13: minimum_pension

    def minimum_pension(self, trim_wages_reg, trim_wages_all, pension_reg, pension_all):
        ''' MICO of the general scheme: differential allowance
        Note: ASPA and the minimum old-age pension are handled by OF
        It is granted regardless of the income the retiree has in addition to their pensions: rent, capital income, professional activity...
        + apportionment mechanism when contributions were paid into several schemes
        TODO: code all the changes and twists of 2004/2008'''
        P = reduce(getattr, self.param_name.split('.'), self.P)
        # pension_RG, pension, trim_RG, trim_cot, trim
        trimesters = trim_wages_reg['trimesters']
        trim_regime = trimesters['regime'].sum() + sum(trim_wages_reg['maj'].values())
        coeff = minimum(1, divide(trim_regime, P.prorat.n_trim))
        if P.mico.dispositif == 0:
            # Before January 1st 1983, compared with the AVTS
            min_pension = self.P.common.avts
            return maximum(min_pension - pension_reg,0)*coeff
        elif P.mico.dispositif == 1:
            # TODO: see how to handle the fairly complex cumulation cap (COR Doc no. 5)
            mico = P.mico.entier
            return maximum(mico - pension_reg,0)*coeff
        elif P.mico.dispositif == 2:
            # From January 1st 2004, contributed periods are taken into account (+ transitional provisions of 2004)
            nb_trim = P.prorat.n_trim
            trim_regime = trimesters['regime'].sum() #+ sum(trim_wages_regime['maj'].values())
            trim_cot_regime = sum(trimesters[key].sum() for key in trimesters.keys() if 'cot' in key)
            mico_entier = P.mico.entier*minimum(divide(trim_regime, nb_trim), 1)
            maj = (P.mico.entier_maj - P.mico.entier)*divide(trim_cot_regime, nb_trim)
            mico = mico_entier + maj*(trim_cot_regime >= P.mico.trim_min)
            return (mico - pension_reg)*(mico > pension_reg)*(pension_reg>0)
Developer: simonrabate | Project: Til-Pension | Lines: 28 | Source file: regime_prive.py


Example 14: step

        def step(self):
            """
            Perform single automaton step.
            """

            gsum = sum(self.grid)

            # Compute burn touched cells
            maximum(1, self.grid, self.burn_map)
            self.burn_map -= 1

            # Correlate cells for next set of fires
            correlate(self.burn_map, self.spread, mode='constant', cval=0,
                output=self.next_burn_map)

            # And cutoff at 1 and multiply by grid to remove
            # barren cells.
            self.next_burn_map *= self.grid
            minimum(1, self.next_burn_map, self.next_burn_map)

            # Finally ignite next set of trees and top at barren
            self.grid += self.next_burn_map
            self.grid += self.burn_map
            minimum(3, self.grid, self.grid)

            if p.sleep:
                __import__('time').sleep(p.sleep)

            # No more fire?
            return gsum < sum(self.grid)
Developer: elout | Project: lewd | Lines: 30 | Source file: ff.py


Example 15: test_elementwise_min_grad

    def test_elementwise_min_grad(self, n, m, d, gc, dc):
        go = np.random.rand(n, m, d).astype(np.float32)
        X = np.random.rand(n, m, d).astype(np.float32)
        Y = np.random.rand(n, m, d).astype(np.float32)
        Z = np.random.rand(n, m, d).astype(np.float32)
        mx = np.minimum(np.minimum(X, Y), Z)
        inputs = [mx, go, X, Y, Z]

        def min_grad_op(mx, go, X, Y, Z):
            def mx_grad(a):
                return go * (mx == a)

            return [mx_grad(a) for a in [X, Y, Z]]

        op = core.CreateOperator(
            "MinGradient",
            ["mx", "go", "X", "Y", "Z"],
            ["gX", "gY", "gZ"]
        )

        self.assertReferenceChecks(
            device_option=gc,
            op=op,
            inputs=inputs,
            reference=min_grad_op,
        )
        self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
Developer: gtgalone | Project: pytorch | Lines: 27 | Source file: utility_ops_test.py


Example 16: image_cart_to_polar

def image_cart_to_polar(image, center, min_radius, max_radius, phase_width, zoom_factor=1):
    '''Converts an image from cartesian to polar coordinates around center'''

    # Upsample image
    if zoom_factor != 1:
        image = zoom(image, (zoom_factor, zoom_factor), order=4)
        center = (center[0]*zoom_factor + zoom_factor/2, center[1]*zoom_factor + zoom_factor/2)
        min_radius = min_radius * zoom_factor
        max_radius = max_radius * zoom_factor

    # pad if necessary
    max_x, max_y = image.shape[0], image.shape[1]
    pad_dist_x = np.max([(center[0] + max_radius) - max_x, -(center[0] - max_radius)])
    pad_dist_y = np.max([(center[1] + max_radius) - max_y, -(center[1] - max_radius)])
    pad_dist = int(np.max([0, pad_dist_x, pad_dist_y]))
    if pad_dist != 0:
        image = np.pad(image, pad_dist, 'constant')

    # coordinate conversion
    theta, r = np.meshgrid(np.linspace(0, 2*np.pi, phase_width),
                           np.arange(min_radius, max_radius))
    x, y = coord_polar_to_cart(r, theta, center)
    x, y = np.round(x), np.round(y)
    x, y = x.astype(int), y.astype(int)
    x = np.maximum(x, 0)
    y = np.maximum(y, 0)
    x = np.minimum(x, max_x-1)
    y = np.minimum(y, max_y-1)


    polar = image[x, y]
    polar.reshape((max_radius - min_radius, phase_width))

    return polar
Developer: Peichao | Project: Constrained_NMF | Lines: 34 | Source file: cell_magic_wand.py


Example 17: nms

def nms(boxes, threshold, method):
    if boxes.size == 0:
        return np.empty((0, 3))
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    s = boxes[:, 4]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    I = np.argsort(s)
    pick = np.zeros_like(s, dtype=np.int16)
    counter = 0
    while I.size > 0:
        i = I[-1]
        pick[counter] = i
        counter += 1
        idx = I[0:-1]
        xx1 = np.maximum(x1[i], x1[idx])
        yy1 = np.maximum(y1[i], y1[idx])
        xx2 = np.minimum(x2[i], x2[idx])
        yy2 = np.minimum(y2[i], y2[idx])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
    if method == 'Min':
            o = inter / np.minimum(area[i], area[idx])
        else:
            o = inter / (area[i] + area[idx] - inter)
        I = I[np.where(o <= threshold)]
    pick = pick[0:counter]
    return pick
Developer: nxp-gf | Project: flask-facep-reg-v3 | Lines: 31 | Source file: mtcnn_detect.py
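
As a hypothetical sanity check of the nms function above (box coordinates invented for this sketch, function assumed to be in scope): with two heavily overlapping boxes and one distant box, a 0.5 threshold under the 'Union' (IoU) rule keeps the higher-scoring box of the overlapping pair plus the distant box.

import numpy as np

boxes = np.array([
    [0,  0,  9,  9, 0.9],   # highest score, kept first
    [1,  1, 10, 10, 0.8],   # IoU with the first box is about 0.68 > 0.5, so it is suppressed
    [20, 20, 29, 29, 0.7],  # no overlap with the kept box, so it is kept
])
print(nms(boxes, 0.5, 'Union'))   # indices of the kept boxes: [0 2]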


Example 18: update_qm_region

    def update_qm_region(self, atoms,
                         potential_energies=None,
                         ):
        """Update the QM region while the simulation is running

        Parameters
        ----------
        atoms : ase.Atoms
            whole structure
        potential_energies : array
            Potential energy per atom

        Returns
        -------
        list of lists of ints
            list of individual clusters as lists of atoms
        """
        # Make sure the right atoms object is in

        # ------ Increase the energy by a common factor - makes it more readable in some cases
        if (self.energy_increase is not None):
            potential_energies *= self.energy_increase

        # ------ Cap maximum energy according to the flag
        if (self.energy_cap is not None):
            np.minimum(potential_energies, self.energy_cap, potential_energies)

        # ------ Get the energized atoms list
        flagged_atoms_dict = {}

        flagged_atoms_dict["potential_energies"] = self.get_energized_list(atoms,
                                                                           potential_energies,
                                                                           "avg_potential_energies",
                                                                           self.qm_flag_potential_energies)

        energized_set = set()
        for key in flagged_atoms_dict:
            energized_set = set(flagged_atoms_dict[key]) | energized_set
        energized_list = list(energized_set)
        self.old_energized_list = list(energized_list)

        if (len(energized_list) != 0):
            self.mediator.neighbour_list.update(atoms)

        # TODO if energized list include the whole system just pass it along
        for array_i, atom_i in enumerate(energized_list):
            energized_list[array_i] = self.create_cluster_around_atom(atoms, atom_i, hydrogenate=False)

        self.qm_atoms_list = energized_list
        if (len(self.qm_atoms_list) > 0):
            self.join_clusters()
            self.expand_cluster(self.mediator.special_atoms_list)
            self.join_clusters()

        if self.only_heavy is False:
            for index in range(len(self.qm_atoms_list)):
                self.qm_atoms_list[index] = self.hydrogenate_cluster(atoms, self.qm_atoms_list[index])

        self.qm_atoms_list = list(map(list, self.qm_atoms_list))
        return self.qm_atoms_list
Developer: libAtoms | Project: matscipy | Lines: 60 | Source file: qm_flagging_tool.py


Example 19: rp_gumbel_original

def rp_gumbel_original(p_zero, loc, scale, flvol, max_return_period=1e9):
    """
    Transforms a unique, or array of flood volumes into the belonging return
    periods, according to gumbel parameters (belonging to non-zero part of the
    distribution) and a zero probability
    Inputs:
        p_zero:        probability that flood volume is zero
        loc:           Gumbel location parameter (of non-zero part of distribution)
        scale:         Gumbel scale parameter (of non-zero part of distribution)
        flvol:         Flood volume that will be transformed to return period
        max_return_period: maximum return period considered. This maximum is needed to prevent that floating point
                        precision becomes a problem (default: 1e9)
    This function is copied from: https://repos.deltares.nl/repos/Hydrology/trunk/GLOFRIS/src/rp_bias_corr.py
    """
    
    np.seterr(divide='ignore')
    np.seterr(invalid='ignore')
    max_p = 1-1./max_return_period
    max_p_residual = np.minimum(np.maximum((max_p-np.float64(p_zero))/(1-np.float64(p_zero)), 0), 1)
    max_reduced_variate = -np.log(-np.log(np.float64(max_p_residual)))
    # compute the gumbel reduced variate belonging to the Gumbel distribution (excluding any zero-values)
    # make sure that the reduced variate does not exceed the one, resembling the 1,000,000 year return period
    reduced_variate = np.minimum((flvol-loc)/scale, max_reduced_variate)
    # reduced_variate = (flvol-loc)/scale
    # transform the reduced variate into a probability (residual after removing the zero volume probability)
    p_residual = np.minimum(np.maximum(np.exp(-np.exp(-np.float64(reduced_variate))), 0), 1)
    # tranform from non-zero only distribution to zero-included distribution
    p = np.minimum(np.maximum(p_residual*(1-p_zero) + p_zero, p_zero), max_p)  # Never larger than max_p
    # transform into a return period    
    return_period = 1./(1-p)
    test_p = p == 1    
    return return_period, test_p
Developer: edwinkost | Project: extreme_value_analysis | Lines: 32 | Source file: glofris_postprocess_edwin_modified.py


Example 20: nms

def nms(dets, thresh):
  # -------------------------
  # Pure Python NMS baseline.
  # Written by Ross Girshick
  # -------------------------
  x1 = dets[:, 0] - dets[:, 2] / 2.
  y1 = dets[:, 1] - dets[:, 3] / 2.
  x2 = dets[:, 0] + dets[:, 2] / 2.
  y2 = dets[:, 1] + dets[:, 3] / 2.
  scores = dets[:, 4]  # predicted scores
  areas = (x2 - x1 + 1) * (y2 - y1 + 1)
  order = scores.argsort()[::-1]  # indices sorted by score, descending
  keep = []
  while order.size > 0:
    i = order[0]
    keep.append(i)
    xx1 = np.maximum(x1[i], x1[order[1:]])
    yy1 = np.maximum(y1[i], y1[order[1:]])
    xx2 = np.minimum(x2[i], x2[order[1:]])
    yy2 = np.minimum(y2[i], y2[order[1:]])
    w = np.maximum(0.0, xx2 - xx1 + 1)
    h = np.maximum(0.0, yy2 - yy1 + 1)
    inter = w * h
    ovr = inter / (areas[i] + areas[order[1:]] - inter)
    inds = np.where(ovr <= thresh)[0]
    order = order[inds + 1]
  return dets[np.require(keep), :]
Developer: dyz-zju | Project: MVision | Lines: 27 | Source file: show_det.py



Note: The numpy.minimum function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors; please refer to the corresponding project's license before distributing or using them. Do not reproduce without permission.

