Python numpy.clip Function Code Examples


This article collects and summarizes typical usage examples of Python's numpy.clip function. If you are unsure what numpy.clip does, how to call it, or what real-world uses look like, the curated examples below may help.



The following presents 20 code examples of the clip function, sorted by popularity by default.
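
Before turning to the project-specific examples, here is a minimal, self-contained sketch of numpy.clip itself (written for this article, not taken from any project below). It shows the three forms that recur throughout the examples: two scalar bounds, a single bound with None, and in-place clipping via the out argument.

import numpy as np

a = np.array([-3.0, -0.5, 0.2, 0.9, 4.0])

# Two scalar bounds: values outside [0, 1] are replaced by the nearest bound.
print(np.clip(a, 0.0, 1.0))        # [0.  0.  0.2 0.9 1. ]

# A single bound: pass None for the side you do not want to limit.
print(np.clip(a, 0.0, None))       # [0.  0.  0.2 0.9 4. ]

# In-place clipping: write the result back into the input array.
np.clip(a, -1.0, 1.0, out=a)
print(a)                           # [-1.  -0.5  0.2  0.9  1. ]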

Example 1: lowess

def lowess(x, y, f=2. / 3., iter=3):
    """lowess(x, y, f=2./3., iter=3) -> yest

    Lowess smoother: Robust locally weighted regression.
    The lowess function fits a nonparametric regression curve to a scatterplot.
    The arrays x and y contain an equal number of elements; each pair
    (x[i], y[i]) defines a data point in the scatterplot. The function returns
    the estimated (smooth) values of y.

    The smoothing span is given by f. A larger value for f will result in a
    smoother curve. The number of robustifying iterations is given by iter. The
    function will run faster with a smaller number of iterations.

    x and y should be numpy float arrays of equal length.  The return value is
    also a numpy float array of that length.

    e.g.
    >>> import numpy
    >>> x = numpy.array([4,  4,  7,  7,  8,  9, 10, 10, 10, 11, 11, 12, 12, 12,
    ...                 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 16, 16,
    ...                 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 20, 20, 20, 20,
    ...                 20, 22, 23, 24, 24, 24, 24, 25], numpy.float)
    >>> y = numpy.array([2, 10,  4, 22, 16, 10, 18, 26, 34, 17, 28, 14, 20, 24,
    ...                 28, 26, 34, 34, 46, 26, 36, 60, 80, 20, 26, 54, 32, 40,
    ...                 32, 40, 50, 42, 56, 76, 84, 36, 46, 68, 32, 48, 52, 56,
    ...                 64, 66, 54, 70, 92, 93, 120, 85], numpy.float)
    >>> result = lowess(x, y)
    >>> len(result)
    50
    >>> print "[%0.2f, ..., %0.2f]" % (result[0], result[-1])
    [4.85, ..., 84.98]
    """
    n = len(x)
    r = int(numpy.ceil(f * n))
    h = [numpy.sort(abs(x - x[i]))[r] for i in range(n)]
    w = numpy.clip(abs(([x] - numpy.transpose([x])) / h), 0.0, 1.0)
    w = 1 - w * w * w
    w = w * w * w
    yest = numpy.zeros(n)
    delta = numpy.ones(n)
    for iteration in range(iter):
        for i in xrange(n):
            weights = delta * w[:, i]
            weights_mul_x = weights * x
            b1 = numpy.dot(weights, y)
            b2 = numpy.dot(weights_mul_x, y)
            A11 = sum(weights)
            A12 = sum(weights_mul_x)
            A21 = A12
            A22 = numpy.dot(weights_mul_x, x)
            determinant = A11 * A22 - A12 * A21
            beta1 = (A22 * b1 - A12 * b2) / determinant
            beta2 = (A11 * b2 - A21 * b1) / determinant
            yest[i] = beta1 + beta2 * x[i]
        residuals = y - yest
        s = numpy.median(abs(residuals))
        delta[:] = numpy.clip(residuals / (6 * s), -1, 1)
        delta[:] = 1 - delta * delta
        delta[:] = delta * delta
    return yest
Developer ID: joshainglis, Project: biopython, Lines of code: 60, Source file: lowess.py


Example 2: mouseMoveEvent

    def mouseMoveEvent(self, ev):
        if self.lastMousePos is None:
            self.lastMousePos = Point(ev.pos())
        delta = Point(ev.pos() - self.lastMousePos)
        self.lastMousePos = Point(ev.pos())

        QtGui.QGraphicsView.mouseMoveEvent(self, ev)
        if not self.mouseEnabled:
            return
        self.sigSceneMouseMoved.emit(self.mapToScene(ev.pos()))

        if self.clickAccepted:  ## Ignore event if an item in the scene has already claimed it.
            return

        if ev.buttons() == QtCore.Qt.RightButton:
            delta = Point(np.clip(delta[0], -50, 50), np.clip(-delta[1], -50, 50))
            scale = 1.01 ** delta
            self.scale(scale[0], scale[1], center=self.mapToScene(self.mousePressPos))
            self.sigRangeChanged.emit(self, self.range)

        elif ev.buttons() in [QtCore.Qt.MidButton, QtCore.Qt.LeftButton]:  ## Allow panning by left or mid button.
            px = self.pixelSize()
            tr = -delta * px

            self.translate(tr[0], tr[1])
            self.sigRangeChanged.emit(self, self.range)
Developer ID: fivejjs, Project: pyqtgraph, Lines of code: 26, Source file: GraphicsView.py


Example 3: _compute_normalized_data

    def _compute_normalized_data(self, data_array):
        """
        Apply `data_func`, then linearly scale to the unit interval, and
        then apply `unit_func`.
        """
        
        # FIXME: Deal with nans?

        if self._dirty:
            self._recalculate()

        if self.data_func is not None:
            data_array = self.data_func(data_array)
            low, high = self.transformed_bounds
        else:
            low, high = self.range.low, self.range.high
        range_diff = high - low

        # Linearly transform the values to the unit interval.        

        if range_diff == 0.0 or isinf(range_diff):
            # Handle null range, or infinite range (which can happen during 
            # initialization before range is connected to a data source).
            norm_data = 0.5*ones_like(data_array)
        else:
            norm_data = empty(data_array.shape, dtype='float32')
            norm_data[:] = data_array
            norm_data -= low
            norm_data /= range_diff
            clip(norm_data, 0.0, 1.0, norm_data)

        if self.unit_func is not None:
            norm_data = self.unit_func(norm_data)

        return norm_data
Developer ID: 5n1p, Project: chaco, Lines of code: 35, Source file: transform_color_mapper.py


Example 4: numpy_run

 def numpy_run(self):
     """Forward propagation from batch on CPU only.
     """
     super(All2AllStrictRELU, self).numpy_run()
     self.output.map_write()
     mem = self.output.mem
     numpy.clip(mem, 0.0, 1.0e30, mem)
Developer ID: vmarkovtsev, Project: veles.znicz, Lines of code: 7, Source file: all2all.py


Example 5: isotonic_regression

def isotonic_regression(y, sample_weight=None, y_min=None, y_max=None,
                        increasing=True):
    """Solve the isotonic regression model::

        min sum w[i] (y[i] - y_[i]) ** 2

        subject to y_min = y_[1] <= y_[2] ... <= y_[n] = y_max

    where:
        - y[i] are inputs (real numbers)
        - y_[i] are fitted
        - w[i] are optional strictly positive weights (default to 1.0)

    Read more in the :ref:`User Guide <isotonic>`.

    Parameters
    ----------
    y : iterable of floating-point values
        The data.

    sample_weight : iterable of floating-point values, optional, default: None
        Weights on each point of the regression.
        If None, weight is set to 1 (equal weights).

    y_min : optional, default: None
        If not None, set the lowest value of the fit to y_min.

    y_max : optional, default: None
        If not None, set the highest value of the fit to y_max.

    increasing : boolean, optional, default: True
        Whether to compute ``y_`` is increasing (if set to True) or decreasing
        (if set to False)

    Returns
    -------
    y_ : list of floating-point values
        Isotonic fit of y.

    References
    ----------
    "Active set algorithms for isotonic regression; A unifying framework"
    by Michael J. Best and Nilotpal Chakravarti, section 3.
    """
    order = np.s_[:] if increasing else np.s_[::-1]
    y = np.array(y[order], dtype=np.float64)
    if sample_weight is None:
        sample_weight = np.ones(len(y), dtype=np.float64)
    else:
        sample_weight = np.array(sample_weight[order], dtype=np.float64)

    _inplace_contiguous_isotonic_regression(y, sample_weight)
    if y_min is not None or y_max is not None:
        # Older versions of np.clip don't accept None as a bound, so use np.inf
        if y_min is None:
            y_min = -np.inf
        if y_max is None:
            y_max = np.inf
        np.clip(y, y_min, y_max, y)
    return y[order]
Developer ID: AlexandreAbraham, Project: scikit-learn, Lines of code: 60, Source file: isotonic.py
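
The workaround in the example above (substituting ±np.inf when a bound is None) targets older NumPy releases, as the inline comment notes. On current NumPy a single None bound is accepted directly; the following snippet is a sketch of that behaviour and is not scikit-learn code.

import numpy as np

y = np.array([0.2, 1.5, 3.7, 8.0])
# Upper bound only; equivalent to np.clip(y, -np.inf, 3.0, out=y).
np.clip(y, None, 3.0, out=y)
print(y)   # [0.2 1.5 3.  3. ]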


Example 6: lossFun

def lossFun(inputs, targets, hprev):
  """
  inputs,targets are both list of integers.
  hprev is Hx1 array of initial hidden state
  returns the loss, gradients on model parameters, and last hidden state
  """
  xs, hs, ys, ps = {}, {}, {}, {}
  hs[-1] = np.copy(hprev)
  loss = 0
  # forward pass
  for t in xrange(len(inputs)):
    xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation
    xs[t][inputs[t]] = 1
    hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state
    ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars
    ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars
    loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)
  # backward pass: compute gradients going backwards
  dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
  dbh, dby = np.zeros_like(bh), np.zeros_like(by)
  dhnext = np.zeros_like(hs[0])
  for t in reversed(xrange(len(inputs))):
    dy = np.copy(ps[t])
    dy[targets[t]] -= 1 # backprop into y
    dWhy += np.dot(dy, hs[t].T)
    dby += dy
    dh = np.dot(Why.T, dy) + dhnext # backprop into h
    dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
    dbh += dhraw
    dWxh += np.dot(dhraw, xs[t].T)
    dWhh += np.dot(dhraw, hs[t-1].T)
    dhnext = np.dot(Whh.T, dhraw)
  for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
    np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
  return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
Developer ID: tienchil, Project: neural_networks_projects, Lines of code: 35, Source file: p4.py


Example 7: _raisePermanenceToThreshold

	def _raisePermanenceToThreshold(self, perm, mask):
	    """
	    This method ensures that each column has enough connections to input bits
	    to allow it to become active. Since a column must have at least
	    'self._stimulusThreshold' overlaps in order to be considered during the
	    inhibition phase, columns without such minimal number of connections, even
	    if all the input bits they are connected to turn on, have no chance of
	    obtaining the minimum threshold. For such columns, the permanence values
	    are increased until the minimum number of connections are formed.


	    Parameters:
	    ----------------------------
	    @param perm:    An array of permanence values for a column. The array is
	                    "dense", i.e. it contains an entry for each input bit, even
	                    if the permanence value is 0.
	    @param mask:    the indices of the columns whose permanences need to be
	                    raised.
	    """
	    if len(mask) < self._stimulusThreshold:
	        raise Exception("This is likely due to a " +
	        "value of stimulusThreshold that is too large relative " +
	        "to the input size. [len(mask) < self._stimulusThreshold]")

	    numpy.clip(perm, self._synPermMin, self._synPermMax, out=perm)
	    while True:
	        numConnected = numpy.nonzero(perm > self._synPermConnected)[0].size
	        if numConnected >= self._stimulusThreshold:
	            return
	        perm[mask] += self._synPermBelowStimulusInc
Developer ID: pikeabot, Project: spatial-pooler, Lines of code: 30, Source file: augmented_spatial_pooler.py


Example 8: K

 def K(self, X, X2=None,alpha=None,variance=None):
     """
     Computes the covariance matrix cov(X[i,:],X2[j,:]).
     
     Args:
         X: Matrix where each row is a point.
         X2: Matrix where each row is a point.
         alpha: It's the scaled alpha.
         Variance: Sigma hyperparameter.
         
     """
     if alpha is None:
         alpha=self.alpha
     if variance is None:
         variance=self.variance
     
     if X2 is None:
         X=X*alpha/self.scaleAlpha
         Xsq=np.sum(np.square(X), 1)
         r=-2.*np.dot(X, X.T) + (Xsq[:, None] + Xsq[None, :])
         r = np.clip(r, 0, np.inf)
         return variance*np.exp(-0.5*r)
     else:
         X=X*alpha/self.scaleAlpha
         X2=X2*alpha/self.scaleAlpha
         r=-2.*np.dot(X, X2.T) + (np.sum(np.square(X), 1)[:, None] + np.sum(np.square(X2), 1)[None, :])
         r = np.clip(r, 0, np.inf)
         return variance*np.exp(-0.5*r)
Developer ID: toscanosaul, Project: SBO, Lines of code: 28, Source file: SK.py


Example 9: test_special_sparse_dot

def test_special_sparse_dot():
    # Test the function that computes np.dot(W, H), only where X is non zero.
    n_samples = 10
    n_features = 5
    n_components = 3
    rng = np.random.mtrand.RandomState(42)
    X = rng.randn(n_samples, n_features)
    np.clip(X, 0, None, out=X)
    X_csr = sp.csr_matrix(X)

    W = np.abs(rng.randn(n_samples, n_components))
    H = np.abs(rng.randn(n_components, n_features))

    WH_safe = nmf._special_sparse_dot(W, H, X_csr)
    WH = nmf._special_sparse_dot(W, H, X)

    # test that both results have same values, in X_csr nonzero elements
    ii, jj = X_csr.nonzero()
    WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
    assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)

    # test that WH_safe and X_csr have the same sparse structure
    assert_array_equal(WH_safe.indices, X_csr.indices)
    assert_array_equal(WH_safe.indptr, X_csr.indptr)
    assert_array_equal(WH_safe.shape, X_csr.shape)
Developer ID: kjacks21, Project: scikit-learn, Lines of code: 25, Source file: test_nmf.py


Example 10: _clipToSafeRange

 def _clipToSafeRange(min_, max_, isLog):
     # Clip range if needed
     minLimit = FLOAT32_MINPOS if isLog else FLOAT32_SAFE_MIN
     min_ = numpy.clip(min_, minLimit, FLOAT32_SAFE_MAX)
     max_ = numpy.clip(max_, minLimit, FLOAT32_SAFE_MAX)
     assert min_ < max_
     return min_, max_
Developer ID: dnaudet, Project: silx, Lines of code: 7, Source file: GLPlotFrame.py


Example 11: combine_images

def combine_images(imgs, alphas):
    """ Combine multiple rgb images in one rgb image """
    image_f = numpy.zeros(imgs[0].shape, dtype='float')
    for i in range(0, len(imgs)):
        image_f += alphas[i] * imgs[i]
    numpy.clip(image_f, 0., 255., image_f)
    return numpy.array(image_f, dtype='uint8')
Developer ID: Moschn, Project: neuronal-activity-analyzer, Lines of code: 7, Source file: util.py


Example 12: _compute_disk_overlap

def _compute_disk_overlap(d, r1, r2):
    """
    Compute surface overlap between two disks of radii ``r1`` and ``r2``,
    with centers separated by a distance ``d``.

    Parameters
    ----------
    d : float
        Distance between centers.
    r1 : float
        Radius of the first disk.
    r2 : float
        Radius of the second disk.

    Returns
    -------
    vol: float
        Volume of the overlap between the two disks.
    """

    ratio1 = (d ** 2 + r1 ** 2 - r2 ** 2) / (2 * d * r1)
    ratio1 = np.clip(ratio1, -1, 1)
    acos1 = math.acos(ratio1)

    ratio2 = (d ** 2 + r2 ** 2 - r1 ** 2) / (2 * d * r2)
    ratio2 = np.clip(ratio2, -1, 1)
    acos2 = math.acos(ratio2)

    a = -d + r2 + r1
    b = d - r2 + r1
    c = d + r2 - r1
    d = d + r2 + r1
    area = (r1 ** 2 * acos1 + r2 ** 2 * acos2 -
            0.5 * sqrt(abs(a * b * c * d)))
    return area / (math.pi * (min(r1, r2) ** 2))
Developer ID: ericsolo, Project: python, Lines of code: 35, Source file: blobs_detection.py
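
The two np.clip calls above guard math.acos against floating-point round-off: the cosine ratios are mathematically within [-1, 1], but finite precision can push them marginally outside, and math.acos then raises ValueError. A minimal illustration (constructed for this article, not taken from blobs_detection.py):

import math
import numpy as np

ratio = 1.0 + 1e-15          # round-off has nudged the value just past 1
try:
    math.acos(ratio)
except ValueError as err:
    print("unclipped:", err)  # math domain error

print(math.acos(np.clip(ratio, -1, 1)))  # 0.0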


Example 13: interpgrid

def interpgrid(a, xi, yi):
    """Fast 2D, linear interpolation on an integer grid"""

    Ny, Nx = np.shape(a)
    if isinstance(xi, np.ndarray):
        x = xi.astype(np.int)
        y = yi.astype(np.int)
        # Check that xn, yn don't exceed max index
        xn = np.clip(x + 1, 0, Nx - 1)
        yn = np.clip(y + 1, 0, Ny - 1)
    else:
        x = np.int(xi)
        y = np.int(yi)
        # conditional is faster than clipping for integers
        if x == (Nx - 2): xn = x
        else: xn = x + 1
        if y == (Ny - 2): yn = y
        else: yn = y + 1

    a00 = a[y, x]
    a01 = a[y, xn]
    a10 = a[yn, x]
    a11 = a[yn, xn]
    xt = xi - x
    yt = yi - y
    a0 = a00 * (1 - xt) + a01 * xt
    a1 = a10 * (1 - xt) + a11 * xt
    ai = a0 * (1 - yt) + a1 * yt

    if not isinstance(xi, np.ndarray):
        if np.ma.is_masked(ai):
            raise TerminateTrajectory

    return ai
Developer ID: andreas-h, Project: matplotlib, Lines of code: 34, Source file: streamplot.py


Example 14: rmsprop_one_step

    def rmsprop_one_step(self, param_name, index, grad_args, decay = 0.9, momentum = 0, learning_rate_adapt = 0.05, 
        learning_rate_min = 1e-6, learning_rate_max = 10):
        # RMSPROP: Tieleman, T. and Hinton, G. (2012), Lecture 6.5 - rmsprop, COURSERA: Neural Networks for Machine Learning
        # Implementation based on https://github.com/BRML/climin/blob/master/climin/rmsprop.py
        
        # We use Nesterov momentum: first, we make a step according to the momentum and then we calculate the gradient.
        step1 = self.param_updates[param_name] * momentum
        self.wrt[param_name].set_value(self.wrt[param_name].get_value()+step1)
        grad = self.get_grad(*grad_args)

        self.moving_mean_squared[param_name] = (decay * self.moving_mean_squared[param_name] + (1 - decay) * grad ** 2)
        step2 = self.learning_rates[param_name] * grad / (self.moving_mean_squared[param_name] + 1e-8)**0.5

        # DEBUG
        if param_name == 'lhyp' or 'ls':  # NOTE: the bare 'ls' is always truthy, so this branch always runs; likely meant `param_name in ('lhyp', 'ls')`
            step2 = np.clip(step2, -0.1, 0.1)
        
        self.wrt[param_name].set_value(self.wrt[param_name].get_value()+step2)
        #self.params[param_name] += step2

        step = step1 + step2

        # Step rate adaption. If the current step and the momentum agree, we slightly increase the step rate for that dimension.
        if learning_rate_adapt:
            # This code might look weird, but it makes it work with both numpy and gnumpy.
            step_non_negative = step > 0
            step_before_non_negative = self.param_updates[param_name] > 0
            agree = (step_non_negative == step_before_non_negative) * 1.  # yields 0 or 1
            adapt = 1 + agree * learning_rate_adapt * 2 - learning_rate_adapt
            self.learning_rates[param_name] *= adapt
            self.learning_rates[param_name] = np.clip(self.learning_rates[param_name], learning_rate_min, learning_rate_max)

        self.param_updates[param_name] = step
Developer ID: futoshi-futami, Project: GP-and-GPLVM, Lines of code: 33, Source file: DGPLVM_opt_cl.py


Example 15: updateParticles

    def updateParticles(self):
        # Update positions with velocity
        self.particles[:, 0:2] += self.particles[:, 4:6]
        #np.clip(self.particles[:,0], 0, self.bounds[0], self.particles[:,0])
        #np.clip(self.particles[:,1], 0, self.bounds[1], self.particles[:,1])

        # Add noise to w,h
        if self.SIGMA_size > 0.0001:
            self.particles[:, 2:4] += random.normal(0, self.SIGMA_size, (self.particles.shape[0], 2))
        #np.clip(self.particles[:,2], 1, self.bounds[0], self.particles[:,2])
        #np.clip(self.particles[:,3], 1, self.bounds[1], self.particles[:,3])
        # Add noise to velocities and clip
        self.particles[:, 4:6] += random.normal(
            0, self.SIGMA_velocity, (self.particles.shape[0], 2))
        #np.clip(self.particles[:,4:6], -MAX_velocity,MAX_velocity, self.particles[:,4:6])

        lb = [0, 0, 1, 1, -MAX_velocity, -MAX_velocity, 0]
        ub = [self.bounds[1],
              self.bounds[0],
              self.bounds[1],
              self.bounds[0],
              MAX_velocity,
              MAX_velocity,
              1]
        np.clip(self.particles, lb, ub, self.particles)
        if np.max(self.particles[:, 0]) > self.bounds[1]:
            print "Not clipped"
        self.iterations += 1
Developer ID: snuderl, Project: VideoTracking, Lines of code: 28, Source file: particle.py
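
Note that the example above passes lists as bounds: np.clip broadcasts a_min and a_max against the input, so the length-7 lb and ub vectors clip each particle column to its own range. A small standalone sketch of that broadcasting behaviour, with made-up values rather than the particle-filter state:

import numpy as np

particles = np.array([[ 5.0, -2.0, 9.0],
                      [-1.0, 10.0, 2.0]])
lb = [0.0, 0.0, 1.0]   # per-column lower bounds
ub = [4.0, 8.0, 8.0]   # per-column upper bounds

np.clip(particles, lb, ub, out=particles)
print(particles)
# [[4. 0. 8.]
#  [0. 8. 2.]]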


Example 16: test_nmf_negative_beta_loss

def test_nmf_negative_beta_loss():
    # Test that an error is raised if beta_loss < 0 and X contains zeros.
    # Test that the output has not NaN values when the input contains zeros.
    n_samples = 6
    n_features = 5
    n_components = 3

    rng = np.random.mtrand.RandomState(42)
    X = rng.randn(n_samples, n_features)
    np.clip(X, 0, None, out=X)
    X_csr = sp.csr_matrix(X)

    def _assert_nmf_no_nan(X, beta_loss):
        W, H, _ = non_negative_factorization(
            X, init='random', n_components=n_components, solver='mu',
            beta_loss=beta_loss, random_state=0, max_iter=1000)
        assert not np.any(np.isnan(W))
        assert not np.any(np.isnan(H))

    msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
    for beta_loss in (-0.6, 0.):
        assert_raise_message(ValueError, msg, _assert_nmf_no_nan, X, beta_loss)
        _assert_nmf_no_nan(X + 1e-9, beta_loss)

    for beta_loss in (0.2, 1., 1.2, 2., 2.5):
        _assert_nmf_no_nan(X, beta_loss)
        _assert_nmf_no_nan(X_csr, beta_loss)
Developer ID: kjacks21, Project: scikit-learn, Lines of code: 27, Source file: test_nmf.py


Example 17: _to_raw

    def _to_raw(self, data1, data2):
        from matplotlib import pyplot as plt
        from matplotlib.colors import Normalize
        cmapdir = options.config.get("webgl", "colormaps")
        cmap = plt.imread(os.path.join(cmapdir, "%s.png"%self.cmap))

        norm1 = Normalize(self.vmin, self.vmax)
        norm2 = Normalize(self.vmin2, self.vmax2)
        
        d1 = np.clip(norm1(data1), 0, 1)
        d2 = np.clip(1 - norm2(data2), 0, 1)
        dim1 = np.round(d1 * (cmap.shape[1]-1))
        # Nans in data seemed to cause weird interaction with conversion to uint32
        dim1 = np.nan_to_num(dim1).astype(np.uint32) 
        dim2 = np.round(d2 * (cmap.shape[0]-1))
        dim2 = np.nan_to_num(dim2).astype(np.uint32)

        colored = cmap[dim2.ravel(), dim1.ravel()]
        r, g, b, a = colored.T
        r.shape = dim1.shape
        g.shape = dim1.shape
        b.shape = dim1.shape
        a.shape = dim1.shape
        # Preserve nan values as alpha = 0
        aidx = np.logical_or(np.isnan(data1),np.isnan(data2))
        a[aidx] = 0
        # Code from master, to handle alpha input, prob better here but not tested.
        # # Possibly move this above setting nans to alpha = 0;
        # # Possibly multiply specified alpha by alpha in colormap??
        # if 'alpha' in self.attrs:
        #     # Over-write alpha from colormap / nans with alpha arg if provided.
        #     # Question: Might it be important to keep alpha as an attr?
        #     a = self.attrs.pop('alpha')
        return r, g, b, a
Developer ID: gallantlab, Project: pycortex, Lines of code: 34, Source file: view2D.py


Example 18: spectrum

    def spectrum(data, attribute, roi, slc, zaxis):
        xaxis = slc.index('x')
        yaxis = slc.index('y')
        ndim, nz = data.ndim, data.shape[zaxis]

        l, r, b, t = roi.xmin, roi.xmax, roi.ymin, roi.ymax
        shp = data.shape
        # The 'or 0' is because Numpy in Python 3 cannot deal with 'None'
        l, r = np.clip([l or 0, r or 0], 0, shp[xaxis])
        b, t = np.clip([b or 0, t or 0], 0, shp[yaxis])

        # extract sub-slice, without changing dimension
        slc = [slice(s, s + 1)
               if s not in ['x', 'y'] else slice(None)
               for s in slc]
        slc[xaxis] = slice(l, r)
        slc[yaxis] = slice(b, t)
        slc[zaxis] = slice(None)
        x = Extractor.abcissa(data, zaxis)

        data = data[attribute, tuple(slc)]
        finite = np.isfinite(data)

        assert data.ndim == ndim

        for i in reversed(list(range(ndim))):
            if i != zaxis:
                data = np.nansum(data, axis=i)
                finite = finite.sum(axis=i)

        assert data.ndim == 1
        assert data.size == nz

        data = (1. * data / finite).ravel()
        return x, data
Developer ID: borkin, Project: glue, Lines of code: 35, Source file: spectrum_tool.py


Example 19: switchShape

 def switchShape(tractor, shape, var):
     # This p0/p1/changed is a hack to know which elements of 'var' to change.
     p0 = np.array(tractor.getParams())
     softe = shape.softe
     # Actually switch the parameter space
     newshape = EllipseE.fromEllipseESoft(shape, maxe=0.99)
     shape.setParams([-np.inf] * shape.numberOfParams())
     p1 = np.array(tractor.getParams())
     # ASSUME that changing to EllipseE parameterization actually
     # changes the values.
     # Could do something like: gal.shape.setParams([-np.inf] * 3) to be sure.
     changed = np.flatnonzero(p0 != p1)
     print 'shape param indices:', changed
     assert(len(changed) == 3)
     # ASSUME ordering re, e1, e2
     # We changed from log(re) to re.
     var[changed[0]] *= newshape.re**2
     # We changed from soft-e to e.
     # If soft-e is huge, var(soft-e) is huge; e is ~1 and var(e) gets hugely shrunk.
     efac = np.exp(-2. * softe)
     var[changed[1]] *= efac
     var[changed[2]] *= efac
     # Impose a minimum and maximum variance on e
     minv, maxv = 1e-6, 1.
     var[changed[1]] = np.clip(var[changed[1]], minv, maxv)
     var[changed[2]] = np.clip(var[changed[2]], minv, maxv)
     return newshape
Developer ID: barentsen, Project: tractor, Lines of code: 27, Source file: great3.py


Example 20: process

 def process(self, image, out=None):
     # 0.25 is the default value used in Ng's paper
     alpha = self.specs.get('alpha', 0.25)
     # check if we would like to do two-side thresholding. Default yes.
     if self.specs.get('twoside', True):
         # concatenate, and make sure the output is C_CONTIGUOUS
         # for the temporary product, we check if we can utilize the
         # buffer to save allocation time
         product = mathutil.dot_image(image, self.dictionary.T)
         imshape = product.shape[:-1]
         N = product.shape[-1]
         product.resize((np.prod(imshape), N))
         if out is None:
             out = np.empty((np.prod(imshape), N*2))
         else:
             out.resize((np.prod(imshape), N*2))
         out[:,:N] = product
         out[:,N:] = -product
         out.resize(imshape + (N*2,))
     elif self.specs['twoside'] == 'abs':
         out = mathutil.dot_image(image, self.dictionary.T, out=out)
         np.abs(out, out=out)
     else:
         out = mathutil.dot_image(image, self.dictionary.T, out=out)
     # do threshold
     out -= alpha
     np.clip(out, 0., np.inf, out=out)
     return out
Developer ID: WilllWang, Project: iceberk, Lines of code: 28, Source file: pipeline.py



Note: The numpy.clip examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before redistributing or using the code; do not reproduce without permission.

