
Python ndimage.convolve function code examples


This article collects typical usage examples of the scipy.ndimage.convolve function in Python. If you are wondering how exactly to call convolve, how it is used in practice, or what working examples look like, the hand-picked code samples below may help.



Twenty code examples of the convolve function are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
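As a point of reference before the collected examples, here is a minimal, self-contained sketch of a basic convolve call; the array and kernel values below are illustrative and are not taken from any of the projects that follow.

import numpy as np
from scipy import ndimage

# Illustrative 5x5 input and a 3x3 averaging kernel.
image = np.arange(25, dtype=float).reshape(5, 5)
kernel = np.ones((3, 3)) / 9.0

# mode/cval control the border handling; 'constant' pads with cval outside the input.
smoothed = ndimage.convolve(image, kernel, mode='constant', cval=0.0)
print(smoothed.shape)  # (5, 5): the output has the same shape as the input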

Example 1: compressed_holographic_retrieval

def compressed_holographic_retrieval(data,ARGS):
  window_f = np.hamming
  kx,ky = find_carrier(data)
  Kx,Ky = np.fft.fftfreq(data.shape[0]),np.fft.fftfreq(data.shape[1])
  KX,KY = np.meshgrid(Kx,Ky)
  s2 = np.sqrt(kx**2 + ky**2)/4.
  G2 = gaussian(KX,KY,0,0,s2)

  nx,ny = data.shape
  X,Y = np.meshgrid(np.arange(nx),np.arange(ny))
  r = plane_wave(X,Y,kx,ky)
  i3 = data*r

#  print np.around(float(s2*256)/ax) - float(s2*256)/ax
  a3 = 14./256
  G3_kernel = ndi.zoom(G2,a3)
#  phplot.dBshow(G3_kernel)
  g3_kernel = np.fft.ifft2(G3_kernel)
  w3zoom = np.sqrt(np.outer(window_f(g3_kernel.shape[0]),window_f(g3_kernel.shape[1])))
  g3_kernel *= w3zoom
  o3 = (ndi.convolve(i3.real,g3_kernel.real) -\
        ndi.convolve(i3.imag,g3_kernel.imag) +\
        1j*ndi.convolve(i3.real,g3_kernel.imag) +\
        1j*ndi.convolve(i3.imag,g3_kernel.real))
  p3 = np.arctan2(o3.imag,o3.real)
#  phplot.imageshow(p3)
  return p3
Developer: taylo589, Project: phproc, Lines: 27, Source file: phretrieve.py


Example 2: energy_image

def energy_image (im):
    gray_img = rgb2gray(im)
    double_img = im2double(gray_img)
    xconvolution = ndimage.convolve(double_img, xGrad, mode='constant', cval=0.0)
    yconvolution = ndimage.convolve(double_img, yGrad,  mode='constant', cval=0.0)
    gradient_img = np.sqrt(np.square(xconvolution) + np.square(yconvolution))
    return im2double(gradient_img)
Developer: sbhal, Project: ece5554_computer_vision, Lines: 7, Source file: energy_image.py
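In the snippet above, rgb2gray, im2double, xGrad and yGrad are defined elsewhere in the project. A common choice for this kind of seam-carving energy image is a pair of Sobel gradient kernels; the sketch below makes that assumption and is not code from the original repository.

import numpy as np
from scipy import ndimage

# Assumed Sobel-style kernels standing in for the project's xGrad / yGrad.
xGrad = np.array([[-1, 0, 1],
                  [-2, 0, 2],
                  [-1, 0, 1]], dtype=float)
yGrad = xGrad.T

def energy_image_sketch(gray):
    # gray: 2-D float image, already converted to grayscale and scaled to [0, 1]
    gx = ndimage.convolve(gray, xGrad, mode='constant', cval=0.0)
    gy = ndimage.convolve(gray, yGrad, mode='constant', cval=0.0)
    return np.sqrt(gx ** 2 + gy ** 2)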


Example 3: run_iteration

    def run_iteration(self, update_mask=True):
        """Run one iteration."""
        # Start with images from the last iteration
        images = self._data[-1]
        
        logging.info('*** INPUT IMAGES ***')
        images.print_info()

        # Compute new exclusion mask:
        if update_mask:
            logging.info('Computing new exclusion mask')
            mask = np.where(images.significance > self.significance_threshold, 0, 1)
            #print('===', (mask == 0).sum())
            mask = np.invert(binary_dilation_circle(mask == 0, radius=self.mask_dilation_radius))
            #print('===', (mask == 0).sum())
        else:
            mask = images.mask.copy()
        
        # Compute new background estimate:
        # Convolve old background estimate with background kernel,
        # excluding sources via the old mask.
        weighted_counts = convolve(images.mask * images.counts, self.background_kernel)
        weighted_counts_normalisation = convolve(images.mask.astype(float), self.background_kernel)
        background = weighted_counts / weighted_counts_normalisation
        
        # Store new images
        images = GammaImages(images.counts, background, mask)  # counts taken from the previous iteration's images
        logging.info('Computing source kernel correlated images.')
        images.compute_correlated_maps(self.source_kernel)

        logging.info('*** OUTPUT IMAGES ***')
        images.print_info()
        self._data.append(images)
Developer: ellisowen, Project: SciNeGHE_scripts, Lines: 33, Source file: source_diffuse_estimation.py


Example 4: optimal_extract

 def optimal_extract(self, data, bin=0):        
     import scipy.ndimage as nd
     
     wave = (np.arange(self.shg[1])+1-self.cutout_dimensions[1])*(self.lam[1]-self.lam[0]) + self.lam[0]
     
     if not hasattr(self, 'opt_profile'):
         m = self.compute_model(self.thumb, id=self.id, in_place=False).reshape(self.shg)
         m[m < 0] = 0
         self.opt_profile = m/m.sum(axis=0)
         
     num = self.opt_profile*data*self.ivar.reshape(self.shg)
     den = self.opt_profile**2*self.ivar.reshape(self.shg)
     opt = num.sum(axis=0)/den.sum(axis=0)
     opt_var = 1./den.sum(axis=0)
     
     if bin > 0:
         kern = np.ones(bin, dtype=float)/bin
         opt = nd.convolve(opt, kern)[bin/2::bin]
         opt_var = nd.convolve(opt_var, kern**2)[bin/2::bin]
         wave = wave[bin/2::bin]
         
     opt_rms = np.sqrt(opt_var)
     opt_rms[opt_var == 0] = 0
     
     return wave, opt, opt_rms
     
     
Developer: gbrammer, Project: wfc3, Lines: 25, Source file: model.py


Example 5: compressed_wave_retrieval

def compressed_wave_retrieval(data,ARGS):
  window_f = np.hamming

  kx,ky = find_carrier(data)
  nx,ny = data.shape
  X,Y = np.meshgrid(np.arange(nx),np.arange(ny))

  G = find_gaussian(data,kx,ky)
#  print float(ik[1])/256
#  print np.around(float(ik[1])/ax) - float(ik[1])/ax
  a = [11./256.,12./256.]
  G_kernel = ndi.zoom(G,a)
#  phplot.dBshow(G_kernel)
  g_kernel = np.fft.ifft2(G_kernel)
  wzoom = np.sqrt(np.outer(window_f(g_kernel.shape[0]),window_f(g_kernel.shape[1])))
  g_kernel *= wzoom
#  print g_kernel.shape
#  phplot.imageshow(g_kernel.real)
#  nx,ny = data.shape
#  X,Y = np.meshgrid(np.arange(nx),np.arange(ny))
  i = (ndi.convolve(data,g_kernel.real) + 1j*ndi.convolve(data,g_kernel.imag))
#  phplot.dBshow(np.fft.fft2(i))
  r = plane_wave(X,Y,kx,ky)
  o = i*r
  ph = np.arctan2(o.imag,o.real)
#  phplot.imageshow(p)
  return ph
Developer: taylo589, Project: phproc, Lines: 27, Source file: phretrieve.py


Example 6: smooth

    def smooth(self,sigma,compute_var=False,summed=False):

        sigma /= 1.5095921854516636        
        sigma /= np.abs(self._axes[0]._delta)
        
        from scipy import ndimage
        im = SkyImage(copy.deepcopy(self.wcs),
                      copy.deepcopy(self.axes()),
                      copy.deepcopy(self._counts),
                      self.roi_radius,
                      copy.deepcopy(self._roi_msk))

        # Construct a kernel
        nk = 41
        fn = lambda t, s: 1./(2*np.pi*s**2)*np.exp(-t**2/(s**2*2.0))
        b = np.abs(np.linspace(0,nk-1,nk) - (nk-1)/2.)
        k = np.zeros((nk,nk)) + np.sqrt(b[np.newaxis,:]**2 +
                                        b[:,np.newaxis]**2)
        k = fn(k,sigma)
        k /= np.sum(k)

        im._counts = ndimage.convolve(self._counts,k,mode='nearest')
        
#        im._counts = ndimage.gaussian_filter(self._counts, sigma=sigma,
#                                             mode='nearest')

        if compute_var:
            var = ndimage.convolve(self._counts, k**2, mode='wrap')
            im._var = var
        else:
            im._var = np.zeros(im._counts.shape)
            
        if summed: im /= np.sum(k**2)
            
        return im
Developer: woodmd, Project: gammatools, Lines: 35, Source file: fits_image.py


Example 7: skeletonize_mitochondria

def skeletonize_mitochondria(mch_channel):
    mch_collector = np.max(mch_channel, axis=0)  # TODO: check max projection v.s. sum
    skeleton_labels = np.zeros(mch_collector.shape, dtype=np.uint8)

    # thresh = np.max(mch_collector)/2.
    thresh = threshold_otsu(mch_collector)
    # use adaptive threshold? => otsu seems to be sufficient in this case

    skeleton_labels[mch_collector > thresh] = 1
    skeleton2 = skeletonize(skeleton_labels)
    skeleton, distance = medial_axis(skeleton_labels, return_distance=True)
    active_threshold = np.mean(mch_collector[skeleton_labels]) * 5

    # print active_threshold
    transform_filter = np.zeros(mch_collector.shape, dtype=np.uint8)
    transform_filter[np.logical_and(skeleton > 0, mch_collector > active_threshold)] = 1
    skeleton = transform_filter * distance

    skeleton_ma = np.ma.masked_array(skeleton, skeleton > 0)
    skeleton_convolve = ndi.convolve(skeleton_ma, np.ones((3, 3)), mode='constant', cval=0.0)
    divider_convolve = ndi.convolve(transform_filter, np.ones((3, 3)), mode='constant', cval=0.0)
    skeleton_convolve[divider_convolve > 0] = skeleton_convolve[divider_convolve > 0] / \
                                              divider_convolve[divider_convolve > 0]
    new_skeleton = np.zeros_like(skeleton)
    new_skeleton[skeleton2] = skeleton_convolve[skeleton2]
    skeleton = new_skeleton

    return skeleton_labels, mch_collector, skeleton, transform_filter
Developer: chiffa, Project: Chromo_vision, Lines: 28, Source file: layered_zstack_processing.py


Example 8: vesiclerf_feats

def vesiclerf_feats(em):
    # return value
    xt = []
    num_features = 2

    # Kernels
    B0 = np.ones([5, 5, 1]) / (5 * 5 * 1)
    B1 = np.ones([15, 15, 3]) / (15 * 15 * 3)
    B2 = np.ones([25, 25, 5]) / (25 * 25 * 5)

    ### Intensity Feats ###
    # find weighted average of features
    I0 = ndimage.convolve(em, B0, mode="constant")
    I2 = ndimage.convolve(em, B1, mode="constant")

    # reshape data
    # I0 = [np.reshape(I0,(I0.size,1)).tolist(), num_features]
    # I2 = [np.reshape(I2,(I2.size,1)).tolist(), num_features]
    # I0 = np.reshape(I0,(I0.size,1))
    I2 = np.reshape(I2, (I2.size, 1))
    xt = I2
    # xt.append(I0)
    # xt.append(I2)

    return xt
Developer: Connectomics-Classes, Project: team-awesome, Lines: 25, Source file: vesiclerf_feats.py


Example 9: update

 def update(self, t_end, sink, source):
     """ Solves the system over using the predetermined time step dt
         until the end time of the simulation is reached.
         t_end - the end time to solve the system towards
     """
     t = 0
     epsilon = 1E-10
     diff = epsilon  * 2
     zeros = np.zeros(self.Ci.shape)
     while(t <= t_end and diff >= epsilon):
         #solve for the gradients in each direction
         l_x = ndimage.convolve(self.Ci, self._lx, mode = "constant",
                                cval = self._c_out)
         l_y = ndimage.convolve(self.Ci, self._ly, mode = "constant",
                                cval = self._c_out)
         l_z = ndimage.convolve(self.Ci, self._lz, mode = "constant",
                                cval = self._c_out)
         #first diffusion
         self.C = self.Ci + (l_x + l_y + l_z)*self._D*self.dt
         #MUST BE normalized by unit VOLUME
         temp_sink = (-sink*self.dt) / self._grid_vol
         temp_source = source*self.dt / self._grid_vol
         self.C += temp_sink + temp_source
         #get the summed difference
         diff = np.sum(np.abs(self.Ci - self.C))
         #make sure its positive
         self.C = self.C * (self.C > 0.0)
         #update the old
         self.Ci = self.C
         #update the time step
         t += self.dt
Developer: dmarcbriers, Project: Cell_Model, Lines: 31, Source file: Gradient.py
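The kernels self._lx, self._ly and self._lz used above are built elsewhere in the class. For this kind of explicit finite-difference diffusion step they are typically one-dimensional second-derivative (Laplacian) stencils oriented along each axis of the 3-D grid; the sketch below is an assumption about their shape, not the project's actual code.

import numpy as np

# Assumed 1-D Laplacian stencils along each axis of a 3-D concentration grid.
# With these, ndimage.convolve(C, _lx) approximates d2C/dx2 (up to the grid spacing).
dx = 1.0  # illustrative grid spacing
_lx = np.array([1.0, -2.0, 1.0]).reshape(3, 1, 1) / dx ** 2
_ly = np.array([1.0, -2.0, 1.0]).reshape(1, 3, 1) / dx ** 2
_lz = np.array([1.0, -2.0, 1.0]).reshape(1, 1, 3) / dx ** 2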


Example 10: skeletonize_mitochondria

def skeletonize_mitochondria(mCh_channel):

    mch_collector = np.max(mCh_channel, axis=0)  # TODO: check how max affects v.s. sum
    labels = np.zeros(mch_collector.shape, dtype=np.uint8)

    # thresh = np.max(mch_collector)/2.
    thresh = threshold_otsu(mch_collector)
    # TODO: use adaptive threshold? => otsu seems to be sufficient in this case
    # http://scikit-image.org/docs/dev/auto_examples/xx_applications/plot_thresholding.html#sphx
    # -glr-auto-examples-xx-applications-plot-thresholding-py
    #  log-transform? => Nope, does not work
    # TODO: hessian/laplacian of gaussian blob detection?

    labels[mch_collector > thresh] = 1
    skeleton2 = skeletonize(labels)
    skeleton, distance = medial_axis(labels, return_distance=True)
    active_threshold = np.mean(mch_collector[labels]) * 5

    # print active_threshold
    transform_filter = np.zeros(mch_collector.shape, dtype=np.uint8)
    transform_filter[np.logical_and(skeleton > 0, mch_collector > active_threshold)] = 1
    skeleton = transform_filter * distance

    skeleton_ma = np.ma.masked_array(skeleton, skeleton > 0)
    skeleton_convolve = ndi.convolve(skeleton_ma, np.ones((3, 3)), mode='constant', cval=0.0)
    divider_convolve = ndi.convolve(transform_filter, np.ones((3, 3)), mode='constant', cval=0.0)
    skeleton_convolve[divider_convolve > 0] = skeleton_convolve[divider_convolve > 0] \
                                              / divider_convolve[divider_convolve > 0]
    new_skeleton = np.zeros_like(skeleton)
    new_skeleton[skeleton2] = skeleton_convolve[skeleton2]
    skeleton = new_skeleton

    return labels, mch_collector, skeleton, transform_filter
Developer: chiffa, Project: Chromo_vision, Lines: 33, Source file: Linhao_masks_logic.py


Example 11: con

        def con(k):
            c = convolve(convolve(fimage, k, mode='nearest'),
                         k,
                         mode='nearest')
            c = (c <= (0 + bias)) & ~lowerbound

            return cv2.morphologyEx(c.astype(np.uint8), cv2.MORPH_OPEN, np.ones((self.msize,self.msize),np.uint8))
Developer: Leucipp-us, Project: Leucippus, Lines: 7, Source file: OptiAtomDetector.py


Example 12: compute_lima_on_off_image

def compute_lima_on_off_image(n_on, n_off, a_on, a_off, kernel, exposure=None):
    """
    Compute Li&Ma significance and flux images for on-off observations.

    Parameters
    ----------
    n_on : `~numpy.ndarray`
        Counts image
    n_off : `~numpy.ndarray`
        Off counts image
    a_on : `~numpy.ndarray`
        Relative background efficiency in the on region
    a_off : `~numpy.ndarray`
        Relative background efficiency in the off region
    kernel : `astropy.convolution.Kernel2D`
        Convolution kernel
    exposure : `~numpy.ndarray`
        Exposure image

    Returns
    -------
    images : `~gammapy.image.SkyImageList`
        Results images container

    See also
    --------
    gammapy.stats.significance_on_off
    """
    from scipy.ndimage import convolve

    # Kernel is modified later, so make a copy here
    kernel = deepcopy(kernel)

    if not kernel.is_bool:
        log.warn('Using weighted kernels can lead to biased results.')

    kernel.normalize('peak')
    conv_opt = dict(mode='constant', cval=np.nan)

    n_on_conv = convolve(n_on, kernel.array, **conv_opt)
    a_on_conv = convolve(a_on, kernel.array, **conv_opt)
    alpha_conv = a_on_conv / a_off
    background_conv = alpha_conv * n_off
    excess_conv = n_on_conv - background_conv
    significance_conv = significance_on_off(n_on_conv, n_off, alpha_conv, method='lima')

    images = SkyImageList([
        SkyImage(name='significance', data=significance_conv),
        SkyImage(name='n_on', data=n_on_conv),
        SkyImage(name='background', data=background_conv),
        SkyImage(name='excess', data=excess_conv),
        SkyImage(name='alpha', data=alpha_conv),
    ])

    # TODO: should we be doing this here?
    # Wouldn't it be better to let users decide if they want this,
    # and have it easily accessible as an attribute or method?
    _add_other_images(images, exposure, kernel, conv_opt)

    return images
Developer: dltiziani, Project: gammapy, Lines: 60, Source file: lima.py
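A hedged usage sketch for the function above. It assumes the surrounding gammapy module (significance_on_off, SkyImage, SkyImageList) is importable, and the map values and kernel size are purely illustrative.

import numpy as np
from astropy.convolution import Tophat2DKernel

# Illustrative on/off count maps and acceptances; real values come from event data.
shape = (50, 50)
n_on = np.random.poisson(5.0, shape).astype(float)
n_off = np.random.poisson(50.0, shape).astype(float)
a_on = np.ones(shape)
a_off = 10.0 * np.ones(shape)

kernel = Tophat2DKernel(radius=3)
images = compute_lima_on_off_image(n_on, n_off, a_on, a_off, kernel)
# 'images' bundles the significance, n_on, background, excess and alpha maps (see above).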


Example 13: filter

def filter(data,filtType,par):

    if   filtType == "sobel":       filt_data = sobel(data)
    elif filtType == "roberts":     filt_data = roberts(data)
    elif filtType == "canny":       filt_data = canny(data)
    elif filtType == "lowpass_avg":
        from scipy import ndimage
        p=int(par)
        kernel = np.ones((p,p),np.float32)/(p*p)
        filt_data = ndimage.convolve(data, kernel)
    elif filtType == "highpass_avg":
        from scipy import ndimage
        p=int(par)
        kernel = np.ones((p,p),np.float32)/(p*p)
        lp_data = ndimage.convolve(data, kernel)
        filt_data = data - lp_data
    elif filtType == "lowpass_gaussian":
        filt_data = gaussian(data, sigma=float(par))
    elif filtType == "highpass_gaussian":
        lp_data   = gaussian(data, sigma=float(par))
        filt_data = data - lp_data

    #elif filtType ==  "gradient":
       
    return filt_data
Developer: yunjunz, Project: PySAR, Lines: 25, Source file: filter_spatial.py
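A brief usage sketch for the dispatcher above; the input array is an illustrative assumption, and the non-convolution branches rely on the original module's imports (e.g. skimage's sobel and gaussian).

import numpy as np

data = np.random.rand(128, 128).astype(np.float32)
lowpassed = filter(data, "lowpass_avg", 5)    # 5x5 box average via ndimage.convolve
highpassed = filter(data, "highpass_avg", 5)  # original minus the box average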


Example 14: run

    def run(self, inputs, run_id):
        pstore = self.pstore(run_id)
        
        arr = inputs[0]
        kernel = inputs[1]

        ar, ac = arr.shape
        kr, kc = kernel.shape[0]/2, kernel.shape[1]/2

        start = time.time()
        if pstore.uses_mode(Mode.FULL_MAPFUNC):
            pstore.set_fanins([1,reduce(mul, kernel.shape)])
            pstore.set_inareas([1,reduce(mul, kernel.shape)])
            pstore.set_outarea(1)
            pstore.set_ncalls(reduce(mul, arr.shape))
            pstore.set_noutcells(reduce(mul, arr.shape))

        if pstore.uses_mode(Mode.PTR):
            for rid in xrange(ar):
                for cid in xrange(ac):
                    minr, maxr = (max(0,rid - kr), min(ar, rid + kr+1))
                    minc, maxc = (max(0,cid - kc), min(ac, cid + kc+1))
                    prov0 = [(px, py) for px in xrange(minr, maxr) for py in xrange(minc, maxc)]
                    prov1 = [(kx, ky) for kx in range(maxr-minr) for ky in xrange(maxc-minc)]
                    pstore.write(((rid, cid),), prov0, prov1)

        if pstore.uses_mode(Mode.PT_MAPFUNC):
            for x in xrange(ar):
                for y in xrange(ac):
                    pstore.write(((x,y),), '')
        end = time.time()

        output = np.empty(arr.shape, float)
        ndimage.convolve(arr, kernel, output=output, mode='constant', cval=0.0)
        return output, {'provoverhead' : end - start}
Developer: sirrice, Project: pstore, Lines: 35, Source file: lsst.py


Example 15: nudge_dataset

def nudge_dataset(X, y):
    """
    This produces a dataset nine times the size of the original one
    (the original plus eight shifted copies), by shifting the 28x28 images
    in X by one pixel in each of 8 directions
    """
    direction_vectors = [
        [[0, 1, 0],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [1, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 1],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [0, 1, 0]],

        [[1, 0, 0],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 1],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [1, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [0, 0, 1]]
    ]

    new_images = []
    for vectors in direction_vectors:
        new_images.append(convolve(X[0].reshape((28, 28)), vectors, mode='constant'))
    new_images.append(X[0].reshape((28, 28)))
    f, axarr = plt.subplots(3, 3)
    for i in range(3):
        for j in range(3):
            axarr[i, j].imshow(new_images[3 * i + j], cmap='gray')

    plt.show()

    shift = lambda x, w: convolve(x.reshape((28, 28)), mode='constant',
                                  weights=w).ravel()
    X = np.concatenate([X] +
                       [np.apply_along_axis(shift, 1, X, vector)
                        for vector in direction_vectors])
    print X.shape
    y = np.concatenate([y for _ in range(len(direction_vectors) + 1)], axis=0)
    print y.shape
    return X, y
Developer: ktk1012, Project: mlexamples, Lines: 59, Source file: mnist_test.py
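Each entry in direction_vectors above is a single-pixel shift kernel: convolving an image with a kernel whose only nonzero weight sits one cell off centre translates the content by one pixel (convolve flips the kernel, so a 1 above the centre moves the content up by one row). A small self-contained check of that idea, with illustrative values:

import numpy as np
from scipy.ndimage import convolve

img = np.zeros((5, 5))
img[2, 2] = 1.0

# The lone 1 above the centre; with 'constant' padding, pixels shifted in from the border are zero.
shift_kernel = [[0, 1, 0],
                [0, 0, 0],
                [0, 0, 0]]
shifted = convolve(img, shift_kernel, mode='constant')
print(np.argwhere(shifted == 1.0))  # [[1 2]]: the bright pixel moved up one row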


Example 16: compute_correlated_maps

    def compute_correlated_maps(self, kernel):
        """Compute significance image for a given kernel.
        """
        self.counts_corr = convolve(self.counts, kernel)
        self.background_corr = convolve(self.background, kernel)
        self.significance = significance(self.counts_corr, self.background_corr)

        return self
Developer: ellisowen, Project: gammapy-extra, Lines: 8, Source file: source_diffuse_estimation.py


Example 17: convolve_raw

    def convolve_raw(self, frame, frequency=0.5, theta=0):

        kernel = gabor_kernel(frequency, theta=theta)

        real = ndi.convolve(frame, np.real(kernel), mode='wrap')
        imag = ndi.convolve(frame, np.imag(kernel), mode='wrap')

        return real, imag
Developer: florianletsch, Project: gesture-wavelets, Lines: 8, Source file: shape.py


Example 18: apply_robert

def apply_robert(img):

	Dx  = array([[1,0],[0,-1]])
	Dy  = array([[0,1],[-1,0]])
	imx = ndimage.convolve(img,Dx)
	imy = ndimage.convolve(img,Dy)
	grad = sqrt(imx**2+imy**2)
	return imx,imy,grad
Developer: pockerman, Project: codes, Lines: 8, Source file: discrete_operators.py


Example 19: compute_lima_on_off_map

def compute_lima_on_off_map(n_on, n_off, a_on, a_off, kernel, exposure=None):
    """
    Compute Li&Ma significance and flux maps for on-off observations.

    Parameters
    ----------
    n_on : `~numpy.ndarray`
        Counts map.
    n_off : `~numpy.ndarray`
        Off counts map.
    a_on : `~numpy.ndarray`
        Relative background efficiency in the on region
    a_off : `~numpy.ndarray`
        Relative background efficiency in the off region
    kernel : `astropy.convolution.Kernel2D`
        convolution kernel.
    exposure : `~numpy.ndarray`
        Exposure map.

    Returns
    -------
    SkyImageCollection : `~gammapy.image.SkyImageCollection`
        Bunch of result maps.

    See also
    --------
    gammapy.stats.significance_on_off

    """
    from scipy.ndimage import convolve

    # Kernel is modified later, so make a copy here
    kernel = deepcopy(kernel)

    if not kernel.is_bool:
        log.warn('Using weighted kernels can lead to biased results.')

    kernel.normalize('peak')
    n_on_ = convolve(n_on, kernel.array, mode='constant', cval=np.nan)
    a_ = convolve(a_on, kernel.array, mode='constant', cval=np.nan)
    alpha = a_ / a_off
    background = alpha * n_off

    significance_lima = significance_on_off(n_on_, n_off, alpha, method='lima')

    result = SkyImageCollection(significance=significance_lima,
                                n_on=n_on_,
                                background=background,
                                excess=n_on_ - background,
                                alpha=alpha)

    if exposure is not None:
        kernel.normalize('integral')
        exposure_ = convolve(exposure, kernel.array, mode='constant', cval=np.nan)
        flux = (n_on_ - background) / exposure_
        result.flux = flux

    return result
Developer: OlgaVorokh, Project: gammapy, Lines: 58, Source file: lima.py


Example 20: coherence

 def coherence(self, kernel):
     numerator = convolve(self.power(), kernel)
     
     denominator = (convolve(abs(self.wave[:,:,0])**2, kernel)
                 * convolve(abs(self.wave[:,:,1])**2, kernel))**0.5
     #denominator = convolve((abs(self.wave[:,:,0])**2 * abs(self.wave[:,:,1])**2)**0.5,
     #                kernel)
                            
     return (numerator / denominator).reshape((self.scales.size, self.series.shape[0]))
Developer: ElOceanografo, Project: PyCWT, Lines: 9, Source file: pycwt.py



Note: the scipy.ndimage.convolve examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use should follow each project's license. Please do not reproduce without permission.

