
Python pywt.wavedec2 Function Code Examples


This article collects typical usage examples of the Python function pywt.wavedec2. If you are wondering what wavedec2 does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.



The sections below present 20 code examples of the wavedec2 function, ordered by popularity by default.
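
Before the individual examples, it may help to recall the shape of wavedec2's output. The following minimal, self-contained sketch (the array size and wavelet are arbitrary choices, not taken from any example below) shows the coefficient layout and the round trip through waverec2:

import numpy as np
import pywt

# Minimal sketch: 2-level Haar decomposition of a small random image.
image = np.random.rand(64, 64)
coeffs = pywt.wavedec2(image, 'haar', level=2)

# coeffs[0] is the coarsest approximation array; every following entry is a
# (cH, cV, cD) tuple of detail arrays, ordered from coarsest to finest level.
cA2, (cH2, cV2, cD2), (cH1, cV1, cD1) = coeffs

# pywt.waverec2 inverts the decomposition (up to floating-point error).
print(np.allclose(pywt.waverec2(coeffs, 'haar'), image))  # True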

Example 1: blend_images

def blend_images(base, texture, level=4, mode='sp1', base_gain=None, texture_gain=None):
    base_data = image2array(base)
    texture_data = image2array(texture)
    output_data = []

    for base_band, texture_band in zip(base_data, texture_data):
        base_band_coeffs = pywt.wavedec2(base_band, 'db2', mode, level)
        texture_band_coeffs = pywt.wavedec2(texture_band, 'db2', mode, level)

        output_band_coeffs = [base_band_coeffs[0]]
        del base_band_coeffs[0], texture_band_coeffs[0]

        for n, (base_band_details, texture_band_details) in enumerate(
            zip(base_band_coeffs, texture_band_coeffs)):
            blended_details = []
            for (base_detail, texture_detail) in zip(base_band_details, texture_band_details):
                if base_gain is not None:
                    base_detail *= base_gain
                if texture_gain is not None:
                    texture_detail *= texture_gain

                blended = numpy.where(abs(base_detail) > abs(texture_detail), base_detail, texture_detail)
                blended_details.append(blended)

            base_band_coeffs[n] = texture_band_coeffs[n] = None
            output_band_coeffs.append(blended_details)

        new_band = pywt.waverec2(output_band_coeffs, 'db2', mode)
        output_data.append(new_band)
        del new_band, base_band_coeffs, texture_band_coeffs

    del base_data, texture_data
    output_data = numpy.array(output_data)
    return array2image(output_data, base.mode)
Developer: ramonesteban, Project: vision-computacional, Lines of code: 34, Source file: wavelet_blend.py
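
The helpers image2array and array2image are project-specific and not shown in the excerpt, and 'sp1' is the legacy pywt name for the padding mode that newer releases call 'smooth'. The core fusion rule (keep whichever detail coefficient has the larger magnitude) can be sketched in a self-contained way on two grayscale arrays; the shapes, wavelet and level below are arbitrary assumptions:

import numpy as np
import pywt

# Two toy grayscale bands standing in for a base band and a texture band.
base = np.random.rand(128, 128)
texture = np.random.rand(128, 128)

base_coeffs = pywt.wavedec2(base, 'db2', mode='smooth', level=4)
texture_coeffs = pywt.wavedec2(texture, 'db2', mode='smooth', level=4)

# Keep the base approximation, then take the larger-magnitude detail
# coefficient at every position and level.
fused = [base_coeffs[0]]
for b_details, t_details in zip(base_coeffs[1:], texture_coeffs[1:]):
    fused.append(tuple(np.where(abs(b) > abs(t), b, t)
                       for b, t in zip(b_details, t_details)))

blended = pywt.waverec2(fused, 'db2', mode='smooth')
print(blended.shape)  # (128, 128)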


Example 2: imageWT

def imageWT(image, scaleLevel):
    # Keep only the approximation band of each colour channel.
    r = pywt.wavedec2(image[0], 'haar', level=scaleLevel)[0]
    g = pywt.wavedec2(image[1], 'haar', level=scaleLevel)[0]
    b = pywt.wavedec2(image[2], 'haar', level=scaleLevel)[0]

    # Rescale so the reduced image keeps roughly the original intensity range.
    x = np.sqrt(image.shape[1]*image.shape[2]/(r.shape[0]*r.shape[1]))
    waveImg = (np.array([r, g, b]) / x).astype(int)

    return waveImg
Developer: thegratefuldawg0520, Project: DIAToolbox, Lines of code: 10, Source file: DIAToolbox.py


Example 3: dwt2

def dwt2(image, wavelet, mode, level):
    signal = np.asarray(image)
    if signal.ndim == 2:
        r = pack_wave_coeff(pywt.wavedec2(signal, wavelet, mode, level))
        return r
    elif signal.ndim == 3:
        r, g, b = wv.splitRGB(image)
        rw = pack_wave_coeff(pywt.wavedec2(r, wavelet, mode, level))
        gw = pack_wave_coeff(pywt.wavedec2(g, wavelet, mode, level))
        bw = pack_wave_coeff(pywt.wavedec2(b, wavelet, mode, level))
        return (rw, gw, bw)
Developer: zenathark, Project: VideoBWT, Lines of code: 11, Source file: dwt.py


Example 4: multiwavelet_from_rgb

def multiwavelet_from_rgb(rgb):
    from scipy.fftpack import dct
    from pywt import wavedec2

    r = rgb[:, :, 0].astype(float)
    g = rgb[:, :, 1].astype(float)

    dctr = dct(r, norm='ortho').ravel()
    dctg = dct(g, norm='ortho').ravel()
    daubr = _unpack(wavedec2(r, 'db4'))
    daubg = _unpack(wavedec2(g, 'db4'))
    return np.hstack([dctr, dctg, daubr, daubg])
Developer: ChrisBeaumont, Project: brut, Lines of code: 12, Source file: util.py
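
The _unpack helper is not shown in the excerpt. A plausible stand-in that flattens the wavedec2 output into a single feature vector (the real helper may order the coefficients differently) can lean on pywt.coeffs_to_array:

import pywt

def _unpack(coeffs):
    # Hypothetical stand-in: pack the coefficient list into one 2-D array,
    # then flatten it into a feature vector.
    arr, _ = pywt.coeffs_to_array(coeffs)
    return arr.ravel()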


Example 5: ReduceDimension

def ReduceDimension(X = np.zeros([2,2])):
    r, c = X.shape
    image = X[0,:].reshape([385,576])
    coeffs = pywt.wavedec2(image,'db1', level=4)
    cA4, (cH4, cV4, cD4), (cH3, cV3, cD3),(cH2, cV2, cD2),(cH1, cV1, cD1) = coeffs
    nr,nc = cA4.shape
    rX = np.zeros([r,nc*nr], dtype=np.float32)
    for i in range(r):
        image = X[i,:].reshape([385,576])
        coeffs = pywt.wavedec2(image,'db1', level=4)
        cA4, (cH4, cV4, cD4), (cH3, cV3, cD3),(cH2, cV2, cD2),(cH1, cV1, cD1) = coeffs
        rX[i,:] = cV4.flatten()
    return rX
Developer: mercaderd, Project: DataMining, Lines of code: 13, Source file: DataMining.py


Example 6: func

def func(dframe):
    frame1, frame2 = dframe[0], dframe[1]
    frame1 = np.array(frame1)
    frame2 = np.array(frame2)
    C = pywt.wavedec2(frame1, 'db4', level=level)
    S = pywt.wavedec2(frame2, 'db4', level=level)
    tA2 = (C[0] + S[0])/2
    coeffs = fuse(tA2, C[1:], S[1:])
    fuse_img = pywt.waverec2(coeffs, 'db4')
    if frame1.dtype == np.uint16:
        fuse_img = fuse_img.clip(0,65535).astype(np.uint16)
    elif frame1.dtype == np.uint8:
        fuse_img = fuse_img.clip(0,255).astype(np.uint8)
    return np.squeeze(fuse_img)
Developer: genialwang, Project: lambda-image, Lines of code: 14, Source file: fusion.py
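
The fuse helper comes from elsewhere in fusion.py. A plausible stand-in, assuming it keeps the averaged approximation tA2 and, per level, the detail coefficient with the larger magnitude from either frame, is:

import numpy as np

def fuse(tA2, c_details, s_details):
    # Hypothetical stand-in for the project's fuse(): max-magnitude selection
    # of detail coefficients, with the averaged approximation up front.
    fused = [tA2]
    for c_level, s_level in zip(c_details, s_details):
        fused.append(tuple(np.where(np.abs(c) > np.abs(s), c, s)
                           for c, s in zip(c_level, s_level)))
    return fused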


Example 7: matvec

def matvec(x):
    xnd = x.reshape(shapein)
    yl = pywt.wavedec2(xnd, wavelet, mode=mode, level=level)
    y = yl[0].flatten()
    for el in yl[1:]:
        y = np.concatenate((y, np.concatenate(el).flatten()))
    return y
Developer: esoubrie, Project: lo, Lines of code: 7, Source file: pywt_lo.py
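
Newer pywt releases ship helpers that perform this flattening (and its inverse) directly, although the ordering of the flattened vector may differ from the hand-rolled loop above. A hedged equivalent with an arbitrary input:

import numpy as np
import pywt

x = np.random.rand(64, 64)
coeffs = pywt.wavedec2(x, 'db2', level=3)

# ravel_coeffs flattens the coefficient list into one vector and returns the
# bookkeeping needed to undo the flattening later.
flat, coeff_slices, coeff_shapes = pywt.ravel_coeffs(coeffs)
restored = pywt.unravel_coeffs(flat, coeff_slices, coeff_shapes,
                               output_format='wavedec2')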


Example 8: denoise

def denoise():
    wave = 'db4'
    sig = 20
    tau1 = 3*sig
    tau2 = 3*sig/2
    noisyLena = lena + np.random.normal(scale = sig, size=lena.shape)
    lw = pywt.wavedec2(noisyLena, wave, level=4)
    lwt1 = hardThresh(lw, tau1)
    lwt2 = softThresh(lw, tau2)
    rlena1 = pywt.waverec2(lwt1, wave)
    rlena2 = pywt.waverec2(lwt2, wave)
    plt.subplot(131)
    plt.imshow(noisyLena, cmap=plt.cm.Greys_r)
    plt.axis('off')
    
    plt.subplot(132)
    plt.imshow(rlena1, cmap=plt.cm.Greys_r)
    plt.axis('off')
    
    plt.subplot(133)
    plt.imshow(rlena2, cmap=plt.cm.Greys_r)
    plt.axis('off')
    
    plt.savefig('denoise.pdf')
    plt.clf()
Developer: byuimpactrevisions, Project: numerical_computing, Lines of code: 25, Source file: plotFigs.py
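
hardThresh, softThresh, lena and plt are defined elsewhere in plotFigs.py. Plausible stand-ins for the two thresholding helpers, assuming they apply pywt.threshold to every detail coefficient while leaving the approximation untouched, are:

import pywt

def hardThresh(coeffs, tau):
    # Hypothetical stand-in: hard-threshold all detail coefficients.
    return [coeffs[0]] + [
        tuple(pywt.threshold(d, tau, mode='hard') for d in level)
        for level in coeffs[1:]
    ]

def softThresh(coeffs, tau):
    # Hypothetical stand-in: soft-threshold all detail coefficients.
    return [coeffs[0]] + [
        tuple(pywt.threshold(d, tau, mode='soft') for d in level)
        for level in coeffs[1:]
    ]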


Example 9: _get_haar_feature

def _get_haar_feature(filename):
    data = misc.imread(filename)
    data = misc.imresize(data, (64, 64))
    #data.resize(64, 64)

    feature_layers = np.zeros((32, 32, 3), dtype=np.float32)
    additional = np.empty(4)

    for index in range(3):
        layer = data[:, :, index]
        layer = np.float32(layer)
        additional[index] = layer.mean()
        #layer /= 255.0
        #print(layer.min(), layer.max(), layer.mean())
        #print(layer[:1])

        haar = pywt.wavedec2(data=layer, wavelet='haar', level=1)
        cA = haar[0]
        feature_layers[:, :, index] = cA

    height, width, _ = data.shape
    aspect = float(width)/(width+height)
    additional[-1] = aspect
    features = np.concatenate((feature_layers.reshape(32*32*3), additional))
    return features
Developer: balta2ar, Project: insave, Lines of code: 25, Source file: features.py
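
scipy.misc.imread and scipy.misc.imresize have been removed from SciPy; a hedged replacement for the loading step at the top of the function, using Pillow and keeping everything else unchanged, could be:

import numpy as np
from PIL import Image

# Hypothetical drop-in for misc.imread + misc.imresize: load, force 3 channels,
# and resize to the 64x64 grid the rest of the function expects.
data = np.asarray(Image.open(filename).convert('RGB').resize((64, 64)))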


Example 10: _call

    def _call(self, x):
        """Compute the discrete wavelet transform.

        Parameters
        ----------
        x : `DiscreteLpVector`

        Returns
        -------
        arr : `numpy.ndarray`
            Flattened and concatenated coefficient array
            The length of the array depends on the size of input image to
            be transformed and on the chosen wavelet basis.
        """
        if x.space.ndim == 1:
            coeff_list = pywt.wavedec(x, self.wbasis, self.mode, self.nscales)
            coeff_arr = pywt_coeff_to_array(coeff_list, self.size_list)
            return self.range.element(coeff_arr)

        if x.space.ndim == 2:
            coeff_list = pywt.wavedec2(x, self.wbasis, self.mode, self.nscales)
            coeff_arr = pywt_coeff_to_array(coeff_list, self.size_list)
            return self.range.element(coeff_arr)

        if x.space.ndim == 3:
            coeff_dict = wavelet_decomposition3d(x, self.wbasis, self.mode,
                                                 self.nscales)
            coeff_arr = pywt_coeff_to_array(coeff_dict, self.size_list)

            return self.range.element(coeff_arr)
Developer: NikEfth, Project: odl, Lines of code: 30, Source file: wavelet.py


Example 11: munchetal_filter

    def munchetal_filter(im, wlevel, sigma, wname='db15'):
        # Wavelet decomposition:
        coeffs = pywt.wavedec2(im.astype(np.float32), wname, level=wlevel)
        coeffsFlt = [coeffs[0]]
        # FFT transform of horizontal frequency bands:
        for i in range(1, wlevel + 1):
            # FFT:
            fcV = np.fft.fftshift(np.fft.fft(coeffs[i][1], axis=0))
            my, mx = fcV.shape
            # Damping of vertical stripes:
            damp = 1 - np.exp(-(np.arange(-np.floor(my / 2.), -np.floor(my / 2.) + my) ** 2) / (2 * (sigma ** 2)))
            dampprime = np.kron(np.ones((1, mx)), damp.reshape((damp.shape[0], 1)))
            fcV = fcV * dampprime
            # Inverse FFT:
            fcVflt = np.real(np.fft.ifft(np.fft.ifftshift(fcV), axis=0))
            cVHDtup = (coeffs[i][0], fcVflt, coeffs[i][2])
            coeffsFlt.append(cVHDtup)

        # Get wavelet reconstruction:
        im_f = np.real(pywt.waverec2(coeffsFlt, wname))
        # Return image according to input type:
        if (im.dtype == 'uint16'):
            # Check extrema for uint16 images:
            im_f[im_f < np.iinfo(np.uint16).min] = np.iinfo(np.uint16).min
            im_f[im_f > np.iinfo(np.uint16).max] = np.iinfo(np.uint16).max
            # Return filtered image (an additional row and/or column might be present):
            return im_f[0:im.shape[0], 0:im.shape[1]].astype(np.uint16)
        else:
            return im_f[0:im.shape[0], 0:im.shape[1]]
Developer: pierrepaleo, Project: portal, Lines of code: 29, Source file: rings.py
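
A hedged usage sketch of the filter, assuming it is reachable as a plain function and using an arbitrary image size, level and sigma:

import numpy as np

# Synthetic float32 image with vertical stripe artefacts, standing in for a
# sinogram or projection image.
rng = np.random.default_rng(0)
im = rng.normal(size=(512, 512)).astype(np.float32)
im[:, ::32] += 5.0  # add vertical stripes

filtered = munchetal_filter(im, wlevel=4, sigma=2.0)
print(filtered.shape)  # (512, 512)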


Example 12: blur_feature_tong_etal

def blur_feature_tong_etal(img, thresh=35, MinZero=0.05):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = resize_borders_to_multiple_of(img, 8)
    w = pywt.wavedec2(img, 'haar', level=3)

    emap = [np.sqrt(w[i][0]**2 + w[i][1]**2 + w[i][2]**2) for i in range(1, len(w))]
    window_size_map = [2, 4, 8]
    emax = [np.zeros((e.shape[0] // s, e.shape[1] // s)) for e, s in zip(emap, window_size_map)]

    for e, s, m in zip(emap, window_size_map, emax):
        for y in range(0, e.shape[0] // s):
            for x in range(0, e.shape[1] // s):
                ep = e[y*s:y*s+s,x*s:x*s+s]
                m[y,x] = np.amax(ep)

    r1 = edge_point = np.logical_or(emax[0] > thresh, np.logical_or(emax[1] > thresh, emax[2] > thresh))
    r2 = ds_or_as = np.logical_and(edge_point, np.logical_and(emax[0] > emax[1], emax[1] > emax[2]))
    r3 = rs_or_gs = np.logical_and(edge_point, np.logical_and(emax[0] < emax[1], emax[1] < emax[2]))
    r4 = rs = np.logical_and(edge_point, np.logical_and(emax[1] > emax[0], emax[1] > emax[2]))
    r5 = more_likely = np.logical_and(np.logical_or(rs_or_gs, rs), emax[0] < thresh)

    N_edge = np.count_nonzero(r1)
    N_da = np.count_nonzero(r2)
    N_rg = np.count_nonzero(np.logical_or(r3, r4))
    N_brg = np.count_nonzero(r5)
    Per = float(N_da)/float(N_edge)
    unblured = Per > MinZero

    # if N_rg is 0 then the image must be really blurry
    if N_rg == 0:
        BlurExtent = 1
    else:
        BlurExtent = float(N_brg)/float(N_rg)

    return BlurExtent
Developer: szakrewsky, Project: quality-feature-extraction, Lines of code: 35, Source file: blind_features.py


Example 13: w2d

def w2d(img, mode='haar', level=1):
    imArray = cv2.imread(img)
    #Datatype conversions
    #convert to grayscale
    imArray = cv2.cvtColor( imArray,cv2.COLOR_RGB2GRAY )
    #convert to float
    imArray =  np.float32(imArray)
    imArray /= 255.
    # compute coefficients (note: the `mode` argument actually carries the wavelet name, e.g. 'haar')
    coeffs = pywt.wavedec2(imArray, mode, level=level)

    # Process coefficients: zero the approximation band so only the details remain
    coeffs_H = list(coeffs)
    coeffs_H[0] *= 0

    # reconstruction
    imArray_H = pywt.waverec2(coeffs_H, mode)
    imArray_H *= 255.
    imArray_H = np.uint8(imArray_H)
    #Display result
    cv2.imshow('image',imArray_H)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Developer: jonathanlurie, Project: pythonStuff, Lines of code: 26, Source file: test01.py


Example 14: create_haar_dictionary

def create_haar_dictionary(p=8):
    import pywt
    c = pywt.wavedec2(np.zeros((p, p)), 'haar')
    D = []
    for k in range(1, len(c)):
        for i in range(3):
            ck = c[k][i]
            l = ck.shape[0]
            for j in range(l):
                for m in range(l):
                    ck[j, m] = 1
                    D += [pywt.waverec2(c, 'haar')]
                    ck[j, m] = 0
    ck = c[0]
    l = ck.shape[0]
    for j in range(l):
        for m in range(l):
            ck[j, m] = 1
            D += [pywt.waverec2(c, 'haar')]
            ck[j, m] = 0
    D = np.array(D).reshape(-1, p*p)

    Dn = []
    for i in range(15):
        Dn += _translate(D[i].reshape((p, p)))
    Dn = np.array(Dn).reshape((-1, p*p))
    i0 = np.sum(abs(Dn), axis=1) != 0
    return Dn[i0]
Developer: tomMoral, Project: AdaptiveOptim, Lines of code: 28, Source file: dictionaries.py


Example 15: idwt2

    def idwt2(self):
        """
        Test pypwt for DWT reconstruction (waverec2).
        """

        W = self.W
        levels = self.levels
        # inverse DWT with pypwt
        W.forward()
        logging.info("computing Wavelets.inverse from pypwt")
        t0 = time()
        W.inverse()
        logging.info("Wavelets.inverse took %.3f ms" % elapsed_ms(t0))

        if self.do_pywt:
            # inverse DWT with pywt
            Wpy = pywt.wavedec2(self.data, self.wname, mode=per_kw, level=levels)
            logging.info("computing waverec2 from pywt")
            _ = pywt.waverec2(Wpy, self.wname, mode=per_kw)
            logging.info("pywt took %.3f ms" % elapsed_ms(t0))

        # Check reconstruction
        W_image = W.image
        maxerr = _calc_errors(self.data, W_image, "[rec]")
        self.assertTrue(maxerr < self.tol, msg="[%s] something wrong with the reconstruction (errmax = %e)" % (self.wname, maxerr))
Developer: pierrepaleo, Project: pypwt, Lines of code: 25, Source file: test_wavelets.py


Example 16: waveletDenoise

def waveletDenoise(u,noiseSigma):
    wavelet = pywt.Wavelet('bior6.8')
    levels  = int( np.log2(u.shape[0]) )
    waveletCoeffs = pywt.wavedec2( u, wavelet, level=levels)
    threshold=noiseSigma*np.sqrt(2*np.log2(u.size))
    NWC = [pywt.thresholding.soft(x,threshold) for x in waveletCoeffs]
    u = pywt.waverec2( NWC, wavelet)[:u.shape[0],:u.shape[1]]
    return u
Developer: chripell, Project: yaaca, Lines of code: 8, Source file: astrolib.py
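
pywt.thresholding was removed in later pywt releases. A hedged rewrite of the same function against the current API, using pywt.threshold on each detail coefficient (and, unlike the original, leaving the approximation coefficients untouched), might look like:

import numpy as np
import pywt

def waveletDenoiseModern(u, noiseSigma, wname='bior6.8'):
    # Hypothetical modern variant of waveletDenoise above (not part of astrolib.py).
    wavelet = pywt.Wavelet(wname)
    levels = int(np.log2(u.shape[0]))
    coeffs = pywt.wavedec2(u, wavelet, level=levels)
    threshold = noiseSigma * np.sqrt(2 * np.log2(u.size))
    newCoeffs = [coeffs[0]] + [
        tuple(pywt.threshold(d, threshold, mode='soft') for d in detail)
        for detail in coeffs[1:]
    ]
    return pywt.waverec2(newCoeffs, wavelet)[:u.shape[0], :u.shape[1]]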


Example 17: blend_images

def blend_images(base, texture, wavelet, level, mode='smooth', base_gain=None,
                 texture_gain=None):
    """Blend loaded images at `level` of granularity using `wavelet`"""

    base_data = image2array(base)
    texture_data = image2array(texture)
    output_data = []

    # process color bands
    for base_band, texture_band in zip(base_data, texture_data):
        # multilevel dwt
        base_band_coeffs = pywt.wavedec2(base_band, wavelet, mode, level)
        texture_band_coeffs = pywt.wavedec2(texture_band, wavelet, mode, level)

        # average coefficients of base image
        output_band_coeffs = [base_band_coeffs[0]]  # cA
        del base_band_coeffs[0], texture_band_coeffs[0]

        # blend details coefficients
        for n, (base_band_details, texture_band_details) in enumerate(
                zip(base_band_coeffs, texture_band_coeffs)):
            blended_details = []
            for (base_detail, texture_detail) in zip(base_band_details,
                                                     texture_band_details):
                if base_gain is not None:
                    base_detail *= base_gain
                if texture_gain is not None:
                    texture_detail *= texture_gain

                # select coeffs with greater energy
                blended = numpy.where(abs(base_detail) > abs(texture_detail),
                                      base_detail, texture_detail)
                blended_details.append(blended)

            base_band_coeffs[n] = texture_band_coeffs[n] = None
            output_band_coeffs.append(blended_details)

        # multilevel idwt
        new_band = pywt.waverec2(output_band_coeffs, wavelet, mode)
        output_data.append(new_band)
        del new_band, base_band_coeffs, texture_band_coeffs

    del base_data, texture_data
    output_data = numpy.array(output_data)

    return array2image(output_data, base.mode)
Developer: HenryZhou1002, Project: pywt, Lines of code: 46, Source file: image_blender.py


Example 18: whash

def whash(image, hash_size = 8, image_scale = None, mode = 'haar', remove_max_haar_ll = True):
	"""
	Wavelet Hash computation.
	
	based on https://www.kaggle.com/c/avito-duplicate-ads-detection/

	@image must be a PIL instance.
	@hash_size must be a power of 2 and less than @image_scale.
	@image_scale must be power of 2 and less than image size. By default is equal to max
		power of 2 for an input image.
	@mode (see modes in pywt library):
		'haar' - Haar wavelets, by default
		'db4' - Daubechies wavelets
	@remove_max_haar_ll - remove the lowest low level (LL) frequency using Haar wavelet.
	"""
	import pywt
	if image_scale is not None:
		assert image_scale & (image_scale - 1) == 0, "image_scale is not power of 2"
	else:
		image_scale = 2**int(numpy.log2(min(image.size)))
	ll_max_level = int(numpy.log2(image_scale))

	level = int(numpy.log2(hash_size))
	assert hash_size & (hash_size-1) == 0, "hash_size is not power of 2"
	assert level <= ll_max_level, "hash_size in a wrong range"
	dwt_level = ll_max_level - level

	image = image.convert("L").resize((image_scale, image_scale), Image.LANCZOS)
	pixels = numpy.array(image.getdata(), dtype=float).reshape((image_scale, image_scale))
	pixels /= 255

	# Remove low level frequency LL(max_ll) if @remove_max_haar_ll using haar filter
	if remove_max_haar_ll:
		coeffs = pywt.wavedec2(pixels, 'haar', level = ll_max_level)
		coeffs = list(coeffs)
		coeffs[0] *= 0
		pixels = pywt.waverec2(coeffs, 'haar')

	# Use LL(K) as freq, where K is log2(@hash_size)
	coeffs = pywt.wavedec2(pixels, mode, level = dwt_level)
	dwt_low = coeffs[0]

	# Subtract median and compute hash
	med = numpy.median(dwt_low)
	diff = dwt_low > med
	return ImageHash(diff)
Developer: JohannesBuchner, Project: imagehash, Lines of code: 46, Source file: __init__.py
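
A hedged usage example (the file names are placeholders): the imagehash library defines subtraction on ImageHash as the Hamming distance between hashes, so two wavelet hashes can be compared directly:

from PIL import Image

h1 = whash(Image.open('photo_a.jpg'))
h2 = whash(Image.open('photo_b.jpg'))
print(h1 - h2)  # number of differing bits; small values mean similar images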


Example 19: denoise

def denoise(noisy_img, mode, level, noiseSigma):
    coeffs = pywt.wavedec2(noisy_img, mode, level=level)
    # Threshold the detail (i.e. high-frequency) coefficients
    # using a Donoho-Johnstone universal threshold
    threshold = noiseSigma * np.sqrt(2 * np.log2(noisy_img.size))
    rec_coeffs = coeffs
    rec_coeffs[1:] = (pywt.threshold(i, value=threshold, mode="soft") for i in rec_coeffs[1:])
    return rec_coeffs
Developer: laputian, Project: dml, Lines of code: 8, Source file: wavelet_threshold_denoising.py
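
Note that this denoise returns the thresholded coefficient list rather than an image; a hedged completion, assuming the caller wants the denoised image back, passes the result to pywt.waverec2 (the input array and parameters below are arbitrary):

import numpy as np
import pywt

noisy = np.random.rand(256, 256)
rec_coeffs = denoise(noisy, 'db4', level=3, noiseSigma=0.1)
denoised = pywt.waverec2(rec_coeffs, 'db4')
print(denoised.shape)  # (256, 256)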


Example 20: test_waverec2_axes_subsets

def test_waverec2_axes_subsets():
    rstate = np.random.RandomState(0)
    data = rstate.standard_normal((8, 8, 8))
    # test all combinations of 2 out of 3 axes transformed
    for axes in combinations((0, 1, 2), 2):
        coefs = pywt.wavedec2(data, 'haar', axes=axes)
        rec = pywt.waverec2(coefs, 'haar', axes=axes)
        assert_allclose(rec, data, atol=1e-14)
Developer: rgommers, Project: pywt, Lines of code: 8, Source file: test_multilevel.py



Note: the pywt.wavedec2 examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not republish without permission.

