This article collects typical usage examples of the numpy.fft.rfft2 function in Python. If you are wondering how exactly to use rfft2, how it behaves in practice, or where to find real-world examples, the hand-picked code samples here may help.
Below are 20 code examples of the rfft2 function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
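Before the collected examples, a quick orientation may help: rfft2 computes the 2D discrete Fourier transform of a real-valued array and returns only the non-redundant half of the spectrum (the last axis has length n//2 + 1), which is why every snippet below pairs it with irfft2 rather than ifft2. The snippets are excerpts, so they generally assume imports such as from numpy.fft import rfft2, irfft2 from their surrounding modules. Here is a minimal, self-contained round-trip sketch (not taken from any of the projects below):

import numpy as np
from numpy.fft import rfft2, irfft2

img = np.random.rand(64, 48)        # real-valued 2D input
spec = rfft2(img)                   # half-spectrum: complex array
print(spec.shape)                   # (64, 25) == (64, 48 // 2 + 1)

# Passing the original shape to irfft2 is always safe and is required
# when the last dimension is odd.
restored = irfft2(spec, s=img.shape)
print(np.allclose(img, restored))   # True (up to floating-point error)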
Example 1: getDisplacements2D
def getDisplacements2D(self, Z=None, window=False):
"""
Use phase correlation to find the relative displacement between
each time step
"""
if Z is None:
Z = self.getNbPixelsPerFrame()/self.getNbPixelsPerSlice()/2
shape = np.asarray(self.get2DShape())
if window:
ham = np.hamming(shape[1])*np.atleast_2d(np.hamming(shape[0])).T
else:
ham = 1.0
displs = np.zeros((self.getNbFrames(),2))
a = rfft2(self.get2DSlice(T=0, Z=Z)*ham)
for t in range(1,self.getNbFrames()):
b = rfft2(self.get2DSlice(T=t, Z=Z)*ham)
#calculate the normalized cross-power spectrum
#R = numexpr.evaluate(
# 'a*complex(real(b), -imag(b)/abs(a*complex(real(b), -imag(b))))'
# )
R = a*b.conj()
Ra = np.abs(a*b.conj())
R[Ra>0] /= Ra[Ra>0]
r = irfft2(R)
#Get the periodic position of the peak
l = r.argmax()
displs[t] = np.unravel_index(l, r.shape)
#prepare next step
a = b
return np.where(displs<shape/2, displs, displs-shape)
Developer: MathieuLeocmach, Project: colloids, Lines of code: 30, Source file: lif.py
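As a hedged aside (not part of the colloids project), the phase-correlation idea in getDisplacements2D can be checked on synthetic data: shift an image by a known amount and recover that shift from the peak of the inverse transform of the normalized cross-power spectrum. Note that which transform is conjugated fixes the sign of the recovered vector; the sketch below uses the order that returns the applied shift directly.

import numpy as np
from numpy.fft import rfft2, irfft2

rng = np.random.default_rng(0)
a = rng.random((128, 128))
shift = (5, -3)                              # known displacement (rows, cols)
b = np.roll(a, shift, axis=(0, 1))           # circularly shifted copy

R = rfft2(b) * rfft2(a).conj()               # cross-power spectrum
R /= np.abs(R) + 1e-15                       # normalize; guard against division by zero
r = irfft2(R, s=a.shape)

peak = np.unravel_index(r.argmax(), r.shape)
# Map the periodic peak back to a signed displacement, mirroring the
# np.where(...) wrap-around at the end of getDisplacements2D.
displ = [p if p < n // 2 else p - n for p, n in zip(peak, a.shape)]
print(displ)                                 # [5, -3]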
Example 2: fft_convolve
def fft_convolve(in1, in2, times):
def _centered(arr, newsize):
# Return the center newsize portion of the array.
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
if times == 0:
return in1.copy()
s1 = np.array(in1.shape)
s2 = np.array(in2.shape)
shape = s1 + (s2 - 1) * times
# Speed up FFT by padding to optimal size for FFTPACK
fshape = [next_fast_len(int(d)) for d in shape]
fslice = tuple([slice(0, int(sz)) for sz in shape])
resfft = fast_power(rfft2(in2, fshape), times)
resfft = resfft * rfft2(in1, fshape)
ret = irfft2(resfft, fshape)[fslice].copy()
ret = ret.real
return _centered(ret, s1)
Developer: CellProfiler, Project: cellstar, Lines of code: 27, Source file: image_util.py
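A quick sanity check of the padding logic used above (a sketch, not from the cellstar code base; next_fast_len and fast_power there only speed up and repeat the Fourier-domain product): zero-padding both inputs to the full output size s1 + s2 - 1 makes the FFT product equal to an ordinary linear convolution, which scipy's fftconvolve confirms.

import numpy as np
from numpy.fft import rfft2, irfft2
from scipy.signal import fftconvolve

rng = np.random.default_rng(1)
in1 = rng.random((40, 40))
in2 = rng.random((7, 7))

shape = tuple(np.array(in1.shape) + np.array(in2.shape) - 1)   # full output size
full = irfft2(rfft2(in1, shape) * rfft2(in2, shape), shape)

print(np.allclose(full, fftconvolve(in1, in2, mode='full')))   # True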
Example 3: linespectra
def linespectra (arr, freqs, sigma=4, channelWidth=20, kms=False, source_speed=0): #nb sigma is given in px (can be fractional)
"""arr should be an array of shape (x,:,>pix,>pix)
freqs an array or list of nums of length x"""
shifts=[int(round((freqs[-1]-freqs[i])*299792458/(channelWidth*freqs[-1]))) for i in xrange(len(freqs))]
# print shifts
x=[[] for _ in xrange(arr.shape[0])]
mid=arr.shape[2]/2.0-0.5
gauss_mask=garray(arr.shape[-2:],sigma)
s=[y*2 for y in gauss_mask.shape]
ftg=rfft2(gauss_mask, s)
for i in xrange(len(x)):
for j in xrange(arr.shape[1]):
convolved=irfft2(rfft2(arr[i,j,:,:],s)*ftg)
x[i].append(convolved[s[0]/2,s[1]/2])
padding=abs(max(shifts))
padded=[0 for _ in xrange(arr.shape[1]+padding*2+2)]
for i in xrange(len(x[0])):
for j in xrange(len(x)):
try:
padded[i+shifts[j]+padding]+=x[j][i]
except IndexError :
print j,i,len(x),len(x[j])
None
if kms: return [((i-150)*20/1000.0,x) for i,x in enumerate(padded)]
else: return [((i-150)*20,x) for i,x in enumerate(padded)]
Developer: Womble, Project: analysis-tools, Lines of code: 28, Source file: complete.py
Example 4: matches_exist
def matches_exist(template, image, tolerance=1):
# just taken from convolution def
expected = numpy.count_nonzero(template)
ih, iw = image.shape
th, tw = template.shape
# Pad image to even dimensions
if ih % 2 or iw % 2:
if ih % 2:
ih += 1
if iw % 2:
iw += 1
bin_image = pad_bin_image_to_shape(image, (ih, iw))
if expected == 0:
return []
# Calculate the convolution of the FFT's of the image & template
convolution_freqs = rfft2(image) * rfft2(template[::-1, ::-1],
image.shape)
convolution_image = irfft2(convolution_freqs)
found_bitmap = convolution_image > (expected - tolerance)
if True in found_bitmap:
return True
else:
return False
Developer: MMChambers, Project: Geist, Lines of code: 25, Source file: similar_images.py
Example 5: convolution
def convolution(bin_template, bin_image, tollerance=0.5):
expected = numpy.count_nonzero(bin_template)
ih, iw = bin_image.shape
th, tw = bin_template.shape
    # Pad the image to even dimensions
if ih % 2 or iw % 2:
if ih % 2:
ih += 1
if iw % 2:
iw += 1
bin_image = pad_bin_image_to_shape(bin_image, (ih, iw))
if expected == 0:
return []
# Calculate the convolution of the FFT's of the image & template
convolution_freqs = rfft2(bin_image) * rfft2(bin_template[::-1, ::-1],
bin_image.shape)
# Reverse the FFT to find the result image
convolution_image = irfft2(convolution_freqs)
# At this point, the maximum point in convolution_image should be the
# bottom right (why?) of the area of greatest match
# The areas in the result image within expected +- tollerance are where we
# saw matches
found_bitmap = ((convolution_image > (expected - tollerance)) &
(convolution_image < (expected + tollerance)))
match_points = numpy.transpose(numpy.nonzero(found_bitmap)) # bottom right
    # Convert each match point (the bottom-right corner of the matched area)
    # back to the top-left corner of the template (hence the -1)
return [((fx - (tw - 1)), (fy - (th - 1))) for (fy, fx) in match_points]
Developer: bibi-L, Project: Geist, Lines of code: 33, Source file: vision.py
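To make the bottom-right remark above concrete, here is a small standalone demonstration (toy data, not from the Geist project): correlating a binary image against an all-ones template by convolving with the flipped template puts the peak at the bottom-right corner of the matched region, so the top-left corner is recovered by subtracting the template size minus one.

import numpy as np
from numpy.fft import rfft2, irfft2

image = np.zeros((32, 64))
image[10:13, 20:23] = 1              # a 3x3 block of ones at rows 10-12, cols 20-22
template = np.ones((3, 3))

freqs = rfft2(image) * rfft2(template[::-1, ::-1], image.shape)
corr = irfft2(freqs, image.shape)

fy, fx = np.unravel_index(corr.argmax(), corr.shape)
print(fy, fx)                        # 12 22  (bottom-right of the match)
print(fy - 2, fx - 2)                # 10 20  (top-left corner of the template position)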
Example 6: convolve_dcr_image
def convolve_dcr_image(flux_arr, x_loc, y_loc, bandpass=None, x_size=None, y_size=None, seed=None,
psf=None, pad_image=1.5, pixel_scale=None, kernel_radius=None,
oversample_image=1, photon_noise=False, sky_noise=0.0, verbose=True, **kwargs):
"""Wrapper to call fast_dft with multiple DCR planes."""
x_size_use = int(x_size * pad_image)
y_size_use = int(y_size * pad_image)
oversample_image = int(oversample_image)
pixel_scale_use = pixel_scale / oversample_image
x0 = oversample_image * ((x_size_use - x_size) // 2)
x1 = x0 + x_size * oversample_image
y0 = oversample_image * ((y_size_use - y_size) // 2)
y1 = y0 + y_size * oversample_image
x_loc_use = x_loc * oversample_image + x0
y_loc_use = y_loc * oversample_image + y0
x_size_use *= oversample_image
y_size_use *= oversample_image
timing_model = -time.time()
source_image = fast_dft(flux_arr, x_loc_use, y_loc_use, x_size=x_size_use, y_size=y_size_use,
kernel_radius=kernel_radius, **kwargs)
timing_model += time.time()
n_star = len(x_loc)
if oversample_image > 1:
bright_star = "bright "
else:
bright_star = ""
if verbose:
if n_star == 1:
print("Time to model %i %sstar: [%0.3fs]"
% (n_star, bright_star, timing_model))
else:
print("Time to model %i %sstars: [%0.3fs | %0.5fs per star]"
% (n_star, bright_star, timing_model, timing_model / n_star))
rand_gen = np.random
if seed is not None:
rand_gen.seed(seed - 1)
# The images are purely real, so we can save time by using the real FFT,
# which uses only half of the complex plane
convol = np.zeros((y_size_use, x_size_use // 2 + 1), dtype='complex64')
dcr_gen = dcr_generator(bandpass, pixel_scale=pixel_scale_use, **kwargs)
timing_fft = -time.time()
for _i, offset in enumerate(dcr_gen):
source_image_use = source_image[_i]
psf_image = psf.drawImage(scale=pixel_scale_use, method='fft', offset=offset,
nx=x_size_use, ny=y_size_use, use_true_center=False)
if photon_noise:
base_noise = np.random.normal(scale=1.0, size=(y_size_use, x_size_use))
base_noise *= np.sqrt(np.abs(source_image_use) / photons_per_adu)
source_image_use += base_noise
if sky_noise > 0:
source_image_use += (rand_gen.normal(scale=sky_noise, size=(y_size_use, x_size_use))
/ np.sqrt(bandpass_nstep(bandpass)))
convol += rfft2(source_image_use) * rfft2(psf_image.array)
return_image = np.real(fftshift(irfft2(convol)))
timing_fft += time.time()
if verbose:
print("FFT timing for %i DCR planes: [%0.3fs | %0.3fs per plane]"
% (_i, timing_fft, timing_fft / _i))
return(return_image[y0:y1:oversample_image, x0:x1:oversample_image] * oversample_image**2)
Developer: isullivan, Project: simulations, Lines of code: 60, Source file: StarFast.py
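One detail worth noting in the two StarFast wrappers (this one and convolve_image below): the fftshift around irfft2 appears to be needed because psf.drawImage renders the PSF centred in its array rather than at the origin, and circular convolution with a centred kernel shifts the result by half the array size, which fftshift undoes.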
Example 7: _BandPassFilter
def _BandPassFilter(image, len_noise, len_object):
"""
bandpass filter implementation.
Source: http://physics-server.uoregon.edu/~raghu/particle_tracking.html
"""
b = len_noise
w = round(len_object)
N = 2 * w + 1
# Gaussian Convolution Kernel
sm = numpy.arange(0, N, dtype=numpy.float)
r = (sm - w) / (2 * b)
gx = numpy.power(math.e, -r ** 2) / (2 * b * math.sqrt(math.pi))
gx = numpy.reshape(gx, (gx.shape[0], 1))
gy = gx.conj().transpose()
# Boxcar average kernel, background
bx = numpy.zeros((1, N), numpy.float) + 1 / N
by = bx.conj().transpose()
# Convolution with the matrix and kernels
gxy = gx * gy
bxy = bx * by
kernel = fft.rfft2(gxy - bxy, image.shape)
res = fft.irfft2(fft.rfft2(image) * kernel)
arr_out = numpy.zeros((image.shape))
arr_out[w:-w, w:-w] = res[2 * w:, 2 * w:]
res = numpy.maximum(arr_out, 0)
return res
Developer: pieleric, Project: odemis-old, Lines of code: 30, Source file: coordinates.py
Example 8: decompose
def decompose(self,l_edges,keep_fourier=False):
"""
Decomposes the shear map into its E and B modes components and returns the respective power spectral densities at the specified multipole moments
:param l_edges: Multipole bin edges
:type l_edges: array
:param keep_fourier: If set to True, holds the Fourier transforms of the E and B mode maps into the E and B attributes of the ShearMap instance
:type keep_fourier: bool.
    :returns: tuple -- (l -- array, P_EE, P_BB, P_EB -- arrays) = (multipole moments, EE, BB power spectra and EB cross power)
>>> test_map = ShearMap.load("shear.fit",format=load_fits_default_shear)
>>> l_edges = np.arange(300.0,5000.0,200.0)
>>> l,EE,BB,EB = test_map.decompose(l_edges)
"""
#Perform Fourier transforms
ft_data1 = rfft2(self.data[0])
ft_data2 = rfft2(self.data[1])
#Compute frequencies
lx = rfftfreq(ft_data1.shape[0])
ly = fftfreq(ft_data1.shape[0])
#Safety check
assert len(lx)==ft_data1.shape[1]
assert len(ly)==ft_data1.shape[0]
#Compute sines and cosines of rotation angles
l_squared = lx[np.newaxis,:]**2 + ly[:,np.newaxis]**2
l_squared[0,0] = 1.0
sin_2_phi = 2.0 * lx[np.newaxis,:] * ly[:,np.newaxis] / l_squared
cos_2_phi = (lx[np.newaxis,:]**2 - ly[:,np.newaxis]**2) / l_squared
#Compute E and B components
ft_E = cos_2_phi * ft_data1 + sin_2_phi * ft_data2
ft_B = -1.0 * sin_2_phi * ft_data1 + cos_2_phi * ft_data2
ft_E[0,0] = 0.0
ft_B[0,0] = 0.0
assert ft_E.shape == ft_B.shape
assert ft_E.shape == ft_data1.shape
#Compute and return power spectra
l = 0.5*(l_edges[:-1] + l_edges[1:])
P_EE = _topology.rfft2_azimuthal(ft_E,ft_E,self.side_angle.to(deg).value,l_edges)
P_BB = _topology.rfft2_azimuthal(ft_B,ft_B,self.side_angle.to(deg).value,l_edges)
P_EB = _topology.rfft2_azimuthal(ft_E,ft_B,self.side_angle.to(deg).value,l_edges)
if keep_fourier:
self.fourier_E = ft_E
self.fourier_B = ft_B
return l,P_EE,P_BB,P_EB
Developer: TheisEizo, Project: LensTools, Lines of code: 59, Source file: shear.py
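For reference, cos_2_phi and sin_2_phi above are cos(2*phi) and sin(2*phi) of the polar angle phi of the wave vector (lx, ly), computed through the double-angle identities cos(2*phi) = (lx**2 - ly**2)/l**2 and sin(2*phi) = 2*lx*ly/l**2; rotating the two shear components by this angle is the standard Fourier-space E/B decomposition of a spin-2 field. The line l_squared[0,0] = 1.0 only avoids a division by zero at the zero mode, whose E and B values are then explicitly zeroed.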
Example 9: getDispl2DImage
def getDispl2DImage(self, t0=0, t1=1, Z=0):
ham = np.hamming(self.get2DShape()[1])*np.atleast_2d(np.hamming(self.get2DShape()[0])).T
a = rfft2(self.get2DSlice(T=t0, Z=Z)*ham)
b = rfft2(self.get2DSlice(T=t1, Z=Z)*ham)
R = numexpr.evaluate(
        'a*complex(real(b), -imag(b)) / abs(a*complex(real(b), -imag(b)))'
)
return irfft2(R)
Developer: MathieuLeocmach, Project: colloids, Lines of code: 8, Source file: lif.py
Example 10: register_imgs
def register_imgs(imgs,template):
"save some time by only taking fft of template once"
rfft2_template_conj = rfft2(template).conj()
shifts = []
for img in imgs:
corr = irfft2(rfft2(img)*rfft2_template_conj)
shifts.append(balanced_mod(np.unravel_index(corr.argmax(),corr.shape),corr.shape))
return shifts
Developer: wj2, Project: 2p, Lines of code: 8, Source file: registration.py
Example 11: conv
def conv(im, ker):
''' Convolves image im with kernel ker
Both image and kernel's dimensions should be even: ker.shape % 2 == 0
'''
sy,sx = array(ker.shape)/2
y0,x0 = array(im.shape)/2
big_ker = zeros(im.shape)
big_ker[y0-sy:y0+sy,x0-sx:x0+sx] = ker
return irfft2(rfft2(im)*rfft2(fftshift(big_ker)))
Developer: ndaniyar, Project: aphot, Lines of code: 9, Source file: common.py
Example 12: phaseCorrel
def phaseCorrel(a,b):
    """phase correlation calculation"""
    print a
    print b
R = rfft2(a)*np.conj(rfft2(b))
R /= np.absolute(R)
print R
print a.shape
return irfft2(R,a.shape)
Developer: MichielAriens, Project: PENOROOD, Lines of code: 9, Source file: phase_corr2.py
Example 13: correlation
def correlation(f, g):
f_fft = rfft2(f)
if f is g:
g_fft = f_fft
else:
g_fft = rfft2(g)
g_conj = np.conj(g_fft)
prod = f_fft * g_conj
return np.real(irfft2(prod))
Developer: kwmsmith, Project: kaw-analysis, Lines of code: 9, Source file: autocorr.py
Example 14: beam_convolve
def beam_convolve(arr, sigma):
"convoles a 2D image with a gaussian profile with sigma in px"
if len(arr.shape)!=2 or 3*sigma > max(arr.shape): raise ValueError ("arr is not 2d or beam is too wide")
else:
shape=arr.shape
gauss_mask=garray(shape,sigma)
s=[y*2 for y in gauss_mask.shape]
ftg=rfft2(gauss_mask, s)
return irfft2(rfft2(arr,s)*ftg)
Developer: Womble, Project: analysis-tools, Lines of code: 9, Source file: complete.py
Example 15: correlate
def correlate(image, filter):
r"""Performs a normalized cross-correlation between an image and a search
template. For more details, see:
http://en.wikipedia.org/wiki/Cross_correlation#Normalized_cross-correlation
"""
si = rfft2(image - mean(image))
sf = rfft2(filter - mean(filter), image.shape)
return irfft2(si * conj(sf))
Developer: xperroni, Project: Skeye, Lines of code: 9, Source file: __init__.py
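A small caveat on the docstring above: subtracting the means makes this a zero-mean cross-correlation computed in the Fourier domain, but it is not the fully normalized cross-correlation described at the linked page, which also divides by the local standard deviations; for many peak-finding uses the zero-mean version is sufficient.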
Example 16: lineold
def lineold (imcube, sigma, chanwidth=10):
"produces a spectrum by convolving each slice of imcube with a gaussian of width sigma and returning the value of the central pixel for each slice"
shape=imcube.shape
bandwidth=shape[0]*chanwidth
if len(shape)!=3: raise ValueError("imcube must be a cube")
gauss_mask=garray(shape[1:],sigma)
s=[y*2 for y in gauss_mask.shape]
ftg=rfft2(gauss_mask, s)
return [(i*chanwidth-bandwidth/2,irfft2(rfft2(imcube[i,:,:],s)*ftg)[s[0]/2,s[1]/2])
for i in xrange(shape[0])]
Developer: Womble, Project: analysis-tools, Lines of code: 11, Source file: complete.py
Example 17: correlate_windows
def correlate_windows(window_a, window_b, corr_method="fft", nfftx=None, nffty=None):
"""Compute correlation function between two interrogation windows.
The correlation function can be computed by using the correlation
theorem to speed up the computation.
Parameters
----------
window_a : 2d np.ndarray
a two dimensions array for the first interrogation window.
window_b : 2d np.ndarray
a two dimensions array for the second interrogation window.
corr_method : string
one of the two methods currently implemented: 'fft' or 'direct'.
Default is 'fft', which is much faster.
nfftx : int
the size of the 2D FFT in x-direction,
[default: 2 x windows_a.shape[0] is recommended].
nffty : int
the size of the 2D FFT in y-direction,
[default: 2 x windows_a.shape[1] is recommended].
Returns
-------
corr : 2d np.ndarray
a two dimensions array for the correlation function.
"""
if corr_method == "fft":
if nfftx is None:
nfftx = 2 * window_a.shape[0]
if nffty is None:
nffty = 2 * window_a.shape[1]
return fftshift(
irfft2(
rfft2(normalize_intensity(window_a), s=(nfftx, nffty))
* np.conj(rfft2(normalize_intensity(window_b), s=(nfftx, nffty)))
).real,
axes=(0, 1),
)
elif corr_method == "direct":
return convolve(normalize_intensity(window_a), normalize_intensity(window_b[::-1, ::-1]), "full")
else:
raise ValueError("method is not implemented")
Developer: joedborg, Project: openpiv-python, Lines of code: 50, Source file: pyprocess.py
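A minimal usage sketch of the 'fft' branch above (toy windows with made-up data; normalize_intensity is skipped here because the inputs are simple deltas): the displacement shows up as the offset of the correlation peak from the centre of the padded, fftshift-ed map, with the sign depending on the chosen convention.

import numpy as np
from numpy.fft import rfft2, irfft2, fftshift

# Two toy interrogation windows: the same bright spot, displaced by (2, 3) pixels.
a = np.zeros((32, 32)); a[16, 16] = 1.0
b = np.zeros((32, 32)); b[18, 19] = 1.0

s = (64, 64)   # 2x the window size, as the nfftx/nffty defaults above recommend
corr = fftshift(irfft2(rfft2(a, s=s) * np.conj(rfft2(b, s=s)), s=s).real, axes=(0, 1))

peak = np.array(np.unravel_index(corr.argmax(), corr.shape))
centre = np.array(s) // 2
print(centre - peak)   # [2 3]: the displacement of b relative to a under this convention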
Example 18: convolve_image
def convolve_image(flux_arr, x_loc, y_loc, x_size=None, y_size=None, seed=None,
psf=None, pad_image=1.5, pixel_scale=None, kernel_radius=None,
oversample_image=1, photon_noise=False, sky_noise=0.0, verbose=True, **kwargs):
"""Wrapper to call fast_dft with no DCR planes."""
x_size_use = int(x_size * pad_image)
y_size_use = int(y_size * pad_image)
oversample_image = int(oversample_image)
pixel_scale_use = pixel_scale / oversample_image
x0 = oversample_image * ((x_size_use - x_size) // 2)
x1 = x0 + x_size * oversample_image
y0 = oversample_image * ((y_size_use - y_size) // 2)
y1 = y0 + y_size * oversample_image
x_loc_use = x_loc * oversample_image + x0
y_loc_use = y_loc * oversample_image + y0
x_size_use *= oversample_image
y_size_use *= oversample_image
timing_model = -time.time()
source_image = fast_dft(flux_arr, x_loc_use, y_loc_use, x_size=x_size_use, y_size=y_size_use,
kernel_radius=kernel_radius, **kwargs)
timing_model += time.time()
n_star = len(x_loc)
if oversample_image > 1:
bright_star = "bright "
else:
bright_star = ""
if verbose:
if n_star == 1:
print("Time to model %i %sstar: [%0.3fs]" % (n_star, bright_star, timing_model))
else:
print("Time to model %i %sstars: [%0.3fs | %0.5fs per star]"
% (n_star, bright_star, timing_model, timing_model / n_star))
rand_gen = np.random
if seed is not None:
rand_gen.seed(seed - 1)
psf_image = psf.drawImage(scale=pixel_scale_use, method='fft', offset=[0, 0],
nx=x_size_use, ny=y_size_use, use_true_center=False)
if photon_noise:
base_noise = np.random.normal(scale=1.0, size=(y_size_use, x_size_use))
base_noise *= np.sqrt(np.abs(source_image) / photons_per_adu)
source_image += base_noise
if sky_noise > 0:
source_image += rand_gen.normal(scale=sky_noise, size=(y_size_use, x_size_use))
timing_fft = -time.time()
convol = rfft2(source_image) * rfft2(psf_image.array)
return_image = np.real(fftshift(irfft2(convol)))
timing_fft += time.time()
if verbose:
print("FFT timing (single plane): [%0.3fs]" % (timing_fft))
return(return_image[y0:y1:oversample_image, x0:x1:oversample_image] * oversample_image**2)
Developer: isullivan, Project: simulations, Lines of code: 50, Source file: StarFast.py
Example 19: output
def output(self):
""" """
# One dimension
if len(self._source.shape) == 1:
source = self._actual_source
# Use FFT convolution
if self._fft:
if not self._toric:
P = rfft(source,self._fft_shape[0])*self._fft_weights
R = irfft(P, self._fft_shape[0]).real
R = R[self._fft_indices]
else:
P = rfft(source)*self._fft_weights
R = irfft(P,source.shape[0]).real
# if self._toric:
# R = ifft(fft(source)*self._fft_weights).real
# else:
# n = source.shape[0]
# self._src_holder[n//2:n//2+n] = source
# R = ifft(fft(self._src_holder)*self._fft_weights)
# R = R.real[n//2:n//2+n]
# Use regular convolution
else:
R = convolve1d(source, self._weights[::-1], self._toric)
if self._src_rows is not None:
R = R[self._src_rows]
return R.reshape(self._target.shape)
# Two dimensions
else:
source = self._actual_source
# Use FFT convolution
if self._fft:
if not self._toric:
P = rfft2(source,self._fft_shape)*self._fft_weights
R = irfft2(P, self._fft_shape).real
R = R[self._fft_indices]
else:
P = rfft2(source)*self._fft_weights
R = irfft2(P,source.shape).real
# Use SVD convolution
else:
R = convolve2d(source, self._weights, self._USV, self._toric)
if self._src_rows is not None and self._src_cols is not None:
R = R[self._src_rows, self._src_cols]
return R.reshape(self._target.shape)
Developer: B-Rich, Project: dana, Lines of code: 48, Source file: shared_connection.py
Example 20: __init__
def __init__(self):
# Retina
self.R = np.zeros(retina_shape)
# Superior colliculus
self.SC_V = np.zeros(colliculus_shape)
self.SC_U = np.zeros(colliculus_shape)
# Projection from retina to colliculus
self.P = retina_projection()
# Parameters
self.sigma_e = sigma_e
self.A_e = A_e
self.sigma_i = sigma_i
self.A_i = A_i
self.alpha = alpha
self.tau = tau
self.scale = scale
self.noise = noise
# Lateral weights
# DoG
# K = A_e*gaussian((2*n+1,2*n+1), sigma_e) - A_i*gaussian((2*n+1,2*n+1), sigma_i)
# Constant inhibition
K = A_e*gaussian((2*n+1,2*n+1), sigma_e) - A_i #*gaussian((2*n+1,2*n+1), sigma_i)
# FFT for lateral weights
K_shape = np.array(K.shape)
self.fft_shape = np.array(best_fft_shape(colliculus_shape+K_shape//2))
self.K_fft = rfft2(K,self.fft_shape)
i0,j0 = K.shape[0]//2, K.shape[1]//2
i1,j1 = i0+colliculus_shape[0], j0+colliculus_shape[1]
self.K_indices = i0,i1,j0,j1
Developer: taoualiw, Project: Neurosciences, Lines of code: 34, Source file: model.py
Note: The numpy.fft.rfft2 examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.