This article collects typical usage examples of Python's scipy.ndimage.zoom function. If you have been wondering what exactly zoom does, how to call it, or what it looks like in real code, the curated examples below should help.
The following presents 20 code examples of the zoom function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
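As a quick orientation before the examples, here is a minimal sketch of the basic call (the array and variable names are illustrative): zoom resamples an array by a per-axis factor using spline interpolation of the given order, and the output shape is the input shape times the factor, rounded.

import numpy as np
from scipy.ndimage import zoom

a = np.arange(16, dtype=float).reshape(4, 4)
b = zoom(a, 2.0, order=1)         # one factor for all axes: bilinear, result is 8x8
c = zoom(a, (2.0, 0.5), order=0)  # per-axis factors with nearest neighbour: 8 rows, 2 columns
print(b.shape, c.shape)           # (8, 8) (8, 2)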
Example 1: Inversion
def Inversion(Qsca, Qabs, wavelength, diameter, nMin=1, nMax=3, kMin=0.001, kMax=1, scatteringPrecision=0.010, absorptionPrecision=0.010, spaceSize=120, interp=2):
    nRange = np.linspace(nMin, nMax, spaceSize)
    kRange = np.logspace(np.log10(kMin), np.log10(kMax), spaceSize)
    scaSpace = np.zeros((spaceSize, spaceSize))
    absSpace = np.zeros((spaceSize, spaceSize))
    for ni, n in enumerate(nRange):
        for ki, k in enumerate(kRange):
            _derp = fastMieQ(n + (1j * k), wavelength, diameter)
            scaSpace[ni][ki] = _derp[0]
            absSpace[ni][ki] = _derp[1]
    if interp is not None:
        nRange = zoom(nRange, interp)
        kRange = zoom(kRange, interp)
        scaSpace = zoom(scaSpace, interp)
        absSpace = zoom(absSpace, interp)
    scaSolutions = np.where(np.logical_and(Qsca*(1-scatteringPrecision) < scaSpace, scaSpace < Qsca*(1+scatteringPrecision)))
    absSolutions = np.where(np.logical_and(Qabs*(1-absorptionPrecision) < absSpace, absSpace < Qabs*(1+absorptionPrecision)))
    validScattering = nRange[scaSolutions[0]] + 1j*kRange[scaSolutions[1]]
    validAbsorption = nRange[absSolutions[0]] + 1j*kRange[absSolutions[1]]
    solution = np.intersect1d(validScattering, validAbsorption)
    # errors = [error()]
    return solution
Author: dalerxli | Project: PyMieScatt | Lines: 28 | Source: Inverse.py
Example 2: imageUp
def imageUp(img, order=1):
    """Upsample the input image by a factor of 2.

    Parameters
    ----------
    img : ndarray
        Image array. It can be a 2D or 3D array. If it is a 3D array,
        the upsampling is applied independently to each channel.
    order : integer, optional
        Interpolation order. Defaults to 1.

    Returns
    -------
    imgUp : ndarray
        Upsampled image of size (2*H, 2*W, D), where (H, W, D) are the
        height, width and depth of the input image.
    """
    if img.ndim == 2:
        imgZoomed = np.zeros([2*img.shape[0], 2*img.shape[1]], dtype=img.dtype)
        nd.zoom(img, 2.0, output=imgZoomed, order=order, mode='reflect')
        return imgZoomed
    else:
        zoomList = list()
        for d in range(img.shape[2]):
            imgZoomed = np.zeros([2*img.shape[0], 2*img.shape[1]], dtype=img.dtype)
            nd.zoom(img[..., d], 2.0, output=imgZoomed, order=order, mode='reflect')
            zoomList.append(imgZoomed)
        # recombine channels and return
        return np.concatenate([p[..., np.newaxis] for p in zoomList], axis=2)
Author: caomw | Project: optical-flow-filter | Lines: 35 | Source: misc.py
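A note on Example 2: scipy.ndimage.zoom also accepts one factor per axis, so the per-channel loop can be collapsed into a single call on a channels-last array. A minimal sketch under that assumption (the variable names are illustrative, not from the project above):

import numpy as np
from scipy import ndimage as nd

img = np.random.rand(32, 48, 3)  # (H, W, D), channels last
# a factor of 1.0 on the channel axis leaves it untouched
up = nd.zoom(img, (2.0, 2.0, 1.0), order=1, mode='reflect')
print(up.shape)  # (64, 96, 3)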
Example 3: Inversion_SD
def Inversion_SD(Bsca, Babs, wavelength, dp, ndp, nMin=1, nMax=3, kMin=0, kMax=1, scatteringPrecision=0.001, absorptionPrecision=0.001, spaceSize=40, interp=2):
    dp = coerceDType(dp)
    ndp = coerceDType(ndp)
    nRange = np.linspace(nMin, nMax, spaceSize)
    kRange = np.linspace(kMin, kMax, spaceSize)
    scaSpace = np.zeros((spaceSize, spaceSize))
    absSpace = np.zeros((spaceSize, spaceSize))
    for ni, n in enumerate(nRange):
        for ki, k in enumerate(kRange):
            _derp = fastMie_SD(n + (1j * k), wavelength, dp, ndp)
            scaSpace[ni][ki] = _derp[0]
            absSpace[ni][ki] = _derp[1]
    if interp is not None:
        nRange = zoom(nRange, interp)
        kRange = zoom(kRange, interp)
        scaSpace = zoom(scaSpace, interp)
        absSpace = zoom(absSpace, interp)
    scaSolutions = np.where(np.logical_and(Bsca*(1-scatteringPrecision) < scaSpace, scaSpace < Bsca*(1+scatteringPrecision)))
    absSolutions = np.where(np.logical_and(Babs*(1-absorptionPrecision) < absSpace, absSpace < Babs*(1+absorptionPrecision)))
    validScattering = nRange[scaSolutions[0]] + 1j*kRange[scaSolutions[1]]
    validAbsorption = nRange[absSolutions[0]] + 1j*kRange[absSolutions[1]]
    return np.intersect1d(validScattering, validAbsorption)
Author: dalerxli | Project: PyMieScatt | Lines: 27 | Source: Inverse.py
Example 4: deepdream
def deepdream(net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end='inception_4c/output', clip=True, **step_params):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in range(octave_n - 1):
        octaves.append(
            nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
    src = net.blobs['data']
    # allocate image for network-produced details
    detail = np.zeros_like(octaves[-1])
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        print("octave %d %s" % (octave, end))
        for i in range(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            sys.stdout.write("%d " % i)
            sys.stdout.flush()
        print("")
        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
Author: macpod | Project: DeepDreamVideo | Lines: 30 | Source: 2_dreaming_time.py
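In Examples 4 and 5 the tuple (1, 1.0/octave_scale, 1.0/octave_scale) scales only the spatial axes of the (C, H, W) array while leaving the channel axis alone. A short sketch of how the octave pyramid shapes shrink (the 224x224 input size is illustrative):

import numpy as np
from scipy import ndimage as nd

octave_scale = 1.4
octaves = [np.zeros((3, 224, 224))]  # (C, H, W), as preprocess() would produce
for i in range(3):
    octaves.append(nd.zoom(octaves[-1],
                           (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
print([o.shape for o in octaves])
# [(3, 224, 224), (3, 160, 160), (3, 114, 114), (3, 81, 81)]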
Example 5: deepdream
def deepdream(
        net, base_img, iter_n=10, octave_n=4, octave_scale=1.4, end="inception_4c/output", clip=True, **step_params
):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in range(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
    src = net.blobs["data"]
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in range(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            # visualization
            vis = deprocess(net, src.data[0])
            if not clip:  # adjust image contrast if clipping is disabled
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            print(octave, i, end, vis.shape)
            clear_output(wait=True)
        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
Author: IsakFalk | Project: deepdream | Lines: 34 | Source: deepdream.py
Example 6: compareData
def compareData(x1, y1, x2, y2, **kwargs):
    """Plot two traces against each other and report their difference."""
    # First check that the x-axes are the same; otherwise a warning should
    # be reported.
    x1 = np.array(x1)
    x2 = np.array(x2)
    y1 = np.array(y1)
    y2 = np.array(y2)
    print("[INFO] Plotting")
    p1, = pylab.plot(x1, y1)
    p2, = pylab.plot(x2, y2)
    pylab.legend([p1, p2], ["MOOSE", "NEURON"])
    outfile = kwargs.get('outfile', None)
    if not outfile:
        pylab.show()
    else:
        mu.info("Saving figure to %s" % outfile)
        pylab.savefig(outfile)
    # Resample the longer trace down to the length of the shorter one
    if len(y1) > len(y2):
        y1 = ndimage.zoom(y1, len(y2) / len(y1))
    else:
        y2 = ndimage.zoom(y2, len(y1) / len(y2))
    # zoom rounds the output length, so guard against an off-by-one
    n = min(len(y1), len(y2))
    diff = y1[:n] - y2[:n]
    linDiff = diff.sum()
    rms = np.sqrt(np.mean(diff ** 2.0))
    print(" |- RMS diff is: {}".format(rms))
Author: dilawar | Project: rallpacks | Lines: 29 | Source: compare.py
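One caveat on Example 6: zoom rounds the output length, so a ratio of the two lengths does not always produce exactly matching arrays. A safer sketch (illustrative, not from the project above) resamples to an explicit target length and trims any residual sample:

import numpy as np
from scipy import ndimage

def resample_to(y, n_target, order=1):
    # zoom's output length is round(len(y) * factor); the slice guards the rare overshoot
    z = ndimage.zoom(np.asarray(y, dtype=float), n_target / len(y), order=order)
    return z[:n_target]

y_short = np.sin(np.linspace(0, 1, 100))
y_long = np.sin(np.linspace(0, 1, 1000))
print(resample_to(y_long, len(y_short)).shape)  # (100,)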
Example 7: overlay_velocities
def overlay_velocities(self, ax):
    """Given an axes instance, overlay a quiver plot of Uf_ and Wf_.

    Uses interpolation (scipy.ndimage.zoom) to reduce the number of
    quivers to a readable number.

    Will only work sensibly if the thing plotted in ax has the same
    shape as Uf_.
    """
    zoom_factor = (0.5, 0.05)
    # TODO: proper x, z
    Z, X = np.indices(self.uf_.shape)
    # TODO: are the velocities going at the middle of their grid?
    # NB. these are not averages. ndi.zoom makes a spline and
    # then interpolates a value from this.
    # TODO: gaussian filter first? both are valid approaches
    Xr = ndi.zoom(X, zoom_factor)
    Zr = ndi.zoom(Z, zoom_factor)
    Uf_r = ndi.zoom(self.uf_, zoom_factor)
    Wf_r = ndi.zoom(self.wf_, zoom_factor)
    ax.quiver(Xr, Zr, Uf_r, Wf_r, scale=100)
Author: XNShen | Project: lab_turbulence | Lines: 25 | Source: plot.py
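The TODO in Example 7 points at a real trade-off: ndi.zoom fits a spline and samples it, so aggressive decimation (here a factor of 0.05 along one axis) can alias. One option the comment raises, sketched here with illustrative data, is to low-pass the field with a Gaussian before zooming:

import numpy as np
from scipy import ndimage as ndi

field = np.random.rand(200, 400)
zoom_factor = (0.5, 0.05)
# heuristic: blur on roughly the scale of the decimation along each axis
sigma = [0.5 / f for f in zoom_factor]
coarse = ndi.zoom(ndi.gaussian_filter(field, sigma=sigma), zoom_factor)
print(coarse.shape)  # (100, 20)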
Example 8: __init__
def __init__(self, polmap, I0, ne, flip_ne=False):
    self.fn = polmap.fn[:8]
    I0 = plt.imread(I0)
    self.I0s = np.sum(I0, 2)
    I1 = np.loadtxt(ne, delimiter=',')
    I1 = I1 - np.nan_to_num(I1).min()
    self.I1 = np.nan_to_num(I1)
    self.pm = polmap
    # scale and flip to data
    B0 = self.pm.B0
    scale = B0.shape[0] / self.I0s.shape[0]
    I0z = zoom(self.I0s, scale)
    crop = (I0z.shape[1] - B0.shape[1]) // 2
    if B0.shape[1] % 2 == 0:
        I0zc = I0z[:, crop:-crop]
    elif B0.shape[1] % 2 == 1:
        I0zc = I0z[:, crop:-crop-1]
    self.I0zcn = np.flipud(I0zc / I0zc.max())
    I1z = zoom(self.I1, scale)
    if B0.shape[1] % 2 == 0:
        I1zc = I1z[:, crop:-crop]
    elif B0.shape[1] % 2 == 1:
        I1zc = I1z[:, crop:-crop-1]
    self.I1zc = np.flipud(I1zc)
    if flip_ne is True:
        self.I1zc = np.flipud(self.I1zc)
    self.cmap = 'seismic'
Author: jdhare | Project: magpie-analysis | Lines: 29 | Source: magpie_data.py
Example 9: deepdream
def deepdream(net, base_imarray, iter_n=50, octave_n=4, octave_scale=1.4, end='inception_4c/output', clip=True, **step_params):
    octaves = [preprocess(net, base_imarray)]
    for i in range(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale, 1.0/octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1, 1.0*w/w1), order=1)
        src.reshape(1, 3, h, w)
        src.data[0] = octave_base + detail
        for i in range(iter_n):
            make_step(net, end=end, clip=clip, **step_params)
            vis = deprocess(net, src.data[0])
            if not clip:
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            showarray(vis)
            print(octave, i, end, vis.shape)
            clear_output(wait=True)
        detail = src.data[0] - octave_base
    return deprocess(net, src.data[0])
Author: aptxna | Project: my_deep_dream | Lines: 35 | Source: deepdream.py
Example 10: dream
def dream(model,
          base_img,
          octave_n=6,
          octave_scale=1.4,
          control=None,
          distance=objective_L2):
    octaves = [base_img]
    for i in range(octave_n - 1):
        octaves.append(
            nd.zoom(
                octaves[-1], (1, 1, 1.0 / octave_scale, 1.0 / octave_scale),
                order=1))
    detail = np.zeros_like(octaves[-1])
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(
                detail, (1, 1, 1.0 * h / h1, 1.0 * w / w1), order=1)
        input_oct = octave_base + detail
        print(input_oct.shape)
        out = make_step(input_oct, model, control, distance=distance)
        detail = out - octave_base
Author: Raven013 | Project: code-of-learn-deep-learning-with-pytorch | Lines: 25 | Source: deepdream.py
Example 11: deepdream
def deepdream(net, base_img, iter_n=10, octave_n=4, octave_scale=1.4,
              end='inception_5b/pool_proj', jitter=32, step_size=1.5):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in range(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale, 1.0/octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1, 1.0*w/w1), order=1)
        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in range(iter_n):
            make_step(net, end=end, step_size=step_size, jitter=jitter)
        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
Author: pavitrakumar78 | Project: DeepDreamsGIF | Lines: 25 | Source: try-layers.py
Example 12: deepdream_stepped
def deepdream_stepped(net, base_img, iter_n=10, octave_n=4, octave_scale=1.4,
                      end='inception_3b/5x5_reduce', start_sigma=2.5, end_sigma=.1,
                      start_jitter=48., end_jitter=4., start_step_size=3.0,
                      end_step_size=1.5, clip=True, **step_params):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in range(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale, 1.0/octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:  # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1, 1.0*w/w1), order=1)
        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in range(iter_n):
            # interpolate the schedule parameters across the iterations
            sigma = start_sigma + ((end_sigma - start_sigma) * i) / iter_n
            jitter = start_jitter + ((end_jitter - start_jitter) * i) / iter_n
            step_size = start_step_size + ((end_step_size - start_step_size) * i) / iter_n
            make_step(net, end=end, clip=clip, jitter=jitter, step_size=step_size, **step_params)
            # src.data[0] = blur(src.data[0], sigma)
        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
Author: genekogan | Project: deepdream | Lines: 27 | Source: deepdream.py
Example 13: deepdream
def deepdream(self, base_img, iter_n=10, octave_n=4, octave_scale=1.4,
              end='inception_4c/output'):
    # prepare base images for all octaves
    octaves = [preprocess(self.net, base_img)]
    for i in range(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale, 1.0/octave_scale), order=1))
    source = self.net.blobs['data']  # original image
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]  # octave size
        if octave > 0:
            # upscale details from previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1, 1.0*w/w1), order=1)
        source.reshape(1, 3, h, w)  # resize the network's input image size
        source.data[0] = octave_base + detail
        for i in range(iter_n):
            self.make_step(end=end)
        # extract details produced on the current octave
        detail = source.data[0] - octave_base
    return deprocess(self.net, source.data[0])  # return final image
Author: JoBergs | Project: psycam | Lines: 28 | Source: psycam.py
Example 14: deepdream
def deepdream(net, base_img, end, iter_n=10, octave_n=4, octave_scale=1.4, clip=True, **step_params):
    # prepare base images for all octaves
    octaves = [preprocess(net, base_img)]
    for i in range(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0/octave_scale, 1.0/octave_scale), order=1))
    src = net.blobs['data']
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # upscale details from the previous octave
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0*h/h1, 1.0*w/w1), order=1)
        src.reshape(1, 3, h, w)  # resize the network's input image size
        src.data[0] = octave_base + detail
        for i in range(iter_n):
            make_step(net, end, clip=clip, **step_params)
            # display step
            # vis = deprocess(net, src.data[0])
            # if not clip:  # adjust image contrast if clipping is disabled
            #     vis = vis * (255.0 / np.percentile(vis, 99.98))
            # ename = '-'.join(end.split('/'))
            # saveimage(vis, '{}-{}-{}'.format(octave, i))
            # print(octave, i, end, vis.shape)
        # extract details produced on the current octave
        detail = src.data[0] - octave_base
    # returning the resulting image
    return deprocess(net, src.data[0])
Author: ZombiiKush | Project: DeepDream | Lines: 32 | Source: deepdream.py
Example 15: upsample_pyramid
def upsample_pyramid(self, pyramid):
    target_shape = self.residual_hipass.shape
    result = []
    for level in pyramid:
        new_level = []
        for band in level:
            band_shape = band.shape
            if len(target_shape) > len(band_shape):
                band_shape = (band_shape[0], band_shape[1], 1)
            zf = array(target_shape) / array(band_shape)
            band.shape = band_shape
            tmp = ones(target_shape)
            if any(zf != 1):
                ndi.zoom(band, zf, tmp, order=1)
                upsamped = tmp
            else:
                upsamped = band
            new_level.append(upsamped)
        result.append(new_level)
    return result
Author: davidcox | Project: steerable_pyramids | Lines: 27 | Source: steerable_pyramid.py
Example 16: show_downsize
def show_downsize():
    for im in gen_images(n=-1, crop=True):
        t_im = im['T1c']
        gt = im['gt']
        t_im = np.asarray(t_im, dtype='float32')
        gt = np.asarray(gt, dtype='float32')
        d_im = zoom(t_im, 0.5, order=3)
        d_gt = zoom(gt, 0.5, order=0)
        print('New shape: ', d_im.shape)
        slices1 = np.arange(0, d_im.shape[0], d_im.shape[0] // 20)
        slices2 = np.arange(0, t_im.shape[0], t_im.shape[0] // 20)
        for s1, s2 in zip(slices1, slices2):
            d_im_slice = d_im[s1]
            d_gt_slice = d_gt[s1]
            im_slice = t_im[s2]
            gt_slice = gt[s2]
            title0 = 'Original'
            title1 = 'Downsized'
            vis_ims(im0=im_slice, gt0=gt_slice, im1=d_im_slice,
                    gt1=d_gt_slice, title0=title0, title1=title1)
Author: jhzhou1111 | Project: CNNbasedMedicalSegmentation | Lines: 26 | Source: show_images.py
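Example 16 shows the standard convention for medical images: resize intensities with a smooth spline (order=3) but label maps with order=0, so that no fractional, non-existent class values are invented at boundaries. A tiny sketch with illustrative data:

import numpy as np
from scipy.ndimage import zoom

labels = np.array([[0, 0, 2], [0, 1, 2], [1, 1, 2]], dtype='float32')
print(np.unique(zoom(labels, 2, order=0)))  # only values already present: [0. 1. 2.]
print(np.unique(zoom(labels, 2, order=3)))  # may include interpolated in-between values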
Example 17: deepdream
def deepdream(base_img, iter_n=5, octave_n=4, octave_scale=1.4, **step_params):
    # prepare base images for all octaves
    octaves = [preprocess(base_img)]
    for i in range(octave_n - 1):
        octaves.append(nd.zoom(octaves[-1], (1, 1.0 / octave_scale, 1.0 / octave_scale), order=1))
    detail = np.zeros_like(octaves[-1])  # allocate image for network-produced details
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(detail, (1, 1.0 * h / h1, 1.0 * w / w1), order=1)
        x = octave_base + detail  # reshaped to the network's input size below
        for i in range(iter_n):
            print(h, w)
            make_step(x.reshape(1, 3, h, w))
            # visualization
            vis = deprocess(x)
            # showarray(vis)
            # print(octave, i, vis.shape)
        # extract details produced on the current octave
        detail = x - octave_base
    # returning the resulting image
    return deprocess(x)
Author: bordesf | Project: IFT6266 | Lines: 26 | Source: deepDream.py
Example 18: plot_all_params
def plot_all_params(filen='obj_props', out_filen='ppv_grid', log_Z=False):
    """
    Read in the pickled tree parameter dictionary and plot the parameters
    it contains.

    Parameters
    ----------
    filen : str
        File name of the pickled reduced property dictionary.
    out_filen : str
        Basename of the plots; the key of the object dictionary is appended
        to the filename.
    log_Z : bool
        Create plots with a logarithmic Z axis.
    """
    cmap = cm.RdYlBu_r
    obj_dict = pickle.load(open(filen + '.pickle', 'rb'))
    X = obj_dict['velo']
    Y = obj_dict['angle']
    X = ndimage.zoom(X, 3)
    Y = ndimage.zoom(Y, 3)
    W = ndimage.zoom(obj_dict['conflict_frac'], 3)
    obj_dict['reward'] = np.log10(obj_dict['new_kdar_assoc']) / obj_dict['conflict_frac']
    params = [(k, v) for k, v in obj_dict.items()
              if k not in ['velo', 'angle']]
    clevels = [0.06, 0.12, 0.20, 0.30, 0.5]
    for key, Z in params:
        print(':: ', key)
        fig, ax = plt.subplots(figsize=(4, 4.5))
        cax = fig.add_axes([0.15, 0.88, 0.8, 0.03])
        plt.subplots_adjust(top=0.85, left=0.15, right=0.95, bottom=0.125)
        if log_Z:
            Z = np.log10(Z)
            key += '_(log)'
        Z = ndimage.zoom(Z, 3)
        pc = ax.pcolor(X, Y, Z, cmap=cmap, vmin=Z.min(), vmax=Z.max())
        cb = plt.colorbar(pc, ax=ax, cax=cax, orientation='horizontal',
                          ticklocation='top')
        ax.plot([4], [0.065], 'ko', ms=10, markerfacecolor='none', markeredgewidth=2)
        # Contours for the conflict fraction
        cn = ax.contour(X, Y, W, levels=clevels,
                        colors='k', linewidths=2)
        plt.setp(cn.collections,
                 path_effects=[PathEffects.withStroke(linewidth=2,
                                                      foreground='w')])
        cl = ax.clabel(cn, fmt='%1.2f', inline=1, fontsize=10,
                       use_clabeltext=True)
        plt.setp(cl, path_effects=[PathEffects.withStroke(linewidth=2,
                                                          foreground='w')])
        # Labels
        ax.set_xlabel(r'$v \ \ [{\rm km \ s^{-1}}]$')
        ax.set_ylabel(r'$\theta \ \ [^{\circ}]$')
        # Limits
        ax.set_xlim([X.min(), X.max()])
        ax.set_ylim([Y.min(), Y.max()])
        # Save
        plt.savefig(out_filen + '_' + key + '.pdf')
        plt.savefig(out_filen + '_' + key + '.png', dpi=300)
        plt.close()
Author: autocorr | Project: besl | Lines: 59 | Source: ppv_group_plots.py
Example 19: scaleImage
def scaleImage(path_img, dilated_img, depth, color_depth, scale=1):
    final_vessel = ndimage.zoom(dilated_img, scale, order=0)
    final_path = skeletonize_Image(255 * ndimage.zoom(path_img, scale, order=0)) / 255  # use nearest neighbour
    final_depth = final_path * ndimage.zoom(depth, scale, order=0)
    final_color_depth = ndimage.zoom(color_depth, scale, order=0)
    return final_path, final_vessel, final_depth, final_color_depth
Author: JasmineLei | Project: Blood-Vessel-Flow-Visualisation | Lines: 8 | Source: buildBG.py
Example 20: __call__
def __call__(self, locs, wfImage):
    """Align a set of localizations to a widefield image.

    Parameters
    ----------
    locs : Pandas DataFrame
        The DataFrame containing the localizations. x- and y-column
        labels are specified in self.coordCols.
    wfImage : array of int or array of float
        The widefield image to align the localizations to.

    Returns
    -------
    offsets : tuple of float
        The estimated offset between the localizations and widefield
        image. The first element is the offset in x and the second
        in y. These should be subtracted from the input localizations
        to align them to the widefield image.
    """
    upsampleFactor = self.upsampleFactor
    # Bin the localizations into a 2D histogram;
    # x corresponds to rows for histogram2d
    binsX = np.arange(0, upsampleFactor * wfImage.shape[0] + 1, 1) \
        * self.pixelSize / upsampleFactor
    binsY = np.arange(0, upsampleFactor * wfImage.shape[1] + 1, 1) \
        * self.pixelSize / upsampleFactor
    H, _, _ = np.histogram2d(locs[self.coordCols[0]],
                             locs[self.coordCols[1]],
                             bins=[binsX, binsY])
    # Upsample and flip the image to align it to the histogram;
    # then compute the cross correlation
    crossCorr = fftconvolve(H,
                            zoom(np.transpose(wfImage)[::-1, ::-1],
                                 upsampleFactor, order=0),
                            mode='same')
    # Find the maximum of the cross correlation
    centerLoc = np.unravel_index(np.argmax(crossCorr), crossCorr.shape)
    # Find the center of the widefield image
    imgCorr = fftconvolve(zoom(np.transpose(wfImage),
                               upsampleFactor, order=0),
                          zoom(np.transpose(wfImage)[::-1, ::-1],
                               upsampleFactor, order=0),
                          mode='same')
    centerWF = np.unravel_index(np.argmax(imgCorr), imgCorr.shape)
    # Find the shift between the images.
    # dx -> rows, dy -> cols because the image was transposed during
    # the fftconvolve operation.
    dy = (centerLoc[1] - centerWF[1]) / upsampleFactor * self.pixelSize
    dx = (centerLoc[0] - centerWF[0]) / upsampleFactor * self.pixelSize
    offsets = (dx, dy)
    return offsets
Author: kmdouglass | Project: bstore | Lines: 58 | Source: multiprocessors.py
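Example 20 uses zoom with order=0 to bring the widefield image up to the histogram's finer grid before the cross-correlation: nearest-neighbour replication keeps each camera pixel a constant block and invents no intermediate intensities, which is what you want when the upsampling is purely a grid-matching step. A minimal sketch of that behaviour (illustrative values):

import numpy as np
from scipy.ndimage import zoom

wf = np.array([[1.0, 2.0],
               [3.0, 4.0]])
up = zoom(wf, 5, order=0)  # each pixel is replicated, not interpolated
print(up.shape)            # (10, 10)
print(np.unique(up))       # [1. 2. 3. 4.] -- no new intensity values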
Note: The scipy.ndimage.zoom function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many programmers; copyright remains with the original authors, and any use or distribution must follow the corresponding project's license. Please do not reproduce without permission.