This article collects typical usage examples of the convolve function from Python's scipy.ndimage.filters module. If you are wondering what the convolve function does, how to call it, or what real-world uses look like, the hand-picked code examples below may help.
The sections below show 20 code examples of the convolve function, sorted by popularity by default. You can upvote the examples you like or find useful; that feedback helps the system recommend better Python code examples.
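Before the collected examples, here is a minimal, self-contained sketch of a basic scipy.ndimage.filters.convolve call; the image and kernel values are invented purely for illustration:

import numpy as np
from scipy.ndimage.filters import convolve

# A small 2-D image and a 3x3 averaging kernel (illustrative values only).
image = np.arange(25, dtype=float).reshape(5, 5)
kernel = np.ones((3, 3)) / 9.0

# 'reflect' is the default boundary mode; 'constant', 'nearest', 'mirror'
# and 'wrap' all appear in the examples below.
smoothed = convolve(image, kernel, mode='reflect')
print(smoothed.shape)  # (5, 5) - the output keeps the shape of the input

Unlike numpy.convolve, scipy.ndimage.filters.convolve returns an array with the same shape as its input, and boundary handling is selected with the mode argument, which is the parameter most of the examples below vary.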
Example 1: rl_damped

def rl_damped(raw, psf, niter=2, damped=True, N=3, T=None, multiplier=1):
    """ working on it"""
    #psf /= psf.sum()
    conversion = raw.mean() / 10
    raw /= conversion
    lucy = np.ones(raw.shape) * raw.mean()
    #plt.ion()
    #plt.figure()
    #plt.plot(raw)
    #plt.axhline(y=0, lw=2, color='black')
    for i in xrange(niter):
        if damped:
            print "dampening"
            lucy_temp = convolve(lucy, psf, mode='mirror')
            ratio = dampen(lucy_temp, raw, N, T, multiplier)
        else:
            ratio = raw / convolve(lucy, psf, mode='mirror')
        ratio[np.isnan(ratio)] = 0
        top = convolve(ratio, psf, mode='mirror')
        top[np.isnan(top)] = 0
        lucy = lucy * (top / psf.sum())
        #plt.plot( lucy )
        print 'iteration', i, lucy.mean(), raw.mean()
        print
    #raw_input('Done')
    return lucy * conversion

Developer: justincely, Project: lucy, Lines of code: 33, Source: lucy.py
Example 2: convolve1d

def convolve1d(Z, K, toric=False):
    """ Discrete, clamped, linear convolution of two one-dimensional sequences.

    The convolution operator is often seen in signal processing, where it
    models the effect of a linear time-invariant system on a signal [1]_.
    In probability theory, the sum of two independent random variables is
    distributed according to the convolution of their individual
    distributions.

    :param array Z:
        One-dimensional array.
    :param array K:
        One-dimensional array.
    :param bool toric:
        Indicates whether the convolution should be considered toric
        (periodic boundaries).
    :return:
        Discrete, clamped, linear convolution of `Z` and `K`.

    **Note**

    The discrete convolution operation is defined as

    .. math:: (f * g)[n] = \sum_{m = -\infty}^{\infty} f[m] g[n-m]

    **References**

    .. [1] Wikipedia, "Convolution",
           http://en.wikipedia.org/wiki/Convolution.
    """
    if toric:
        return convolve(Z, K, mode='wrap')
    else:
        return convolve(Z, K, mode='constant')

Developer: B-Rich, Project: dana, Lines of code: 34, Source: functions.py
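A short, hedged usage sketch of the convolve1d wrapper above, contrasting the clamped and toric boundary handling; the input values are invented, and the snippet assumes convolve1d from Example 2 is defined in the same module:

import numpy as np

Z = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
K = np.array([0.25, 0.5, 0.25])

clamped = convolve1d(Z, K)            # mode='constant': values outside the array are treated as 0
toric = convolve1d(Z, K, toric=True)  # mode='wrap': the sequence is treated as periodic

print(clamped)  # the end points are pulled toward zero
print(toric)    # the end points blend with values from the opposite end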
Example 3: process

def process(self, image):
    image = image.astype(np.double)
    if image.max() > 1:
        # The image is between 0 and 255 - we need to convert it to [0,1]
        image /= 255
    if image.ndim == 3:
        # we do not deal with color images.
        image = np.mean(image, axis=2)
    H, W = image.shape
    IH = filters.convolve(image, self._GH, mode='nearest')
    IW = filters.convolve(image, self._GW, mode='nearest')
    I_mag = np.sqrt(IH ** 2 + IW ** 2)
    I_theta = np.arctan2(IH, IW)
    alpha = self.specs.get('alpha', _ALPHA)
    num_angles = self.specs.get('num_angles', _NUM_ANGLES)
    I_orient = np.empty((H, W, num_angles))
    if self.specs.get('twoside', True):
        for i in range(num_angles):
            I_orient[:, :, i] = I_mag * np.maximum(
                np.cos(I_theta - self._ANGLES[i]) ** alpha, 0)
    else:
        for i in range(num_angles):
            I_orient[:, :, i] = I_mag * np.abs(
                np.cos(I_theta - self._ANGLES[i]) ** alpha)
    return I_orient

Developer: WilllWang, Project: iceberk, Lines of code: 26, Source: dsift.py
Example 4: step

def step(self, dt):
    if dt != self.dt:
        print "I can only integrate at fixed dt!"
        return
    self.nCells = len(self.cellStates)
    # Check we have enough space allocated
    try:
        s = self.specLevel[self.nCells - 1]
    except IndexError:
        # Could resize here, then would have to rebuild views
        print "Number of cells exceeded " \
            + self.__class__.__name__ \
            + "::maxCells (" + str(self.maxCells) + ")"
    self.dataLen = self.signalDataLen + self.nCells * self.nSpecies

    # Do u += h(T(u_t)/2 + hf(u_t)) where T=transport operator, f(u_t) is
    # our regulation function dydt
    self.signalling.transportRates(self.signalRate, self.signalLevel)
    self.signalRate *= 0.5
    self.dydt()
    self.rates[0:self.dataLen] *= self.dt
    self.levels[0:self.dataLen] += self.rates[0:self.dataLen]

    # Convolve (I+hT/2)u_t + f(u_t) with the Greens func to get u_{t+1}
    sigLvl = self.signalLevel.reshape(self.gridDim)
    convolve(sigLvl, self.greensFunc, mode='nearest')

    # Put the final signal levels into the cell states
    states = self.cellStates
    for (id, c) in states.items():
        if self.signalling:
            c.signals = self.signalling.signals(c, self.signalLevel)

Developer: WilliamPJSmith, Project: CM4-A, Lines of code: 34, Source: CrankNicIntegrator.py
Example 5: stitch

def stitch(targets, images):
    mask = rois_mask(targets)   # True where image data is
    gaps_mask = mask == False   # True where infill needs to go
    # compute bounds relative to the camera field
    (x, y, w, h) = stitched_box(targets)
    uroi = img_as_float(stitch_raw(targets, images, (x, y, w, h)))  # stitch with black infill
    # step 1: sparsely sample background mostly ignoring blob
    # compute gradient on both axes
    k = [[-3, -1, 0, 1, 3],
         [-3, -1, 0, 1, 3],
         [-3, -1, 0, 1, 3],
         [-3, -1, 0, 1, 3]]
    gy = convolve(uroi, k)
    gx = convolve(uroi, np.rot90(k))
    # ignore all but low-gradient areas
    bg = (abs(gy + gx) < 0.2) & mask
    # step 2: remove less contiguous areas
    filter_size = max(2, int(max(h, w) / 200))
    mf = minimum_filter(bg * 1, filter_size)
    # step 3: interpolate between samples
    z = inpaint(uroi * mf, mf == False)
    # step 4: subsample and re-interpolate to degrade artifacts in fill region
    random = RandomState(0)
    (h, w) = z.shape
    ng = random.rand(h, w) < 0.01
    z2 = inpaint(z * ng, ng == False)
    # step 5: final composite
    roi = (z2 * gaps_mask) + uroi
    return (roi * 255).astype(np.uint8), mask

Developer: LouisK130, Project: oii, Lines of code: 34, Source: joestitch.py
Example 6: add_pointsources

def add_pointsources(map_shape, freq, alpha0=4.5, sigma=0.5, A=1, number=1):
    map = np.zeros(map_shape)
    spec_list = []
    for i in range(number):
        ra = np.random.randint(0, map_shape[1])
        dec = np.random.randint(0, map_shape[2])
        alpha = np.random.normal(alpha0, sigma, 1)
        spec = A * (freq / 150.) ** alpha
        spec_list.append(spec)
        map[:, ra, dec] += spec
    out = np.zeros(map_shape)
    for i in range(map_shape[0]):
        kernel = np.arange(41) - 20.  # GBT
        #kernel = np.arange(21) - 10.
        kernel = sp.exp(-kernel ** 2 / (2. * 3 ** 2.))
        kernel *= 1. / (2. * sp.pi * 3 ** 2.)
        kernel = kernel[:, None] * kernel[None, :]
        convolve(map[i], kernel, output=out[i])
    map = out
    return map, spec_list

Developer: POFK, Project: From_KVM, Lines of code: 27, Source: svd.py
Example 7: rl_standard

def rl_standard(raw_image, psf, niter):
    """ Standard Lucy-Richardson deconvolution

    arXiv 2002 Lauer
    """
    psf /= psf.sum()
    psf_inverse = psf[::-1]
    lucy = np.ones(raw_image.shape) * raw_image.mean()
    for i in xrange(niter):
        estimate = convolve(lucy, psf, mode='mirror')
        estimate[np.isnan(estimate)] = 0
        correction = convolve(raw_image / estimate, psf_inverse, mode='mirror')
        correction[np.isnan(correction)] = 0
        print 'Correction:', correction.mean()
        lucy *= correction
        print 'Means:', raw_image.mean(), lucy.mean()
    chisq = scipy.nansum((lucy - raw_image) ** 2 / (lucy)) / (raw_image.size - 1)
    print chisq
    return lucy

Developer: justincely, Project: lucy, Lines of code: 25, Source: lucy.py
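As a quick orientation, a hedged driver for the rl_standard routine above; the Gaussian PSF, the synthetic signal, and the iteration count are invented for illustration and are not taken from the lucy project (rl_standard and the scipy import it relies on are assumed to be in scope):

import numpy as np
from scipy.ndimage.filters import convolve

x = np.arange(-10, 11, dtype=float)
psf = np.exp(-x ** 2 / (2. * 2. ** 2))   # 21-tap Gaussian PSF (illustrative)
truth = np.zeros(201)
truth[100] = 100.0                       # a single point source
blurred = convolve(truth, psf / psf.sum(), mode='mirror') + 1.0  # small offset avoids division by zero
restored = rl_standard(blurred, psf, niter=20)  # should re-concentrate the flux near index 100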
Example 8: preprocess

def preprocess(pattern, img):
    # bilinear interpolation for bayer_rggb images
    if pattern == 'bayer_rggb':
        (z, q, h) = (0.0, 0.25, 0.5)
        sparse = np.array([[q, h, q],
                           [h, z, h],
                           [q, h, q]])
        dense = np.array([[z, q, z],
                          [q, z, q],
                          [z, q, z]])
        img[0, :, :] = \
            np.where(img[0, :, :] > 0.0,
                     img[0, :, :],
                     convolve(img[0, :, :], sparse, mode='mirror'))
        img[1, :, :] = \
            np.where(img[1, :, :] > 0.0,
                     img[1, :, :],
                     convolve(img[1, :, :], dense, mode='mirror'))
        img[2, :, :] = \
            np.where(img[2, :, :] > 0.0,
                     img[2, :, :],
                     convolve(img[2, :, :], sparse, mode='mirror'))
        img = np.dstack((img[2, :, :],
                         img[1, :, :],
                         img[0, :, :]))
        return np.swapaxes(np.swapaxes(img, 2, 0), 1, 2)
    else:
        raise NotImplementedError('Preprocessing is implemented only for bayer_rggb')

Developer: VLOGroup, Project: joint-demosaicing-denoising-sem, Lines of code: 32, Source: demosaicing_data.py
Example 9: _postprocess_nodes

def _postprocess_nodes(self):
    fluid_map = self._fluid_map(wet=False, base=True).astype(np.uint8)
    wet_map_for_unused = self._fluid_map(wet=True, allow_unused=True, base=True).astype(np.uint8)
    wet_map = self._fluid_map(wet=True, base=True).astype(np.uint8)
    neighbors = self._lattice_kernel()

    # Any *wet* node not connected to at least one *fluid* node is marked unused.
    # Note that dry nodes connecting to wet nodes need to be retained.
    # For instance:
    #   W W
    #   W V
    # where W is a HBB wall and V is a velocity BC.
    where = filters.convolve(fluid_map, neighbors, mode="constant", cval=1) == 0
    self._type_map_base[where & wet_map_for_unused.astype(np.bool)] = nt._NTUnused.id

    # Any dry node not connected to at least one wet node is marked unused.
    # For instance, for HBB walls: .. W W W F -> .. U U W F.
    where = filters.convolve(wet_map, neighbors, mode="constant", cval=0) == 0
    self._type_map_base[where & np.logical_not(wet_map.astype(np.bool))] = nt._NTUnused.id

    # If an unused node touches a wet node, mark it as propagation only.
    # For instance, for HBB walls: .. U U W F -> .. U P W F.
    used_map = (self._type_map_base != nt._NTUnused.id).astype(np.uint8)
    where = filters.convolve(used_map, neighbors, mode="constant", cval=0) > 0
    self._type_map_base[where & (self._type_map_base == nt._NTUnused.id)] = nt._NTPropagationOnly.id

Developer: mjanusz, Project: sailfish, Lines of code: 25, Source: subdomain.py
Example 10: __init__

def __init__(self, image, fit_par=None, dt=0, fw=None, win_size=None,
             kernel=None, xkernel=None, bkg_image=None):
    self.image = image
    self.bkg_image = bkg_image

    # Noise removal by convolving with a null sum gaussian. Its FWHM
    # has to match the one of the objects we want to detect.
    try:
        self.fwhm = fw
        self.win_size = win_size
        self.kernel = kernel
        self.xkernel = xkernel
        self.image_conv = convolve(self.image.astype(float), self.kernel)
    except RuntimeError:
        # If the kernel is None, I assume all the args must be calculated
        self.fwhm = tools.get_fwhm(670, 1.42) / 120
        self.win_size = int(np.ceil(self.fwhm))
        self.kernel = tools.kernel(self.fwhm)
        self.xkernel = tools.xkernel(self.fwhm)
        self.image_conv = convolve(self.image.astype(float), self.kernel)

    # TODO: FIXME
    if self.bkg_image is None:
        self.bkg_image = self.image_conv

    self.fit_par = fit_par
    self.dt = dt

Developer: fedebarabas, Project: tormenta, Lines of code: 28, Source: maxima.py
Example 11: mvd_lr

def mvd_lr(initImg, imgList, psfList, iterNum):
    EPS = np.finfo(float).eps
    viewNum = len(imgList)
    initImg = initImg - np.amin(initImg)
    initImg = initImg / np.sum(np.abs(initImg))
    reconImg = initImg
    for i in xrange(iterNum):
        updateAll = np.ones(initImg.shape, dtype=float)
        for j in xrange(viewNum):
            img = imgList[j]
            psf = psfList[j]
            psf_prime = np.flipud(np.fliplr(psf))
            update = convolve(img / (convolve(reconImg, psf) + EPS), psf_prime)
            updateAll = updateAll * update
            # display progress
            progress = float(i * viewNum + j + 1) / (viewNum * iterNum)
            timeElapsed = time.time() - startTime
            timeRemaining = timeElapsed / progress * (1 - progress)
            sys.stdout.write('\r%.2f%%, %.2f s elapsed, %.2f s remaining' %
                             (progress * 100.0, timeElapsed, timeRemaining))
            sys.stdout.flush()
        reconImg = reconImg * updateAll
        reconImg = np.abs(reconImg)
        reconImg = reconImg / np.sum(reconImg)
    sys.stdout.write('\n')
    return reconImg

Developer: lirenzhucn, Project: MVD_VSI, Lines of code: 26, Source: mvd_algorithms.py
Example 12: _postprocess_nodes

def _postprocess_nodes(self):
    fluid_map = self._fluid_map_base(wet=False).astype(np.uint8)
    wet_map = self._fluid_map_base(wet=True).astype(np.uint8)

    neighbors = np.zeros((3, 3, 3), dtype=np.uint8)
    neighbors[1, 1, 1] = 1
    for ei in self.grid.basis:
        neighbors[1 + ei[2], 1 + ei[1], 1 + ei[0]] = 1

    # Any wet node not connected to at least one fluid node is marked unused.
    # Note that dry nodes connecting to wet nodes need to be retained.
    # For instance:
    #   W W
    #   W V
    # where W is a HBB wall and V is a velocity BC.
    where = (filters.convolve(fluid_map, neighbors, mode='wrap') == 0)
    self._type_map_base[where & wet_map.astype(np.bool)] = nt._NTUnused.id

    # Any dry node not connected to at least one wet node is marked unused.
    # For instance, for HBB walls: .. W W W F -> .. U U W F.
    where = (filters.convolve(wet_map, neighbors, mode='wrap') == 0)
    self._type_map_base[where & np.logical_not(wet_map)] = nt._NTUnused.id

    # If an unused node touches a wet node, mark it as propagation only.
    # For instance, for HBB walls: .. U U W F -> .. U P W F.
    used_map = (self._type_map_base != nt._NTUnused.id).astype(np.uint8)
    where = (filters.convolve(used_map, neighbors, mode='wrap') > 0)
    self._type_map_base[where & (self._type_map_base == nt._NTUnused.id)] = nt._NTPropagationOnly.id

Developer: marcinofulus, Project: sailfish, Lines of code: 27, Source: subdomain.py
Example 13: transportRates

def transportRates(self, signalRates, signalLevels, boundcond='constant', mode='normal'):
    # Compute the diffusion term (the Laplacian of the grid levels in
    # signalLevels) and write it into signalRates.
    #
    # mode='greens' - do not use initLevels as these don't apply!
    signalRatesView = signalRates.reshape(self.gridDim)
    signalLevelsView = signalLevels.reshape(self.gridDim)
    advKernel = numpy.zeros((3, 3, 3))
    advKernel[:, 1, 1] = [-0.5, 0, 0.5]
    for s in range(self.nSignals):
        if boundcond == 'constant' and self.initLevels and mode != 'greens':
            boundval = self.initLevels[s]
        else:
            boundval = 0.0
        if self.advRates:
            # Advection term = du/dx
            # Note: always use the 'nearest' edge case; this gives central
            # differences in the middle and forward/backward differences at the edges
            convolve(signalLevelsView[s], advKernel * self.advRates[s],
                     output=signalRatesView[s], mode='nearest')
            # Diffusion term = \del^2 u
            # Use the edge case from the boundary conditions for diffusion
            signalRatesView[s] += laplace(signalLevelsView[s], None, mode=boundcond, cval=boundval) * \
                self.diffRates[s] / 6.0
        else:
            signalRatesView[s] = laplace(signalLevelsView[s], None, mode=boundcond, cval=boundval) \
                * self.diffRates[s] / 6.0

Developer: HaseloffLab, Project: CellModeller, Lines of code: 26, Source: GridDiffusion.py
Example 14: step

def step(self, dt):
    if dt != self.dt:
        print "I can only integrate at fixed dt!"
        return
    self.nCells = len(self.cellStates)
    # Check we have enough space allocated
    try:
        s = self.specLevel[self.nCells - 1]
    except IndexError:
        # Could resize here, then would have to rebuild views
        print "Number of cells exceeded " \
            + self.__class__.__name__ \
            + "::maxCells (" + str(self.maxCells) + ")"
    self.dataLen = self.signalDataLen + self.nCells * self.nSpecies

    # growth dilution of species
    self.diluteSpecies()

    # Do u += h(T(u_t)/2 + hf(u_t)) where T=transport operator, f(u_t) is
    # our regulation function dydt
    self.signalling.transportRates(self.signalRate, self.signalLevel, self.boundcond)
    self.signalRate *= 0.5
    self.dydt()
    self.rates[0:self.dataLen] *= self.dt
    self.levels[0:self.dataLen] += self.rates[0:self.dataLen]

    # Convolve (I+hT/2)u_t + f(u_t) with the Greens func to get u_{t+1}
    sigLvl = self.signalLevel.reshape(self.gridDim)
    convolve(sigLvl, self.greensFunc, mode=self.boundcond)

    # put local cell signal levels in array
    self.signalLevel_dev.set(self.signalLevel)
    self.program.setCellSignals(self.queue, (self.nCells,), None,
                                numpy.int32(self.nSignals),
                                numpy.int32(self.gridTotalSize),
                                numpy.int32(self.signalling.gridDim[1]),
                                numpy.int32(self.signalling.gridDim[2]),
                                numpy.int32(self.signalling.gridDim[3]),
                                self.gridIdxs_dev.data,
                                self.triWts_dev.data,
                                self.signalLevel_dev.data,
                                self.cellSigLevels_dev.data).wait()
    self.cellSigLevels[:] = self.cellSigLevels_dev.get()

    # Put the final signal levels into the cell states
    # states = self.cellStates
    # for (id, c) in states.items():
    #     if self.signalling:
    #         c.signals = self.signalling.signals(c, self.signalLevel)

    # Update cellType array
    for (id, c) in self.cellStates.items():
        self.celltype[c.idx] = numpy.int32(c.cellType)
    self.celltype_dev.set(self.celltype)

Developer: HaseloffLab, Project: CellModeller, Lines of code: 56, Source: CLCrankNicIntegrator.py
Example 15: np_hs_jacobi

def np_hs_jacobi(im0, im1, u, v):
    # dx, dy, jacobi and alpha are module-level constants in the source file
    # (derivative kernels, an averaging kernel and the Horn-Schunck
    # regularization weight).
    It = im1 - im0
    Iy = convolve(im1, dy)
    Ix = convolve(im1, dx)
    denom = np.square(Ix) + np.square(Iy) + alpha ** 2
    for _ in range(100):
        ubar = convolve(u, jacobi)
        vbar = convolve(v, jacobi)
        t = (Ix * ubar + Iy * vbar + It) / denom
        u = ubar - Ix * t
        v = vbar - Iy * t
    return u, v

Developer: PierreHao, Project: aspire-demo-2016-winter, Lines of code: 13, Source: horn_schunck.py
Example 16: shade

def shade(self, grid):
    k = self.make_k()
    out_dtype = grid.dtype if not self.anti_alias else np.float64
    out = np.empty_like(grid, dtype=out_dtype)
    if len(grid.shape) == 3:
        cats = grid.shape[2]
        for cat in range(cats):
            convolve(grid[:, :, cat], k, output=out[:, :, cat],
                     mode='constant', cval=0.0)
    else:
        convolve(grid, k, mode='constant', cval=0.0, output=out)
    return out

Developer: Vasyka, Project: hat, Lines of code: 14, Source: numpyglyphs.py
Example 17: _postprocess_nodes

def _postprocess_nodes(self):
    fluid_map = self._fluid_map_base().astype(np.uint8)

    neighbors = np.zeros((3, 3, 3), dtype=np.uint8)
    neighbors[1, 1, 1] = 1
    for ei in self.grid.basis:
        neighbors[1 + ei[2], 1 + ei[1], 1 + ei[0]] = 1

    # Any node not connected to at least one wet node is marked unused.
    where = (filters.convolve(fluid_map, neighbors, mode='wrap') == 0)
    self._type_map_base[where] = nt._NTUnused.id

    # If an unused node touches a wet node, mark it as propagation only.
    used_map = (self._type_map_base != nt._NTUnused.id).astype(np.uint8)
    where = (filters.convolve(used_map, neighbors, mode='wrap') > 0)
    self._type_map_base[where & (self._type_map_base == nt._NTUnused.id)] = nt._NTPropagationOnly.id

Developer: brbr520, Project: LBM-sailfish, Lines of code: 15, Source: subdomain.py
Example 18: _filter_image

def _filter_image(image, min_scale, max_scale, mode):
    response = np.zeros((image.shape[0], image.shape[1],
                         max_scale - min_scale + 1), dtype=np.double)

    if mode == 'dob':
        # make response[:, :, i] a contiguous memory block
        item_size = response.itemsize
        response.strides = (item_size * response.shape[1], item_size,
                            item_size * response.shape[0] * response.shape[1])

        integral_img = integral_image(image)

        for i in range(max_scale - min_scale + 1):
            n = min_scale + i

            # Constant multipliers for the outer region and the inner region
            # of the bi-level filters with the constraint of keeping the
            # DC bias 0.
            inner_weight = (1.0 / (2 * n + 1) ** 2)
            outer_weight = (1.0 / (12 * n ** 2 + 4 * n))

            _censure_dob_loop(n, integral_img, response[:, :, i],
                              inner_weight, outer_weight)

    # NOTE : For the Octagon shaped filter, we implemented and evaluated the
    # slanted integral image based image filtering but the performance was
    # more or less equal to image filtering using
    # scipy.ndimage.filters.convolve(). Hence we have decided to use the
    # latter for a much cleaner implementation.
    elif mode == 'octagon':
        # TODO : Decide the shapes of Octagon filters for scales > 7
        for i in range(max_scale - min_scale + 1):
            mo, no = OCTAGON_OUTER_SHAPE[min_scale + i - 1]
            mi, ni = OCTAGON_INNER_SHAPE[min_scale + i - 1]
            response[:, :, i] = convolve(image,
                                         _octagon_kernel(mo, no, mi, ni))

    elif mode == 'star':
        for i in range(max_scale - min_scale + 1):
            m = STAR_SHAPE[STAR_FILTER_SHAPE[min_scale + i - 1][0]]
            n = STAR_SHAPE[STAR_FILTER_SHAPE[min_scale + i - 1][1]]
            response[:, :, i] = convolve(image, _star_kernel(m, n))

    return response

Developer: borevitzlab, Project: scikit-image, Lines of code: 48, Source: censure.py
Example 19: fitScaling

def fitScaling(n_events, box, YTOF, YBVG, goodIDX=None, neigh_length_m=3):
    YJOINT = 1.0 * YTOF * YBVG
    YJOINT /= 1.0 * YJOINT.max()

    convBox = 1.0 * \
        np.ones([neigh_length_m, neigh_length_m, neigh_length_m]) / \
        neigh_length_m**3
    conv_n_events = convolve(n_events, convBox)

    QX, QY, QZ = ICCFT.getQXQYQZ(box)

    dP = 8
    fitMaxIDX = tuple(
        np.array(np.unravel_index(YJOINT.argmax(), YJOINT.shape)))
    if goodIDX is None:
        goodIDX = np.zeros_like(YJOINT).astype(np.bool)
        goodIDX[max(fitMaxIDX[0] - dP, 0):min(fitMaxIDX[0] + dP, goodIDX.shape[0]),
                max(fitMaxIDX[1] - dP, 0):min(fitMaxIDX[1] + dP, goodIDX.shape[1]),
                max(fitMaxIDX[2] - dP, 0):min(fitMaxIDX[2] + dP, goodIDX.shape[2])] = True
    goodIDX = np.logical_and(goodIDX, conv_n_events > 0)

    scaleLinear = Polynomial(n=1)
    scaleLinear.constrain("A1>0")
    scaleX = YJOINT[goodIDX]
    scaleY = n_events[goodIDX]
    CreateWorkspace(OutputWorkspace='__scaleWS', dataX=scaleX, dataY=scaleY)
    fitResultsScaling = Fit(Function=scaleLinear, InputWorkspace='__scaleWS',
                            Output='__scalefit', CostFunction='Unweighted least squares')
    A0 = fitResultsScaling[3].row(0)['Value']
    A1 = fitResultsScaling[3].row(1)['Value']
    YRET = A1 * YJOINT + A0
    chiSqRed = fitResultsScaling[1]
    return YRET, chiSqRed, A1

Developer: mantidproject, Project: mantid, Lines of code: 33, Source: BVGFitTools.py
Example 20: test

def test(self):
    # Expected
    in_channels = 3
    in_dim = 11
    out_channels = 5
    out_dim = (in_dim / 2 + 1)
    img = np.arange(0, in_dim * in_dim * in_channels * 1, dtype=np.float32)
    img = np.reshape(img, [in_dim, in_dim, in_channels, 1])
    filter = np.arange(0, 3 * 3 * in_channels * out_channels, dtype=np.float32)
    filter = np.reshape(filter, [3, 3, in_channels, out_channels])
    bias = np.zeros([5])
    expected = np.zeros([out_dim, out_dim, out_channels])
    for och in range(out_channels):
        tmp = np.zeros([out_dim, out_dim, 1])
        for ich in range(in_channels):
            imgslice = np.reshape(img[:, :, ich, 0], [in_dim, in_dim])
            filterslice = np.reshape(filter[:, :, ich, och], [3, 3])
            tmp += np.reshape(convolve(imgslice, filterslice, mode='constant', cval=0.0)[::2, ::2],
                              [out_dim, out_dim, 1])
        expected[:, :, och] = np.squeeze(tmp) + bias[och]

    # test
    owlimg = owl.from_numpy(np.transpose(img))
    owlfilter = owl.from_numpy(np.transpose(filter))
    owlbias = owl.from_numpy(bias)
    convolver = owl.conv.Convolver(1, 1, 2, 2)
    test = convolver.ff(owlimg, owlfilter, owlbias)

    print 'Expected\n', expected
    print "Actual\n", test.to_numpy()
    self.assertTrue(np.allclose(expected, test))

Developer: lovi9573, Project: minerva, Lines of code: 30, Source: conv_forward.cuda-mpi.py
Note: The scipy.ndimage.filters.convolve examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Do not reproduce this compilation without permission.