This article compiles typical usage examples of the Python function skimage.filters.gaussian. If you are unsure what gaussian does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following presents 20 code examples of the gaussian function, collected from open-source projects and ordered by popularity by default.
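Before the collected examples, here is a minimal sketch of a typical call. The sample image and sigma value are illustrative placeholders, not taken from any example below:

import numpy as np
from skimage import data
from skimage.filters import gaussian

image = data.camera()                # built-in sample grayscale image
blurred = gaussian(image, sigma=2)   # Gaussian smoothing; result is a float image in [0, 1]
assert blurred.shape == image.shape  # the output keeps the input shape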
Example 1: test_apply_parallel
def test_apply_parallel():
    import dask.array as da

    # data
    a = np.arange(144).reshape(12, 12).astype(float)

    # apply the filter
    expected1 = threshold_local(a, 3)
    result1 = apply_parallel(threshold_local, a, chunks=(6, 6), depth=5,
                             extra_arguments=(3,),
                             extra_keywords={'mode': 'reflect'})

    assert_array_almost_equal(result1, expected1)

    def wrapped_gauss(arr):
        return gaussian(arr, 1, mode='reflect')

    expected2 = gaussian(a, 1, mode='reflect')
    result2 = apply_parallel(wrapped_gauss, a, chunks=(6, 6), depth=5)

    assert_array_almost_equal(result2, expected2)

    expected3 = gaussian(a, 1, mode='reflect')
    result3 = apply_parallel(
        wrapped_gauss, da.from_array(a, chunks=(6, 6)), depth=5, compute=True
    )

    assert isinstance(result3, np.ndarray)
    assert_array_almost_equal(result3, expected3)
Author: TheArindham | Project: scikit-image | Lines of code: 29 | Source file: test_apply_parallel.py
Example 2: filter
def filter(data, filtType, par):
    if filtType == "sobel":      filt_data = sobel(data)
    elif filtType == "roberts":  filt_data = roberts(data)
    elif filtType == "canny":    filt_data = canny(data)
    elif filtType == "lowpass_avg":
        from scipy import ndimage
        p = int(par)
        kernel = np.ones((p, p), np.float32) / (p * p)
        filt_data = ndimage.convolve(data, kernel)
    elif filtType == "highpass_avg":
        from scipy import ndimage
        p = int(par)
        kernel = np.ones((p, p), np.float32) / (p * p)
        lp_data = ndimage.convolve(data, kernel)
        filt_data = data - lp_data
    elif filtType == "lowpass_gaussian":
        filt_data = gaussian(data, sigma=float(par))
    elif filtType == "highpass_gaussian":
        lp_data = gaussian(data, sigma=float(par))
        filt_data = data - lp_data
    #elif filtType == "gradient":
    return filt_data
Author: yunjunz | Project: PySAR | Lines of code: 25 | Source file: filter_spatial.py
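The lowpass_gaussian and highpass_gaussian branches above illustrate a common pattern: a Gaussian high-pass filter is simply the input minus its Gaussian-blurred (low-pass) version. A minimal standalone sketch of that idea, with a placeholder array and sigma:

import numpy as np
from skimage.filters import gaussian

data = np.random.rand(64, 64)        # placeholder input array
lowpass = gaussian(data, sigma=3.0)  # smoothed, low-frequency content
highpass = data - lowpass            # remaining high-frequency detail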
Example 3: correct_drift
def correct_drift(self, ref, threshold=0.005):
    """Align images to correct for image drift.

    Detects common features on the images and tracks them moving.

    Parameters
    ----------
    ref: KerrArray or ndarray
        reference image with zero drift
    threshold: float
        threshold for detecting imperfections in images
        (see skimage.feature.corner_fast for details)

    Returns
    -------
    shift: array
        shift vector relative to ref (x drift, y drift)
    transim: KerrArray
        copy of self translated to account for drift"""
    refed = ref.clone
    refed = filters.gaussian(ref, sigma=1)
    refed = feature.corner_fast(refed, threshold=0.005)
    imed = self.clone
    imed = filters.gaussian(imed, sigma=1)
    imco = feature.corner_fast(imed, threshold=0.005)
    shift, err, phase = feature.register_translation(refed, imco, upsample_factor=50)
    #tform = SimilarityTransform(translation=(-shift[1],-shift[0]))
    #imed = transform.warp(im, tform)  # back to original image
    self = self.translate(translation=(-shift[1], -shift[0]))
    return [shift, self]
Author: gb119 | Project: kermit | Lines of code: 29 | Source file: kermitv1.py
Example 4: punch
def punch(img):
    # Identifying the tissue punches in order to crop the image correctly.
    # Canny edges and RANSAC are used to fit a circle to the punch,
    # and a mask is created.
    distance = 0
    r = 0
    float_im, orig, ihc = create_bin(img)
    gray = rgb2grey(orig)
    smooth = gaussian(gray, sigma=3)
    shape = np.shape(gray)
    l = shape[0]
    w = shape[1]
    x = l - 20
    y = w - 20
    rows = np.array([[x, x, x], [x + 1, x + 1, x + 1]])
    columns = np.array([[y, y, y], [y + 1, y + 1, y + 1]])
    corner = gray[rows, columns]
    thresh = np.mean(corner)
    print(thresh)

    binar = (smooth < thresh - 0.01)
    bin = remove_small_holes(binar, min_size=100000, connectivity=2)
    bin1 = remove_small_objects(bin, min_size=5000, connectivity=2)
    bin2 = gaussian(bin1, sigma=3)
    bin3 = (bin2 > 0)
    # eosin = IHC[:, :, 2]
    edges = canny(bin3)
    coords = np.column_stack(np.nonzero(edges))
    model, inliers = ransac(coords, CircleModel, min_samples=4, residual_threshold=1, max_trials=1000)
    # rr, cc = circle_perimeter(int(model.params[0]),
    #                           int(model.params[1]),
    #                           int(model.params[2]),
    #                           shape=im.shape)
    a, b = model.params[0], model.params[1]
    r = model.params[2]
    ny, nx = bin3.shape
    ix, iy = np.meshgrid(np.arange(nx), np.arange(ny))
    distance = np.sqrt((ix - b)**2 + (iy - a)**2)
    mask = np.ma.masked_where(distance > r, bin3)
    return distance, r, float_im, orig, ihc, bin3
Author: AidanRoss | Project: histology | Lines of code: 53 | Source file: ihc_analysis.py
Example 5: get_h1
def get_h1(imgs):
    ff = fftn(imgs)
    h1 = np.absolute(ifftn(ff[1, :, :]))
    scale = np.max(h1)
    # h1 = scale * gaussian_filter(h1 / scale, 5)
    h1 = scale * gaussian(h1 / scale, 5)
    return h1
Author: m12sl | Project: dsb2-keras | Lines of code: 7 | Source file: fourier.py
Example 6: run
def run(args):
    probs_map = np.load(args.probs_map_path)
    X, Y = probs_map.shape
    resolution = pow(2, args.level)

    if args.sigma > 0:
        probs_map = filters.gaussian(probs_map, sigma=args.sigma)

    outfile = open(args.coord_path, 'w')

    while np.max(probs_map) > args.prob_thred:
        prob_max = probs_map.max()
        max_idx = np.where(probs_map == prob_max)
        x_mask, y_mask = max_idx[0][0], max_idx[1][0]
        x_wsi = int((x_mask + 0.5) * resolution)
        y_wsi = int((y_mask + 0.5) * resolution)
        outfile.write('{:0.5f},{},{}'.format(prob_max, x_wsi, y_wsi) + '\n')

        x_min = x_mask - args.radius if x_mask - args.radius > 0 else 0
        x_max = x_mask + args.radius if x_mask + args.radius <= X else X
        y_min = y_mask - args.radius if y_mask - args.radius > 0 else 0
        y_max = y_mask + args.radius if y_mask + args.radius <= Y else Y

        for x in range(x_min, x_max):
            for y in range(y_min, y_max):
                probs_map[x, y] = 0

    outfile.close()
Author: bootuz | Project: NCRF | Lines of code: 27 | Source file: nms.py
Example 7: test_RGB
def test_RGB():
    img = gaussian(data.text(), 1)
    imgR = np.zeros((img.shape[0], img.shape[1], 3))
    imgG = np.zeros((img.shape[0], img.shape[1], 3))
    imgRGB = np.zeros((img.shape[0], img.shape[1], 3))
    imgR[:, :, 0] = img
    imgG[:, :, 1] = img
    imgRGB[:, :, :] = img[:, :, None]
    x = np.linspace(5, 424, 100)
    y = np.linspace(136, 50, 100)
    init = np.array([x, y]).T
    snake = active_contour(imgR, init, bc='fixed',
                           alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1)
    refx = [5, 9, 13, 17, 21, 25, 30, 34, 38, 42]
    refy = [136, 135, 134, 133, 132, 131, 129, 128, 127, 125]
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy)
    snake = active_contour(imgG, init, bc='fixed',
                           alpha=0.1, beta=1.0, w_line=-5, w_edge=0, gamma=0.1)
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy)
    snake = active_contour(imgRGB, init, bc='fixed',
                           alpha=0.1, beta=1.0, w_line=-5/3., w_edge=0, gamma=0.1)
    assert_equal(np.array(snake[:10, 0], dtype=np.int32), refx)
    assert_equal(np.array(snake[:10, 1], dtype=np.int32), refy)
Author: TonyMou | Project: scikit-image | Lines of code: 25 | Source file: test_active_contour_model.py
Example 8: _preprocess
def _preprocess(self, frame, stretch_intensity=True, blur=1, denoise=0):
    """
    1. convert frame to grayscale
    2. remove noise from frame. increase denoise value for more noise filtering
    3. stretch contrast
    """
    if len(frame.shape) != 2:
        frm = grayspace(frame)
    else:
        frm = frame / self.pixel_depth * 255
        frm = frm.astype('uint8')

    # self.preprocessed_frame = frame
    # if denoise:
    #     frm = self._denoise(frm, weight=denoise)
    # print 'gray', frm.shape
    if blur:
        frm = gaussian(frm, blur) * 255
        frm = frm.astype('uint8')
        # frm1 = gaussian(self.preprocessed_frame, blur,
        #                 multichannel=True) * 255
        # self.preprocessed_frame = frm1.astype('uint8')

    if stretch_intensity:
        frm = rescale_intensity(frm)
        # frm = self._contrast_equalization(frm)
        # self.preprocessed_frame = self._contrast_equalization(self.preprocessed_frame)

    return frm
Author: NMGRL | Project: pychron | Lines of code: 31 | Source file: locator.py
Example 9: create_background
def create_background(m, shape, fstd=2, bstd=10):
    canvas = np.ones(shape) * m
    noise = np.random.randn(shape[0], shape[1]) * bstd
    noise = fi.gaussian(noise, fstd)  # low-pass filter noise
    canvas += noise
    canvas = np.round(canvas).astype(np.uint8)
    return canvas
Author: tomfalainen | Project: neural-ctrlf | Lines of code: 7 | Source file: dataset_loader.py
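Assuming the imports this snippet relies on (numpy as np and skimage.filters as fi), the helper above could be exercised like this; the values are illustrative only:

bg = create_background(128, (64, 64))  # mid-grey canvas with smoothed random noise
print(bg.dtype, bg.shape)              # uint8 (64, 64)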
Example 10: from_points
def from_points(cls, points, shape=(100, 100), scale=1.0, blur=1.):
    """Creates a pattern from a set of points.

    Currently only Gaussian peaks are implemented.

    Parameters
    ----------
    points : Points, array_like
        Positions and intensities of the points in the array.
    shape : Shape of the final array.
    scale : float
        Maximum extent of the points. Should be less than 1.
    blur : float
        Level of gaussian blur to apply to the pattern.

    Returns
    -------
    Pattern
        An array simulating a diffraction pattern.
    """
    if not isinstance(points, Points):
        points = Points(points)
    positions = points.to_shape(shape, scale)
    dat = np.zeros(shape)
    x, y = np.mgrid[0:shape[0], 0:shape[1]]
    pos = np.empty(x.shape + (2,))
    pos[:, :, 0] = x
    pos[:, :, 1] = y
    for position, intensity in zip(positions, points.intensities):
        dat += intensity * multivariate_normal.pdf(pos, mean=position, cov=1)
    dat = filters.gaussian(dat, sigma=blur)
    return dat.view(cls)
Author: bm424 | Project: diffraction-toybox | Lines of code: 33 | Source file: core.py
Example 11: BlurImage
def BlurImage(self, BlurSize):  # Convolution of image with Gaussian kernel of width BlurSize
    self.BlurSize = BlurSize
    self.BlurredImage = filt.gaussian(self.Image, BlurSize)
    self.ManipulatedImage = self.BlurredImage
    if "-Blurred" not in self.TitleTag:
        self.TitleTag = self.TitleTag + "-Blurred"
    self.Show()
Author: laserkelvin | Project: Python-Ion-Imaging | Lines of code: 7 | Source file: ImageTools.py
Example 12: limpa_imagem
def limpa_imagem(img_cinza):
    # binarize the grayscale image
    img_bin_cinza = np.where(img_cinza < np.mean(img_cinza), 0, 255)

    # apply LBP to the grayscale image; LBP is used to avoid losing
    # information in regions close to dark areas (likely cells)
    lbp_img = local_binary_pattern(img_cinza, 24, 3, method='uniform')

    # blur the LBP result
    blur_img = gaussian(lbp_img, sigma=6)
    img_bin_blur = np.where(blur_img < np.mean(blur_img), 0, 255)

    # merge the two regions defined by the grayscale binarization
    # and the blurred-LBP binarization
    mascara = np.copy(img_bin_cinza)
    for (a, b), valor in np.ndenumerate(img_bin_blur):
        if valor == 0:
            mascara[a][b] = 0

    # apply the resulting mask to the original (grayscale) image to better
    # delimit the regions that carry no information (fully white regions)
    img_limpa = np.copy(img_cinza)
    for (a, b), valor in np.ndenumerate(mascara):
        if valor == 255:
            img_limpa[a][b] = 255

    return img_limpa
Author: willianfatec | Project: PatchWiser | Lines of code: 29 | Source file: binarypattern.py
Example 13: k_means_classifier
def k_means_classifier(image):
    n_clusters = 8

    # blur and take local maxima
    blur_image = gaussian(image, sigma=8)
    blur_image = ndi.maximum_filter(blur_image, size=3)

    # get texture features
    feats = local_binary_pattern(blur_image, P=40, R=5, method="uniform")
    feats_r = feats.reshape(-1, 1)

    # cluster the texture features
    km = k_means(n_clusters=n_clusters, batch_size=500)
    clus = km.fit(feats_r)

    # copy relevant attributes
    labels = clus.labels_
    clusters = clus.cluster_centers_

    # reshape label arrays
    labels = labels.reshape(blur_image.shape[0], blur_image.shape[1])

    # segment shadow
    img = blur_image.ravel()
    shadow_seg = img.copy()
    for i in range(0, n_clusters):
        # set up array of pixel indices matching cluster
        mask = np.nonzero((labels.ravel() == i) == True)[0]
        if len(mask) > 0:
            thresh = threshold_otsu(img[mask])
            shadow_seg[mask] = shadow_seg[mask] < thresh

    shadow_seg = shadow_seg.reshape(*image.shape)
    return shadow_seg
Author: charlienewey | Project: shadow-detection-notebook | Lines of code: 34 | Source file: shadow.py
Example 14: hsv_modulation
def hsv_modulation(lesion_image):
    img_path = os.path.join('../', lesion_image.path)
    if not os.path.exists(img_path):
        print('no image found: ', lesion_image.name)
        return []

    image = Image.open(img_path)
    mode = image.mode
    format = image.format
    height = image.height
    width = image.width
    image = array(image)

    if mode == 'RGBA':
        image = image[:, :, 0:3]

    if lesion_image.source == 'DermQuest':
        image = image[0:-100, :]

    center = (int(height / 2), int(width / 2))

    image_hsv = rgb2hsv(image)
    sigma = image.size / 800000
    oimage = np.copy(image)
    image = gaussian(image, sigma=sigma, multichannel=True)

    h = image_hsv[:, :, 0]
    s = image_hsv[:, :, 1]
    v = image_hsv[:, :, 2]

    h = gaussian(h, sigma=sigma)
    p2, p98 = np.percentile(h, (2, 98))
    h = exposure.rescale_intensity(h, in_range=(p2, p98))

    s_inv_v = s * ((v * -1) + 1)
    s_inv_v_h = s * ((v * -1) + 1) * ((h * -1) + 1)

    slic_s = prep(oimage, s * 256)

    path_string = 'media/{0}.slic_s.jpeg'.format(lesion_image.name)
    media_path = path(path_string)
    imsave(media_path.abspath(), slic_s)

    return [{'name': 'foo'}, {'name': 'bar'}]
Author: alexgustafson | Project: BATests | Lines of code: 47 | Source file: processing.py
Example 15: preprocessing_filters
def preprocessing_filters(image,
                          blur_params=None,
                          temperature_params=None,
                          low_contrast_params=None,
                          center=True):
    """
    Meta function for preprocessing images.

    Parameters
    ----------
    image : ndarray
        input rgb image
    blur_band : int
        band of rgb to check for blur
    blur_params : dict or `None`
        parameters for `pyroots.detect_blur`
    temperature_params : dict or `None`
        parameters for `pyroots.calc_temperature_distance`
    low_contrast_params : dict or `None`
        parameters for `skimage.exposure.is_low_contrast`
    center : bool
        Take middle 25% of an image for blur detection?

    Returns
    -------
    bool - should the image be pre-processed? Must pass all criteria given.
    """
    try:
        if center is True:
            blur = detect_motion_blur(_center_image(image), **blur_params)
        else:
            blur = detect_motion_blur(image, **blur_params)
    except:
        blur = True
        if blur_params is not None:
            warn("Skipping motion blur check", UserWarning)
        pass

    try:
        bands = calc_temperature_distance(image, **temperature_params)
    except:
        bands = True
        if temperature_params is not None:  # warn only if a temperature check was actually requested
            warn("Skipping temperature check", UserWarning)
        pass

    try:
        contrast = ~exposure.is_low_contrast(filters.gaussian(image, sigma=10, multichannel=True), **low_contrast_params)
    except:
        contrast = True
        if low_contrast_params is not None:
            warn("Skipping low contrast check", UserWarning)
        pass

    return blur * bands * contrast
Author: pme1123 | Project: PyRoots | Lines of code: 59 | Source file: preprocessing.py
Example 16: infer
def infer(edge_image, edge_lengths, mu, phi, sigma2,
          update_slice=slice(None),
          scale_estimate=None,
          rotation=0,
          translation=(0, 0)):
    # edge_points = np.array(np.where(edge_image)).T
    # edge_points[:, [0, 1]] = edge_points[:, [1, 0]]
    # edge_score = edge_image.shape[0] * np.exp(-edge_lengths[edge_image] / (0.25 * edge_image.shape[0])).reshape(-1, 1)
    # edge_points = np.concatenate((edge_points, edge_score), axis=1)
    #
    # edge_nn = NearestNeighbors(n_neighbors=1).fit(edge_points)
    edge_near = scipy.ndimage.distance_transform_edt(~edge_image)
    edge_near_blur = gaussian(edge_near, 2)
    Gy, Gx = np.gradient(edge_near_blur)
    mag = np.sqrt(np.power(Gy, 2) + np.power(Gx, 2))

    if scale_estimate is None:
        scale_estimate = min(edge_image.shape) * 4

    mu = (mu.reshape(-1, 2) - mu.reshape(-1, 2).mean(axis=0)).reshape(-1, 1)

    average_distance = np.sqrt(np.power(mu.reshape(-1, 2), 2).sum(axis=1)).mean()
    scale_estimate /= average_distance * np.sqrt(2)

    h = np.zeros((phi.shape[1], 1))
    psi = SimilarityTransform(scale=scale_estimate, rotation=rotation, translation=translation)

    while True:
        w = (mu + phi @ h).reshape(-1, 2)

        image_points = matrix_transform(w, psi.params)[update_slice, :]
        image_points = np.concatenate((image_points, np.zeros((image_points.shape[0], 1))), axis=1)
        # closest_edge_point_indices = edge_nn.kneighbors(image_points)[1].flatten()
        # closest_edge_points = edge_points[closest_edge_point_indices, :2]
        closest_edge_points = gradient_step(Gy, Gx, mag, image_points)

        w = mu.reshape(-1, 2)
        psi = estimate_transform('similarity', w[update_slice, :], closest_edge_points)

        image_points = matrix_transform(w, psi.params)[update_slice, :]
        image_points = np.concatenate((image_points, np.zeros((image_points.shape[0], 1))), axis=1)
        # closest_edge_point_indices = edge_nn.kneighbors(image_points)[1].flatten()
        # closest_edge_points = edge_points[closest_edge_point_indices, :2]
        closest_edge_points = gradient_step(Gy, Gx, mag, image_points)

        mu_slice = mu.reshape(-1, 2)[update_slice, :].reshape(-1, 1)
        K = phi.shape[-1]
        phi_full = phi.reshape(-1, 2, K)
        phi_slice = phi_full[update_slice, :].reshape(-1, K)

        h = update_h(sigma2, phi_slice, closest_edge_points, mu_slice, psi)

        w = (mu + phi @ h).reshape(-1, 2)
        image_points = matrix_transform(w, psi.params)

        update_slice = yield image_points, closest_edge_points
Author: jrdurrant | Project: vision | Lines of code: 59 | Source file: subspace_shape.py
Example 17: test
def test():
    image_series = glob.glob('full_dewar/puck*_*in*_200.jpg')
    templates = [n.replace('200', '*') for n in image_series]
    template_empty = imread('template_empty.jpg')
    h, w = template_empty.shape
    print('len(templates)', len(templates))
    fig, axes = plt.subplots(3, 4)
    a = axes.ravel()
    k = 0
    used = []
    while k < 12:
        #for template in templates[:12]:
        template = random.choice(templates)
        if template in used:
            pass
        else:
            used.append(template)
            original_image = img_as_float(combine(template, length=200))
            ax = a[k]
            gray_image = color.rgb2gray(original_image)
            img_sharp = unsharp(gray_image)
            edges = canny(img_sharp, sigma=3.0, low_threshold=0.04, high_threshold=0.05)
            med_unsharp = median(img_sharp / img_sharp.max(), selem=disk(4))
            sharp_med_unsharp = unsharp(med_unsharp)
            edges_med = canny(sharp_med_unsharp, sigma=7)
            match = match_template(gaussian(edges_med, 4), template_empty)
            print('match.max()')
            print(match.max())
            peaks = peak_local_max(gaussian(match, 3), threshold_abs=0.3, indices=True)
            print('template', template)
            print('# peaks', len(peaks))
            print(peaks)
            ax.imshow(original_image)  #, cmap='gray')
            #ax.imshow(gaussian(edges_med, 3), cmap='gnuplot')
            for peak in peaks:
                y, x = peak
                rect = plt.Rectangle((x, y), w, h, edgecolor='g', linewidth=2, facecolor='none')
                ax.add_patch(rect)
            #ax[edges] = (0, 1, 0)
            #image = img_as_int(original_image)
            #image[edges==True] = (0, 255, 0)
            ax.set_title(template.replace('full_dewar/', '').replace('_*.jpg', '') + ' detected %s' % (16 - len(peaks),))
            k += 1
    plt.show()
Author: MartinSavko | Project: eiger | Lines of code: 46 | Source file: detect_pins.py
Example 18: BackgroundSubtraction
def BackgroundSubtraction(self):
    self.ManipulatedImage = self.BlurredImage
    for Index in self.BackgroundImages.keys():
        self.ManipulatedImage = self.ManipulatedImage - filt.gaussian(self.BackgroundImages[Index], self.BlurSize)
    # Mask array, don't let ions oversubtract
    Mask = self.ManipulatedImage < 0.  # find values that are negative
    self.ManipulatedImage[Mask] = 0.
    self.Show()
Author: laserkelvin | Project: Python-Ion-Imaging | Lines of code: 8 | Source file: ImageTools.py
Example 19: get_saliency_image
def get_saliency_image(self, image_fname):
    image, image_filtersize, targetsize = util.preprocess_image(
        image_fname, config.filtersize)
    saliency = self.convolution_function(
        image_filtersize.reshape((1, 1, image_filtersize.shape[0],
                                  image_filtersize.shape[1])))[0]
    saliency = gaussian(saliency[0, 0], sigma=3.)
    return saliency, image
Author: nebw | Project: saliency-localizer | Lines of code: 8 | Source file: localizer.py
Example 20: test_apply_parallel_wrap
def test_apply_parallel_wrap():
    def wrapped(arr):
        return gaussian(arr, 1, mode='wrap')

    a = np.arange(144).reshape(12, 12).astype(float)
    expected = gaussian(a, 1, mode='wrap')
    result = apply_parallel(wrapped, a, chunks=(6, 6), depth=5, mode='wrap')

    assert_array_almost_equal(result, expected)
Author: AceHao | Project: scikit-image | Lines of code: 8 | Source file: test_apply_parallel.py
Note: The skimage.filters.gaussian examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before using or redistributing the code; do not republish without permission.