This article collects typical usage examples of the Python function skimage.data.astronaut. If you have been wondering how exactly to use astronaut, or what it is good for, the curated code examples below may help.
Twenty code examples of the astronaut function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
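For orientation, here is a minimal, self-contained sketch of the basic call (not taken from any of the collected examples; the grayscale conversion at the end is an illustrative extra): data.astronaut() returns the bundled 512x512 RGB test photograph as a uint8 NumPy array, which the examples below then resize, convert, or augment.

from skimage import data, color

# Load the bundled astronaut test image: a (512, 512, 3) uint8 array in RGB order.
image = data.astronaut()
print(image.shape, image.dtype)   # (512, 512, 3) uint8

# Many of the examples below first convert it to grayscale (float64 in [0, 1]).
gray = color.rgb2gray(image)
print(gray.shape, gray.dtype)     # (512, 512) float64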
Example 1: main
def main():
    image = data.astronaut()
    image = ia.imresize_single_image(image, (64, 64))
    print("image shape:", image.shape)
    print("Press any key or wait %d ms to proceed to the next image." % (TIME_PER_STEP,))

    k = [
        1,
        3,
        5,
        7,
        (3, 3),
        (1, 11)
    ]

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("aug", 64*NB_AUGS_PER_IMAGE, 64)

    for ki in k:
        aug = iaa.MedianBlur(k=ki)
        img_aug = [aug.augment_image(image) for _ in range(NB_AUGS_PER_IMAGE)]
        img_aug = np.hstack(img_aug)
        print("dtype", img_aug.dtype, "averages", np.average(img_aug, axis=tuple(range(0, img_aug.ndim-1))))
        title = "k=%s" % (str(ki),)
        img_aug = ia.draw_text(img_aug, x=5, y=5, text=title)
        cv2.imshow("aug", img_aug[..., ::-1])  # here with rgb2bgr
        cv2.waitKey(TIME_PER_STEP)
Author: AtomWrite, Project: imgaug, Lines of code: 29, Source: check_median_blur.py
Example 2: test_histogram_of_oriented_gradients
def test_histogram_of_oriented_gradients():
    img = img_as_float(data.astronaut()[:256, :].mean(axis=2))

    fd = feature.hog(img, orientations=9, pixels_per_cell=(8, 8),
                     cells_per_block=(1, 1))

    assert len(fd) == 9 * (256 // 8) * (512 // 8)
Author: Britefury, Project: scikit-image, Lines of code: 7, Source: test_hog.py
Example 3: get_HOG_features
def get_HOG_features(data_path, pickle_name):
    size = len(data_path)
    rowPatchCnt = 4
    colPatchCnt = 4
    var_features = np.zeros((size, colPatchCnt*rowPatchCnt*3))
    print var_features.shape
    image = color.rgb2gray(data.astronaut())
    #print image
    fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                        cells_per_block=(1, 1), visualise=True)
    print fd
    im = util.load_image(data_path[0])
    #print im
    #for i in range(size):
    #    if i % 500 == 0: print "{}/{}".format(i, size)
    #    im = util.load_image(data_path[i])
    #    patchH = im.shape[0] / rowPatchCnt
    #    patchW = im.shape[1] / colPatchCnt
    #    pass
    #im = np.array(im)
    pass
Author: HunjaeJung, Project: imagenet2014-modified, Lines of code: 25, Source: featurizer.py
Example 4: load_batches
def load_batches():
    # Here, load 10 batches of size 4 each.
    # You can also load an infinite amount of batches, if you don't train
    # in epochs.
    batch_size = 4
    nb_batches = 10

    # Here, for simplicity we just always use the same image.
    astronaut = data.astronaut()
    astronaut = ia.imresize_single_image(astronaut, (64, 64))

    for i in range(nb_batches):
        # A list containing all images of the batch.
        batch_images = []
        # A list containing IDs per image. This is not necessary for the
        # background augmentation and here just used to showcase that you
        # can transfer additional information.
        batch_data = []

        # Add some images to the batch.
        for b in range(batch_size):
            batch_images.append(astronaut)
            batch_data.append((i, b))

        # Create the batch object to send to the background processes.
        batch = ia.Batch(
            images=np.array(batch_images, dtype=np.uint8),
            data=batch_data
        )

        yield batch
Author: liuzhiit, Project: imgaug, Lines of code: 31, Source: test_readme_examples.py
Example 5: main
def main():
    image = data.astronaut()

    cv2.namedWindow("aug", cv2.WINDOW_NORMAL)
    cv2.imshow("aug", image)
    cv2.waitKey(TIME_PER_STEP)

    height, width = image.shape[0], image.shape[1]
    center_x = width // 2
    center_y = height // 2
    r = int(min(image.shape[0], image.shape[1]) / 3)

    for deg in cycle(np.arange(0, 360, DEG_PER_STEP)):
        rad = np.deg2rad(deg-90)
        point_x = int(center_x + r * np.cos(rad))
        point_y = int(center_y + r * np.sin(rad))
        direction = deg / 360
        aug = iaa.DirectedEdgeDetect(alpha=1.0, direction=direction)
        img_aug = aug.augment_image(image)
        img_aug[point_y-POINT_SIZE:point_y+POINT_SIZE+1, point_x-POINT_SIZE:point_x+POINT_SIZE+1, :] = \
            np.array([0, 255, 0])

        cv2.imshow("aug", img_aug)
        cv2.waitKey(TIME_PER_STEP)
Author: AtomWrite, Project: imgaug, Lines of code: 25, Source: check_directed_edge_detect.py
Example 6: test_astronaut
def test_astronaut():
    from skimage import data
    from skimage import color
    from skimage.transform import resize

    in_shape = (200, 200)
    n_imgs = 1

    astronaut = resize(color.rgb2gray(data.astronaut()), in_shape).astype(
        np.float32)
    astronaut -= astronaut.min()
    astronaut /= astronaut.max()

    imgs = astronaut.reshape((n_imgs,) + in_shape)

    model = cnnr.models.fg11_ht_l3_1_description
    extractor = cnnr.BatchExtractor(in_shape, model)

    feat_set = extractor.extract(imgs)

    assert feat_set.shape == (n_imgs, 10, 10, 256)

    feat_set.shape = n_imgs, -1
    test_chunk_computed = feat_set[0, 12798:12802]
    test_chunk_expected = np.array(
        [0.028979, 0.03315, 0.024466, 0.009412], dtype=np.float32)

    assert_allclose(test_chunk_computed, test_chunk_expected,
                    rtol=RTOL, atol=ATOL)
Author: giovanichiachia, Project: convnet-rfw, Lines of code: 31, Source: test_extractor.py
Example 7: test_rotated_img
def test_rotated_img():
    """
    The Harris filter should yield the same results with an image and its
    rotation.
    """
    im = img_as_float(data.astronaut().mean(axis=2))
    im_rotated = im.T

    # Moravec
    results = peak_local_max(corner_moravec(im),
                             min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(corner_moravec(im_rotated),
                                     min_distance=10, threshold_rel=0)
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Harris
    results = peak_local_max(corner_harris(im),
                             min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(corner_harris(im_rotated),
                                     min_distance=10, threshold_rel=0)
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()

    # Shi-Tomasi
    results = peak_local_max(corner_shi_tomasi(im),
                             min_distance=10, threshold_rel=0)
    results_rotated = peak_local_max(corner_shi_tomasi(im_rotated),
                                     min_distance=10, threshold_rel=0)
    assert (np.sort(results[:, 0]) == np.sort(results_rotated[:, 1])).all()
    assert (np.sort(results[:, 1]) == np.sort(results_rotated[:, 0])).all()
Author: ameya005, Project: scikit-image, Lines of code: 31, Source: test_corner.py
Example 8: hsi_equalize_hist
def hsi_equalize_hist():
    image = data.astronaut()
    h = color.rgb2hsv(image)
    h[:, :, 2] = exposure.equalize_hist(h[:, :, 2])
    image_equal = color.hsv2rgb(h)
    io.imshow(image_equal)
    io.imsave('astronautequal.png', image_equal)
Author: xingnix, Project: learning, Lines of code: 7, Source: colorimage.py
Example 9: test_li_astro_image
def test_li_astro_image():
    image = skimage.img_as_ubyte(data.astronaut())
    threshold = threshold_li(image)
    ce_actual = _cross_entropy(image, threshold)
    assert 64 < threshold < 65
    assert ce_actual < _cross_entropy(image, threshold + 1)
    assert ce_actual < _cross_entropy(image, threshold - 1)
Author: jmetz, Project: scikit-image, Lines of code: 7, Source: test_thresholding.py
Example 10: load_images
def load_images(n_batches=10, sleep=0.0, draw_text=True):
    batch_size = 4
    astronaut = data.astronaut()
    astronaut = ia.imresize_single_image(astronaut, (64, 64))
    kps = ia.KeypointsOnImage([ia.Keypoint(x=15, y=25)], shape=astronaut.shape)
    counter = 0
    for i in range(n_batches):
        if draw_text:
            batch_images = []
            batch_kps = []
            for b in range(batch_size):
                astronaut_text = ia.draw_text(astronaut, x=0, y=0, text="%d" % (counter,), color=[0, 255, 0], size=16)
                batch_images.append(astronaut_text)
                batch_kps.append(kps)
                counter += 1
            batch = ia.Batch(
                images=np.array(batch_images, dtype=np.uint8),
                keypoints=batch_kps
            )
        else:
            if i == 0:
                batch_images = np.array([np.copy(astronaut) for _ in range(batch_size)], dtype=np.uint8)

            batch = ia.Batch(
                images=np.copy(batch_images),
                keypoints=[kps.deepcopy() for _ in range(batch_size)]
            )
        yield batch
        if sleep > 0:
            time.sleep(sleep)
Author: AtomWrite, Project: imgaug, Lines of code: 31, Source: check_pool.py
Example 11: main
def main():
    image = data.astronaut()
    image = ia.imresize_single_image(image, (HEIGHT, WIDTH))

    kps = []
    for y in range(NB_ROWS):
        ycoord = BB_Y1 + int(y * (BB_Y2 - BB_Y1) / (NB_COLS - 1))
        for x in range(NB_COLS):
            xcoord = BB_X1 + int(x * (BB_X2 - BB_X1) / (NB_ROWS - 1))
            kp = (xcoord, ycoord)
            kps.append(kp)
    kps = set(kps)
    kps = [ia.Keypoint(x=xcoord, y=ycoord) for (xcoord, ycoord) in kps]
    kps = ia.KeypointsOnImage(kps, shape=image.shape)

    bb = ia.BoundingBox(x1=BB_X1, x2=BB_X2, y1=BB_Y1, y2=BB_Y2)
    bbs = ia.BoundingBoxesOnImage([bb], shape=image.shape)

    seq = iaa.Affine(rotate=45)
    seq_det = seq.to_deterministic()
    image_aug = seq_det.augment_image(image)
    kps_aug = seq_det.augment_keypoints([kps])[0]
    bbs_aug = seq_det.augment_bounding_boxes([bbs])[0]

    image_before = np.copy(image)
    image_before = kps.draw_on_image(image_before)
    image_before = bbs.draw_on_image(image_before)

    image_after = np.copy(image_aug)
    image_after = kps_aug.draw_on_image(image_after)
    image_after = bbs_aug.draw_on_image(image_after)

    ia.imshow(np.hstack([image_before, image_after]))
    imageio.imwrite("bb_aug.jpg", np.hstack([image_before, image_after]))
Author: AtomWrite, Project: imgaug, Lines of code: 34, Source: check_bb_augmentation.py
Example 12: test_daisy_normalization
def test_daisy_normalization():
    img = img_as_float(data.astronaut()[:64, :64].mean(axis=2))

    descs = daisy(img, normalization='l1')
    for i in range(descs.shape[0]):
        for j in range(descs.shape[1]):
            assert_almost_equal(np.sum(descs[i, j, :]), 1)
    descs_ = daisy(img)
    assert_almost_equal(descs, descs_)

    descs = daisy(img, normalization='l2')
    for i in range(descs.shape[0]):
        for j in range(descs.shape[1]):
            assert_almost_equal(sqrt(np.sum(descs[i, j, :] ** 2)), 1)

    orientations = 8
    descs = daisy(img, orientations=orientations, normalization='daisy')
    desc_dims = descs.shape[2]
    for i in range(descs.shape[0]):
        for j in range(descs.shape[1]):
            for k in range(0, desc_dims, orientations):
                assert_almost_equal(sqrt(np.sum(
                    descs[i, j, k:k + orientations] ** 2)), 1)

    img = np.zeros((50, 50))
    descs = daisy(img, normalization='off')
    for i in range(descs.shape[0]):
        for j in range(descs.shape[1]):
            assert_almost_equal(np.sum(descs[i, j, :]), 0)

    assert_raises(ValueError, daisy, img, normalization='does_not_exist')
Author: AbdealiJK, Project: scikit-image, Lines of code: 31, Source: test_daisy.py
Example 13: test_binary_descriptors_rotation_crosscheck_true
def test_binary_descriptors_rotation_crosscheck_true():
    """Verify matched keypoints and their corresponding masks results between
    image and its rotated version with the expected keypoint pairs with
    cross_check enabled."""
    img = data.astronaut()
    img = rgb2gray(img)
    tform = tf.SimilarityTransform(scale=1, rotation=0.15, translation=(0, 0))
    rotated_img = tf.warp(img, tform, clip=False)

    extractor = BRIEF(descriptor_size=512)

    keypoints1 = corner_peaks(corner_harris(img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(img, keypoints1)
    descriptors1 = extractor.descriptors

    keypoints2 = corner_peaks(corner_harris(rotated_img), min_distance=5,
                              threshold_abs=0, threshold_rel=0.1)
    extractor.extract(rotated_img, keypoints2)
    descriptors2 = extractor.descriptors

    matches = match_descriptors(descriptors1, descriptors2, cross_check=True)

    exp_matches1 = np.array([ 0,  2,  3,  4,  5,  6,  9, 11, 12, 13, 14, 17,
                             18, 19, 21, 22, 23, 26, 27, 28, 29, 31, 32, 33,
                             34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46])
    exp_matches2 = np.array([ 0,  2,  3,  1,  4,  6,  5,  7, 13, 10,  9, 11,
                             15,  8, 14, 12, 16, 18, 19, 21, 20, 24, 25, 26,
                             28, 27, 22, 23, 29, 30, 31, 32, 35, 33, 34, 36])
    assert_equal(matches[:, 0], exp_matches1)
    assert_equal(matches[:, 1], exp_matches2)
Author: AbdealiJK, Project: scikit-image, Lines of code: 31, Source: test_match.py
Example 14: ton_and_color_corrections
def ton_and_color_corrections():
    # Tone and color correction
    image = data.astronaut()
    h1 = color.rgb2hsv(image)
    h2 = h1.copy()
    h1[:, :, 1] = h1[:, :, 1] * 0.5
    image1 = color.hsv2rgb(h1)
    h2[:, :, 1] = h2[:, :, 1] * 0.5 + 0.5
    image2 = color.hsv2rgb(h2)
    io.imshow(image)
    io.imsave('astronaut.png', image)
    io.imshow(image1)
    io.imsave('astronautlight.png', image1)
    io.imshow(image2)
    io.imsave('astronautdark.png', image2)

    imagered = image.copy()
    imagered[:, :, 0] = image[:, :, 0] * 127.0 / 255 + 128
    io.imsave('astronautred.png', imagered)

    imageblue = image.copy()
    imageblue[:, :, 2] = image[:, :, 2] * 127.0 / 255 + 128
    io.imsave('astronautblue.png', imageblue)

    imageyellow = image.copy()
    imageyellow[:, :, 0] = image[:, :, 0] * 127.0 / 255 + 128
    imageyellow[:, :, 1] = image[:, :, 1] * 127.0 / 255 + 128
    io.imsave('astronautyellow.png', imageyellow)
    io.imshow(imageyellow)
Author: xingnix, Project: learning, Lines of code: 27, Source: colorimage.py
Example 15: test_hog_output_size
def test_hog_output_size():
    img = img_as_float(data.astronaut()[:256, :].mean(axis=2))

    fd = feature.hog(img, orientations=9, pixels_per_cell=(8, 8),
                     cells_per_block=(1, 1), block_norm='L1')

    assert len(fd) == 9 * (256 // 8) * (512 // 8)
Author: jarrodmillman, Project: scikit-image, Lines of code: 7, Source: test_hog.py
Example 16: color_segment
def color_segment():
    image = data.astronaut()
    r = np.uint8((image[:, :, 0] > 100) & (image[:, :, 1] < 100) & (image[:, :, 2] < 100))
    io.imsave('astronautsegr.png', r * 255)
    g = np.uint8((image[:, :, 0] < 100) & (image[:, :, 1] > 100) & (image[:, :, 2] < 100))
    io.imsave('astronautsegg.png', g * 255)
    b = np.uint8((image[:, :, 0] < 100) & (image[:, :, 1] < 100) & (image[:, :, 2] > 100))
    io.imsave('astronautsegb.png', b * 255)
Author: xingnix, Project: learning, Lines of code: 8, Source: colorimage.py
Example 17: main
def main():
    im = astronaut()
    patches = extract_patches_2d(im, (96, 96), 10, random_state=0)
    # save these files on disk
    file_names = ['sample_{}.png'.format(x) for x in range(10)]
    for idx, filename in enumerate(file_names):
        patch_this = patches[idx]
        if not exists(filename):
            imsave(filename, patch_this)
Author: leelabcnbc, Project: early-vision-toolbox, Lines of code: 9, Source: generate_test_examples.py
Example 18: test_hog_output_equivariance_multichannel
def test_hog_output_equivariance_multichannel():
    img = data.astronaut()
    img[:, :, (1, 2)] = 0
    hog_ref = feature.hog(img, multichannel=True, block_norm='L1')

    for n in (1, 2):
        hog_fact = feature.hog(np.roll(img, n, axis=2), multichannel=True,
                               block_norm='L1')
        assert_almost_equal(hog_ref, hog_fact)
Author: jarrodmillman, Project: scikit-image, Lines of code: 9, Source: test_hog.py
Example 19: test_threshold_minimum
def test_threshold_minimum():
    camera = skimage.img_as_ubyte(data.camera())
    threshold = threshold_minimum(camera)
    assert_equal(threshold, 76)

    astronaut = skimage.img_as_ubyte(data.astronaut())
    threshold = threshold_minimum(astronaut)
    assert_equal(threshold, 114)
Author: andreydung, Project: scikit-image, Lines of code: 9, Source: test_thresholding.py
Example 20: test_histogram_of_oriented_gradients_output_correctness
def test_histogram_of_oriented_gradients_output_correctness():
    img = color.rgb2gray(data.astronaut())
    correct_output = np.load(os.path.join(si.data_dir, 'astronaut_GRAY_hog.npy'))

    output = feature.hog(img, orientations=9, pixels_per_cell=(8, 8),
                         cells_per_block=(3, 3), feature_vector=True,
                         normalise=False, visualise=False)

    assert_almost_equal(output, correct_output)
Author: ClinicalGraphics, Project: scikit-image, Lines of code: 9, Source: test_hog.py
Note: The skimage.data.astronaut examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project; do not reproduce without permission.