This article collects typical usage examples of the Python skimage.feature.ORB class. If you have been wondering what the ORB class does, how to use it, or where to find working examples, the curated class examples below may help.
The following 20 code examples of the ORB class are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
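All of the examples below share the same basic workflow, so here is a minimal sketch of it first: detect keypoints and extract binary descriptors from two grayscale images, then match the descriptors. The images used (scikit-image's bundled data.camera() and a rotated copy) are illustrative choices, not taken from any of the examples.

import numpy as np
from skimage import data, transform
from skimage.feature import ORB, match_descriptors

img1 = data.camera()               # a 2-D grayscale image
img2 = transform.rotate(img1, 15)  # a rotated copy to match against

orb = ORB(n_keypoints=200)

orb.detect_and_extract(img1)
keypoints1, descriptors1 = orb.keypoints, orb.descriptors

orb.detect_and_extract(img2)
keypoints2, descriptors2 = orb.keypoints, orb.descriptors

# Binary descriptors are matched by Hamming distance; cross_check keeps
# only mutual nearest neighbours.
matches = match_descriptors(descriptors1, descriptors2, cross_check=True)
print(matches.shape)  # (n_matches, 2): indices into keypoints1 / keypoints2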
Example 1: test_keypoints_orb_desired_no_of_keypoints

def test_keypoints_orb_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=10, fast_n=12, fast_threshold=0.20)
    detector_extractor.detect(img)

    exp_rows = np.array([141., 108., 214.56, 131., 214.272,
                         67., 206., 177., 108., 141.])
    exp_cols = np.array([323., 328., 282.24, 292., 281.664,
                         85., 260., 284., 328.8, 267.])

    exp_scales = np.array([1, 1, 1.44, 1, 1.728, 1, 1, 1, 1.2, 1])

    exp_orientations = np.array([-53.97446153, 59.5055285, -96.01885186,
                                 -149.70789506, -94.70171899, -45.76429535,
                                 -51.49752849, 113.57081195, 63.30428063,
                                 -79.56091118])
    exp_response = np.array([1.01168357, 0.82934145, 0.67784179, 0.57176438,
                             0.56637459, 0.52248355, 0.43696175, 0.42992376,
                             0.37700486, 0.36126832])

    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)

    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])

Developer: TheArindham | Project: scikit-image | Lines: 29 | Source: test_orb.py
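The scikit-image test snippets on this page depend on module-level names that the page omits: np, assert_almost_equal, assert_equal, and the test image img. A plausible reconstruction of those fixtures is sketched below; the exact image used by test_orb.py is an assumption and may differ between scikit-image versions (which would also explain why Examples 1 and 6 share a test name but expect different values).

import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from skimage import data
from skimage.color import rgb2gray

# Assumed fixture: some grayscale test image from skimage.data.
img = rgb2gray(data.astronaut())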
Example 2: iris_scan_orb

def iris_scan_orb(request):
    from skimage import io
    from skimage.feature import match_descriptors, ORB
    from skimage.color import rgb2gray
    from django.shortcuts import render  # assumed import; the view returns render(...)
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS3.jpg'))  # query image
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS6.jpg'))  # image to compare against

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # query descriptors

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # comparison descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Percentage of query descriptors with a mutual best match.
    percent = len(matches12) / len(descriptors1) * 100

    # if percent > 80:
    #     print("Matched!")
    # else:
    #     print("Not Matched!")

    return render(request, 'scan.html', {'percent': percent})

Developer: rap999a | Project: django_iris_scan | Lines: 33 | Source: views.py
Example 3: test_keypoints_orb_less_than_desired_no_of_keypoints

def test_keypoints_orb_less_than_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=15, fast_n=12,
                             fast_threshold=0.33, downscale=2, n_scales=2)
    detector_extractor.detect(img)

    exp_rows = np.array([58., 65., 108., 140., 203.])
    exp_cols = np.array([291., 130., 293., 202., 267.])

    exp_scales = np.array([1., 1., 1., 1., 1.])

    exp_orientations = np.array([-158.26941428, -59.42996346, 151.93905955,
                                 -79.46341354, -56.90052451])

    exp_response = np.array([0.2667641, 0.04009017, -0.17641695, -0.03243431,
                             0.26521259])

    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)

    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])

Developer: TheArindham | Project: scikit-image | Lines: 26 | Source: test_orb.py
Example 4: epipolar_rectify

import numpy as np
import cv2
import matplotlib.pyplot as plt
from skimage.feature import ORB, match_descriptors, plot_matches

def epipolar_rectify(imL, imR, show_matches=True):
    descriptor_extractor = ORB(n_keypoints=2000)

    descriptor_extractor.detect_and_extract(imL)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(imR)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2,
                                  metric='hamming', cross_check=True)

    # skimage keypoints are (row, col); OpenCV expects float32 (x, y) points.
    pts1 = keypoints1[matches12[:, 0]][:, ::-1].astype(np.float32)
    pts2 = keypoints2[matches12[:, 1]][:, ::-1].astype(np.float32)

    F, mask = cv2.findFundamentalMat(pts1, pts2, cv2.FM_RANSAC)
    pts1 = pts1[mask.ravel() == 1]
    pts2 = pts2[mask.ravel() == 1]

    # stereoRectifyUncalibrated takes the image size as (width, height).
    res, H1, H2 = cv2.stereoRectifyUncalibrated(pts1, pts2, F,
                                                imL.shape[::-1], 10)

    if show_matches:
        fig, ax = plt.subplots(nrows=1, ncols=1)
        plot_matches(ax, imL, imR, keypoints1, keypoints2, matches12)

    return H1, H2

Developer: ceroytres | Project: lens_blur | Lines: 28 | Source: lens_blur.py
Example 5: iris_scan_orb_android

def iris_scan_orb_android(file_name):
    from skimage import io
    from skimage.feature import match_descriptors, ORB
    from skimage.color import rgb2gray
    from .settings import MEDIA_ROOT

    img1 = rgb2gray(io.imread(MEDIA_ROOT + '/' + file_name))  # query image
    img2 = rgb2gray(io.imread(MEDIA_ROOT + '/IRIS9.jpg'))  # image to compare against

    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(img1)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors  # query descriptors

    descriptor_extractor.detect_and_extract(img2)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors  # comparison descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Percentage of query descriptors with a mutual best match.
    percent = len(matches12) / len(descriptors1) * 100

    return percent

Developer: rap999a | Project: django_iris_scan | Lines: 25 | Source: views.py
Example 6: test_keypoints_orb_desired_no_of_keypoints

def test_keypoints_orb_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=10, fast_n=12, fast_threshold=0.20)
    detector_extractor.detect(img)

    exp_rows = np.array([435., 435.6, 376., 455., 434.88, 269.,
                         375.6, 310.8, 413., 311.04])
    exp_cols = np.array([180., 180., 156., 176., 180., 111.,
                         156., 172.8, 70., 172.8])

    exp_scales = np.array([1., 1.2, 1., 1., 1.44, 1.,
                           1.2, 1.2, 1., 1.728])

    exp_orientations = np.array([-175.64733392, -167.94842949, -148.98350192,
                                 -142.03599837, -176.08535837, -53.08162354,
                                 -150.89208271, 97.7693776, -173.4479964,
                                 38.66312042])

    exp_response = np.array([0.96770745, 0.81027306, 0.72376257,
                             0.5626413, 0.5097993, 0.44351774,
                             0.39154173, 0.39084861, 0.39063076,
                             0.37602487])

    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)

    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])

Developer: AlexG31 | Project: scikit-image | Lines: 31 | Source: test_orb.py
Example 7: test_keypoints_orb_less_than_desired_no_of_keypoints

def test_keypoints_orb_less_than_desired_no_of_keypoints():
    detector_extractor = ORB(n_keypoints=15, fast_n=12,
                             fast_threshold=0.33, downscale=2, n_scales=2)
    detector_extractor.detect(img)

    exp_rows = np.array([67., 247., 269., 413., 435., 230., 264.,
                         330., 372.])
    exp_cols = np.array([157., 146., 111., 70., 180., 136., 336.,
                         148., 156.])

    exp_scales = np.array([1., 1., 1., 1., 1., 2., 2., 2., 2.])

    exp_orientations = np.array([-105.76503839, -96.28973044, -53.08162354,
                                 -173.4479964, -175.64733392, -106.07927215,
                                 -163.40016243, 75.80865813, -154.73195911])

    exp_response = np.array([0.13197835, 0.24931321, 0.44351774,
                             0.39063076, 0.96770745, 0.04935129,
                             0.21431068, 0.15826555, 0.42403573])

    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
    assert_almost_equal(exp_scales, detector_extractor.scales)
    assert_almost_equal(exp_response, detector_extractor.responses)
    assert_almost_equal(exp_orientations,
                        np.rad2deg(detector_extractor.orientations), 5)

    detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])

Developer: AlexG31 | Project: scikit-image | Lines: 30 | Source: test_orb.py
Example 8: get_displacement

def get_displacement(image0, image1):
    """
    Estimate the displacement, in pixels, between two images using
    scikit-image ORB feature matching (likely less accurate than an
    OpenCV-based version).
    :param image0: reference image
    :param image1: target image
    :return: median displacement along each axis
    """
    import numpy as np
    from skimage.feature import match_descriptors, ORB, plot_matches
    from skimage.color import rgb2gray
    from scipy.spatial.distance import hamming

    image0_gray = rgb2gray(image0)
    image1_gray = rgb2gray(image1)
    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(image0_gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(image1_gray)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Sort the matches by Hamming distance; smaller is better.
    distances12 = []
    for match in matches12:
        distance = hamming(descriptors1[match[0]], descriptors2[match[1]])
        distances12.append(distance)

    indices = np.arange(len(matches12))
    indices = [index for (_, index) in sorted(zip(distances12, indices))]
    matches12 = matches12[indices]

    # Collect displacements from the 10 best matches. Note that skimage
    # keypoints are (row, col), so x1/x2 below actually hold row coordinates.
    dx_list = []
    dy_list = []
    for mat in matches12[:10]:
        # Get the matching keypoints for each of the images.
        img1_idx = mat[0]
        img2_idx = mat[1]
        (x1, y1) = keypoints1[img1_idx]
        (x2, y2) = keypoints2[img2_idx]
        dx_list.append(abs(x1 - x2))
        dy_list.append(abs(y1 - y2))

    dx_median = np.median(np.asarray(dx_list, dtype=np.double))
    dy_median = np.median(np.asarray(dy_list, dtype=np.double))
    # plot_matches(image0, image1, descriptors1, descriptors2, matches12[:10])
    return dx_median, dy_median

Developer: borevitzlab | Project: Gigvaision-ControlSoftware | Lines: 57 | Source: calibrate.py
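A quick sanity check for get_displacement is to compare an image with a shifted copy of itself; the medians should land near the known shift. A sketch (RGB input, because the function converts with rgb2gray internally):

import numpy as np
from skimage import data

img = data.astronaut()                              # RGB test image
shifted = np.roll(img, shift=(12, 7), axis=(0, 1))  # shift 12 rows, 7 columns

dr, dc = get_displacement(img, shifted)
print(dr, dc)  # expected to be close to 12 and 7 (row, col order; see note above)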
Example 9: PanormaGroup

class PanormaGroup(GroupChecker):
    """ Checks whether a new image can be considered part of a panorama
    with the previous images.
    Based on: http://nbviewer.ipython.org/github/scikit-image/skimage-demos/blob/master/pano/pano.ipynb?raw=true """
    def __init__(self, name, startGID=0):
        super(PanormaGroup, self).__init__(name, startGID=startGID)
        # "Oriented FAST and rotated BRIEF" feature detector
        self.orb = ORB(n_keypoints=4000, fast_threshold=0.05)
        # self.ImagesWithOverlap = []  # list of images which have overlap
        self.ImagesKeypointsDescriptors = []  # list of ORB (keypoints, descriptors) tuples
        # Minus one to compensate for the increment which will happen for the first image
        self.CurrentGroupID -= 1

    def NextGID(self, image):
        """ Calculates the next Group ID for the input image """
        NewImg = self.LoadImage(image, Greyscale=True, scale=0.25)
        self.orb.detect_and_extract(NewImg)
        NewImgKeyDescr = (self.orb.keypoints, self.orb.descriptors)
        for PreImgKeyDescr in reversed(self.ImagesKeypointsDescriptors):
            # Check for overlap
            matcheOfDesc = match_descriptors(PreImgKeyDescr[1], NewImgKeyDescr[1], cross_check=True)
            # Select keypoints from the source (image to be registered)
            # and target (reference image)
            src = NewImgKeyDescr[0][matcheOfDesc[:, 1]][:, ::-1]
            dst = PreImgKeyDescr[0][matcheOfDesc[:, 0]][:, ::-1]
            model_robust, inliers = ransac((src, dst), ProjectiveTransform,
                                           min_samples=4, residual_threshold=1, max_trials=300)
            NumberOfTrueMatches = np.sum(inliers)
            if NumberOfTrueMatches > 100:
                # Image has overlap
                logger.debug('Image {0} found a match! (No: of Matches={1})'.format(image, NumberOfTrueMatches))
                break
            else:
                logger.debug('Image {0} not matching..(No: of Matches={1})'.format(image, NumberOfTrueMatches))
                continue
        else:
            # None of the images in the for loop had any overlap, so this starts a new group.
            self.ImagesKeypointsDescriptors = []  # erase all previous group items
            # self.ImagesWithOverlap = []
            # Increment the group ID
            self.CurrentGroupID += 1
            logger.debug('Starting a new Panorama group (GID={0})'.format(self.CurrentGroupID))

        # Append the latest image to the current group
        self.ImagesKeypointsDescriptors.append(NewImgKeyDescr)
        # self.ImagesWithOverlap.append(NewImg)
        # Return the current group ID
        return self.CurrentGroupID

Developer: indiajoe | Project: PhotographyScripts | Lines: 57 | Source: GroupImages.py
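The heart of NextGID is its overlap test: match descriptors between two images, fit a ProjectiveTransform with RANSAC, and count the inliers. A standalone sketch of just that test, assuming two grayscale images imgA and imgB are already loaded and reusing the class's threshold of 100 inliers:

import numpy as np
from skimage.feature import ORB, match_descriptors
from skimage.transform import ProjectiveTransform
from skimage.measure import ransac

def images_overlap(imgA, imgB, min_inliers=100):
    """Return True if a RANSAC-verified projective model links the two images."""
    orb = ORB(n_keypoints=4000, fast_threshold=0.05)

    orb.detect_and_extract(imgA)
    kpA, descA = orb.keypoints, orb.descriptors
    orb.detect_and_extract(imgB)
    kpB, descB = orb.keypoints, orb.descriptors

    matches = match_descriptors(descA, descB, cross_check=True)
    # Flip (row, col) keypoints to (x, y) for the transform estimation.
    src = kpB[matches[:, 1]][:, ::-1]
    dst = kpA[matches[:, 0]][:, ::-1]

    model, inliers = ransac((src, dst), ProjectiveTransform,
                            min_samples=4, residual_threshold=1, max_trials=300)
    return np.sum(inliers) > min_inliers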
Example 10: image_features_orb

import numpy as np
from skimage.feature import ORB

def image_features_orb(img, keypoints):
    # X is the feature vector, with one row of features per image.
    Xsize = 2 * keypoints
    X = np.zeros(Xsize, dtype=float)
    # Extract keypoints using scikit-image's ORB.
    orb = ORB(downscale=1.2, n_scales=8, n_keypoints=keypoints, fast_n=4,
              fast_threshold=0.00001, harris_k=0.01)
    orb.detect_and_extract(img)
    # Flatten the (keypoints, 2) array of (row, col) coordinates into X.
    # Note this assumes exactly `keypoints` keypoints were detected.
    X[0:Xsize] = np.reshape(orb.keypoints, (1, Xsize))
    return X

Developer: kailex | Project: Bowl | Lines: 10 | Source: Prepare_Features.py
Example 11: orb_extractor

def orb_extractor(img, n_keypoints=100):
    """Try ORB binary descriptors using binaries created by Otsu's method."""
    # Note: n_keypoints must be passed by keyword -- ORB's first positional
    # parameter is `downscale`, so the original ORB(n_keypoints) silently set
    # the wrong parameter.
    descriptor_extractor = ORB(n_keypoints=n_keypoints)

    # Extract keypoints and descriptors from the image.
    descriptor_extractor.detect_and_extract(img)
    keypoints = descriptor_extractor.keypoints
    descriptors = descriptor_extractor.descriptors

    return keypoints, descriptors

Developer: ThunderShiviah | Project: AllenBrainAtlasAPI | Lines: 12 | Source: register_methods.py
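A possible call, using a bundled scikit-image picture as a stand-in for the binarized input the docstring mentions:

from skimage import data
from skimage.color import rgb2gray

img = rgb2gray(data.astronaut())
keypoints, descriptors = orb_extractor(img, n_keypoints=100)
print(keypoints.shape, descriptors.shape)  # (100, 2) and (100, 256) if 100 keypoints were found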
Example 12: __init__

def __init__(self, name, startGID=0):
    super(PanormaGroup, self).__init__(name, startGID=startGID)
    # "Oriented FAST and rotated BRIEF" feature detector
    self.orb = ORB(n_keypoints=4000, fast_threshold=0.05)
    # self.ImagesWithOverlap = []  # list of images which have overlap
    self.ImagesKeypointsDescriptors = []  # list of ORB (keypoints, descriptors) tuples
    # Minus one to compensate for the increment which will happen for the first image
    self.CurrentGroupID -= 1

Developer: indiajoe | Project: PhotographyScripts | Lines: 8 | Source: GroupImages.py
Example 13: getDisplacement

import numpy as np
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage.feature import ORB, match_descriptors, plot_matches
from scipy.spatial.distance import hamming

def getDisplacement(Image0, Image1):
    Image0Gray = rgb2gray(Image0)
    Image1Gray = rgb2gray(Image1)
    descriptor_extractor = ORB(n_keypoints=200)

    descriptor_extractor.detect_and_extract(Image0Gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    descriptor_extractor.detect_and_extract(Image1Gray)
    keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Sort the matches by Hamming distance; smaller is better.
    distances12 = []
    for match in matches12:
        distance = hamming(descriptors1[match[0]], descriptors2[match[1]])
        distances12.append(distance)

    indices = np.arange(len(matches12))  # the original used np.range, which does not exist
    indices = [index for (_, index) in sorted(zip(distances12, indices))]
    matches12 = matches12[indices]

    # Collect displacements from the 10 best matches.
    dxList = []
    dyList = []
    for mat in matches12[:10]:
        # Get the matching keypoints for each of the images.
        img1_idx = mat[0]
        img2_idx = mat[1]
        # skimage keypoints are (row, col).
        (x1, y1) = keypoints1[img1_idx]
        (x2, y2) = keypoints2[img2_idx]
        dxList.append(abs(x1 - x2))
        dyList.append(abs(y1 - y2))

    dxMedian = np.median(np.asarray(dxList, dtype=np.double))
    dyMedian = np.median(np.asarray(dyList, dtype=np.double))
    # plot_matches expects an Axes object and keypoints (not descriptors):
    fig, ax = plt.subplots()
    plot_matches(ax, Image0, Image1, keypoints1, keypoints2, matches12[:10])
    return dxMedian, dyMedian

Developer: gitter-badger | Project: Gigvaision-ControlSoftware | Lines: 45 | Source: pantiltzoomlib.py
Example 14: test_descriptor_orb

def test_descriptor_orb():
    detector_extractor = ORB(fast_n=12, fast_threshold=0.20)
    exp_descriptors = np.array(
        [[ True, False,  True,  True, False, False, False, False, False, False],
         [False, False,  True,  True, False,  True,  True, False,  True,  True],
         [ True, False, False, False,  True, False,  True,  True,  True, False],
         [ True, False, False,  True, False,  True,  True, False, False, False],
         [False,  True,  True,  True, False, False, False,  True,  True, False],
         [False, False, False, False, False,  True, False,  True,  True,  True],
         [False,  True,  True,  True,  True, False, False,  True, False,  True],
         [ True,  True,  True, False,  True,  True,  True,  True, False, False],
         [ True,  True, False,  True,  True,  True,  True, False, False, False],
         [ True, False, False, False, False,  True, False, False,  True,  True],
         [ True, False, False, False,  True,  True,  True, False, False, False],
         [False, False,  True, False,  True, False, False,  True, False, False],
         [False, False,  True,  True, False, False, False, False, False,  True],
         [ True,  True, False, False, False,  True,  True,  True,  True,  True],
         [ True,  True,  True, False, False,  True, False,  True,  True, False],
         [False,  True,  True, False, False,  True,  True,  True,  True,  True],
         [ True,  True,  True, False, False, False, False,  True,  True,  True],
         [False, False, False, False,  True, False, False,  True,  True, False],
         [False,  True, False, False,  True, False, False, False,  True,  True],
         [ True, False,  True, False, False, False,  True,  True, False, False]],
        dtype=bool)

    detector_extractor.detect(img)
    detector_extractor.extract(img, detector_extractor.keypoints,
                               detector_extractor.scales,
                               detector_extractor.orientations)
    assert_equal(exp_descriptors,
                 detector_extractor.descriptors[100:120, 10:20])

    detector_extractor.detect_and_extract(img)
    assert_equal(exp_descriptors,
                 detector_extractor.descriptors[100:120, 10:20])

Developer: AlexG31 | Project: scikit-image | Lines: 34 | Source: test_orb.py
Example 15: get_translation_tool

def get_translation_tool(self, n_keypoints=1000):
    # Convert images to grayscale.
    src_image = rgb2gray(self.src_image)
    dst_image = rgb2gray(self.dst_image)

    # Initiate an ORB object, which can extract features and descriptors from
    # images. Set the number of features to find (more = more accurate).
    descriptor_extractor = ORB(n_keypoints=n_keypoints)

    # Extract features and descriptors from the source image.
    descriptor_extractor.detect_and_extract(src_image)
    self.keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors

    # Extract features and descriptors from the destination image.
    descriptor_extractor.detect_and_extract(dst_image)
    self.keypoints2 = descriptor_extractor.keypoints
    descriptors2 = descriptor_extractor.descriptors

    # Match the descriptors, rating how similar each pair is.
    self.matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)

    # Select coordinates from the source and destination images based on the
    # indices returned by match_descriptors.
    src = self.keypoints1[self.matches12[:, 0]][:, ::-1]
    dst = self.keypoints2[self.matches12[:, 1]][:, ::-1]

    # Filter out the outliers and generate the transformation matrix from the inliers only.
    model_robust, inliers = \
        ransac((src, dst), ProjectiveTransform,
               min_samples=4, residual_threshold=2)

    # "model_robust" holds the transformation matrix and can translate any
    # coordinate point from the source to the destination image.
    return model_robust, inliers

Developer: VenturaFranklin | Project: TuPLE | Lines: 36 | Source: automatic_registration_class.py
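The returned model_robust is callable and maps (x, y) points from the source image into the destination image. A brief sketch, where reg is a hypothetical instance of the class with src_image and dst_image already set:

import numpy as np

model_robust, inliers = reg.get_translation_tool(n_keypoints=1000)

# ProjectiveTransform objects map (x, y) coordinate pairs when called.
pts_src = np.array([[10.0, 20.0], [100.0, 50.0]])
pts_dst = model_robust(pts_src)
print(pts_dst)
print(model_robust.inverse(pts_dst))  # maps back to the source image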
Example 16: test_no_descriptors_extracted_orb

def test_no_descriptors_extracted_orb():
    # A flat image yields no keypoints, so detect_and_extract raises.
    img = np.ones((128, 128))
    detector_extractor = ORB()
    with testing.raises(RuntimeError):
        detector_extractor.detect_and_extract(img)

Developer: TheArindham | Project: scikit-image | Lines: 5 | Source: test_orb.py
Example 17: import

from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_harris,
                             corner_peaks, ORB, plot_matches)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
import numpy as np

# The original snippet passed a 1-D array (np.array([2, 6, 4, 8, 9])) to
# detect_and_extract, which fails: ORB requires a 2-D grayscale image.
test = rgb2gray(data.astronaut())

descriptor_extractor = ORB(n_keypoints=200)
descriptor_extractor.detect_and_extract(test)

Developer: SiegmannGiS | Project: master2 | Lines: 14 | Source: feature_detector.py
Example 18: import

from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_harris,
                             corner_peaks, ORB, plot_matches)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt

img1 = rgb2gray(data.coins())
img2 = tf.rotate(img1, 180)
tform = tf.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
                           translation=(0, -200))
img3 = tf.warp(img1, tform)

descriptor_extractor = ORB(n_keypoints=200)

descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors

descriptor_extractor.detect_and_extract(img3)
keypoints3 = descriptor_extractor.keypoints
descriptors3 = descriptor_extractor.descriptors

matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)

Developer: ralbayaty | Project: KaggleRetina | Lines: 30 | Source: orb.py
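The snippet stops after computing matches12 and matches13; a natural continuation, following the scikit-image gallery this code is based on, is to visualize the matches with plot_matches (a sketch using the variables defined above):

fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()

plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
ax[0].set_title("Original vs. rotated")

plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
ax[1].set_title("Original vs. affine-transformed")

plt.show()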
Example 19: main

def main():
    image_base_dir = '/home/dek/makerfaire-booth/2018/burger/experimental/dek/train_object_detector/decoded'
    canonical_dir = 'canonical'
    # template = os.path.join(image_base_dir, 'bottombun.0.00.27.34.-24.61.0.81.png')
    fig, axes = plt.subplots(7, 7, figsize=(7, 6), sharex=True, sharey=True)
    fig.delaxes(axes[0][0])
    ssims = numpy.zeros((len(BurgerElement.__members__), len(BurgerElement.__members__)), dtype=float)
    mses = numpy.zeros((len(BurgerElement.__members__), len(BurgerElement.__members__)), dtype=float)
    for i, layer in enumerate(BurgerElement.__members__):
        template = os.path.join(canonical_dir, '%s.png' % layer)
        img1 = imread(template)
        # img1_padded = numpy.zeros((WIDTH, HEIGHT, 3), dtype=numpy.uint8)
        img1_padded = numpy.resize([255, 255, 255], (WIDTH, HEIGHT, 3))
        s = img1.shape
        w = s[0]
        h = s[1]
        # Centre the template inside the white padded canvas.
        nb = img1_padded.shape[0]
        na = img1.shape[0]
        lower1 = (nb // 2) - (na // 2)
        upper1 = (nb // 2) + (na // 2)
        nb = img1_padded.shape[1]
        na = img1.shape[1]
        lower2 = (nb // 2) - (na // 2)
        upper2 = (nb // 2) + (na // 2)
        img1_padded[lower1:upper1, lower2:upper2] = img1
        img1_padded_float = img1_padded.astype(numpy.float64) / 255.
        print(img1_padded_float.shape)
        img1_gray = rgb2gray(img1_padded_float)
        descriptor_extractor = ORB()
        try:
            descriptor_extractor.detect_and_extract(img1_gray)
        except RuntimeError:
            continue
        keypoints1 = descriptor_extractor.keypoints
        descriptors1 = descriptor_extractor.descriptors
        axes[i][0].imshow(img1_padded_float)
        axes[i][0].set_title("Template image")
        for j, layer2 in enumerate(BurgerElement.__members__):
            rot, tx, ty, scale = get_random_orientation()
            img2 = draw_example(layer2, WIDTH, HEIGHT, rot, tx, ty, scale)
            # match = os.path.join(canonical_dir, '%s.png' % layer2)
            # img2 = imread(match)
            img2_padded = numpy.resize([255, 255, 255], (WIDTH, HEIGHT, 3))
            s = img2.shape
            img2_padded[:s[0], :s[1]] = img2
            img2_padded_float = img2_padded.astype(numpy.float64) / 255.
            img2_gray = rgb2gray(img2_padded_float)
            try:
                descriptor_extractor.detect_and_extract(img2_gray)
            except RuntimeError:
                continue
            keypoints2 = descriptor_extractor.keypoints
            descriptors2 = descriptor_extractor.descriptors
            matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
            src = keypoints2[matches12[:, 1]][:, ::-1]
            dst = keypoints1[matches12[:, 0]][:, ::-1]
            model_robust, inliers = \
                ransac((src, dst), SimilarityTransform,
                       min_samples=4, residual_threshold=2)
            if not model_robust:
                print("bad")
                continue
            img2_transformed = transform.warp(img2_padded_float, model_robust.inverse, mode='constant', cval=1)
            sub = img2_transformed - img1_padded_float
            ssim = compare_ssim(img2_transformed, img1_padded_float, win_size=5, multichannel=True)
            mse = compare_mse(img2_transformed, img1_padded_float)
            ssims[i, j] = ssim
            mses[i, j] = mse
            axes[0][j].imshow(img2_padded_float)
            axes[0][j].set_title("Match image")
            axes[i][j].imshow(img2_transformed)
            axes[i][j].set_title("Transformed image")
            axes[i][j].set_xlabel("SSIM: %9.4f MSE: %9.4f" % (ssim, mse))
            # ax = plt.gca()
            # plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)
    print(ssims)
    print(numpy.argmax(ssims, axis=1))
    print(numpy.argmin(mses, axis=1))
# ... (part of the code omitted here) ...

Developer: google | Project: makerfaire-2016 | Lines: 101 | Source: compare.py
Example 20: main

def main():
    image_base_dir = '/home/dek/makerfaire-booth/2018/burger/experimental/dek/train_object_detector/decoded'
    canonical_dir = 'canonical'
    # template = os.path.join(image_base_dir, 'bottombun.0.00.27.34.-24.61.0.81.png')
    template = os.path.join(canonical_dir, 'patty.png')
    img1 = imread(template)
    # img1_padded = numpy.zeros((256, 256, 3), dtype=numpy.uint8)
    img1_padded = numpy.resize([255, 255, 255], (256, 256, 3))
    s = img1.shape
    img1_padded[:s[0], :s[1]] = img1
    img1_gray = rgb2gray(img1)
    descriptor_extractor = ORB()
    descriptor_extractor.detect_and_extract(img1_gray)
    keypoints1 = descriptor_extractor.keypoints
    descriptors1 = descriptor_extractor.descriptors
    # g = glob.glob(os.path.join(image_base_dir, 'patty*.nobox.png'))
    # for moving in g:
    while True:
        rot, tx, ty, scale = get_random_orientation()
        # img2 = imread(moving)
        img2 = draw_example('patty', 256, 256, rot, tx, ty, scale)
        img2_gray = rgb2gray(img2)
        try:
            descriptor_extractor.detect_and_extract(img2_gray)
        except RuntimeError:
            continue
        keypoints2 = descriptor_extractor.keypoints
        descriptors2 = descriptor_extractor.descriptors
        matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
        src = keypoints2[matches12[:, 1]][:, ::-1]
        dst = keypoints1[matches12[:, 0]][:, ::-1]
        model_robust, inliers = \
            ransac((src, dst), SimilarityTransform,
                   min_samples=4, residual_threshold=2)
        if not model_robust:
            print("bad")
            continue
        img2_transformed = transform.warp(img2, model_robust.inverse, mode='constant', cval=1)
        img1_padded_float = img1_padded.astype(numpy.float64) / 255.
        sub = img2_transformed - img1_padded_float
        print(compare_ssim(img2_transformed, img1_padded_float, win_size=5, multichannel=True))
        fig, axes = plt.subplots(2, 2, figsize=(7, 6), sharex=True, sharey=True)
        ax = axes.ravel()
        ax[0].imshow(img1_padded_float)
        ax[1].imshow(img2)
        ax[1].set_title("Template image")
        ax[2].imshow(img2_transformed)
        ax[2].set_title("Matched image")
        ax[3].imshow(sub)
        ax[3].set_title("Subtracted image")
        # plt.gray()
        # ax = plt.gca()
        # plot_matches(ax, img1, img2, keypoints1, keypoints2, matches12)
        plt.show()

Developer: google | Project: makerfaire-2016 | Lines: 67 | Source: blob.py
Note: The skimage.feature.ORB class examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and the copyright of each fragment remains with its original author. Consult the corresponding project's License before using or redistributing the code; do not reproduce without permission.