Python measure.ransac Function Code Examples


This article collects typical usage examples of the Python function skimage.measure.ransac. If you have been wondering how exactly to call ransac in Python, or looking for working examples of it, the curated code samples below should help.



Twenty code examples of the ransac function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
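Before the project examples, here is a minimal, self-contained sketch of the typical call pattern: ransac(data, ModelClass, min_samples, residual_threshold, ...) returns a fitted model and a boolean inlier mask. The synthetic data, seed, and thresholds below are illustrative assumptions and are not taken from any of the projects listed here.

import numpy as np
from skimage.measure import LineModelND, ransac

# Synthetic data: points along y = 2x + 1 with small noise, plus gross outliers.
rng = np.random.default_rng(0)
x = np.linspace(0, 10, 100)
y = 2.0 * x + 1.0 + rng.normal(scale=0.1, size=x.size)
data = np.column_stack([x, y])
data[:10] = rng.uniform(0, 50, size=(10, 2))  # corrupt the first 10 points

# Each trial draws min_samples points, fits a LineModelND, and counts as
# inliers the points whose residual is below residual_threshold.
model_robust, inliers = ransac(data, LineModelND, min_samples=2,
                               residual_threshold=0.5, max_trials=1000)

print('Inliers: %d of %d' % (inliers.sum(), data.shape[0]))
print('Line origin, direction:', model_robust.params)

Most of the examples that follow use this same pattern, swapping in LineModel/CircleModel or an AffineTransform/ProjectiveTransform as the model class and, for geometric transforms, passing a (src, dst) tuple of matched coordinates as the data.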

Example 1: loop

    def loop(self):
        while not rospy.is_shutdown():
            ranges = np.array(self.scan.ranges)
            angles = np.array([self.scan.angle_min + self.scan.angle_increment * i for i in range(len(ranges))])

            # Filter out ranges (and corresponding angles) not in the interval [range_min, range_max].
            good_indices = np.where(np.logical_and(ranges > self.scan.range_min, ranges < self.scan.range_max))
            ranges = ranges[good_indices]
            angles = angles[good_indices]

            # Skip iteration if too few good values.
            if np.size(good_indices) < 2:
                continue

            # Points in Cartesian coordinates.
            points = np.array([pol2cart(dist, angle) for dist, angle in itertools.izip(ranges, angles)])

            # Split points in the middle.
            left_points = points[:len(points) / 2]
            right_points = points[len(points) / 2:]

            # Fit line models with RANSAC algorithm.
            left_model, left_inliers = ransac(left_points, LineModel, min_samples=5, residual_threshold=0.1, max_trials=100)
            right_model, right_inliers = ransac(right_points, LineModel, min_samples=5, residual_threshold=0.1, max_trials=100)

            # Determine validity of the lines
            left_valid = True
            right_valid = True

            if np.size(left_inliers) < 15:
                left_valid = False

            if np.size(right_inliers) < 15:
                right_valid = False

            # Publish row message.
            self.publish_wall(left_model, left_valid, right_model, right_valid)

            # RViz visualization of lines and which points are considered in/outliers.
            # Predict y's using the two outermost x's. This gives us two points on each line.
            left_wall_x = np.array([left_points[0][0], left_points[-1][0]])
            right_wall_x = np.array([right_points[0][0], right_points[-1][0]])
            left_wall_y = left_model.predict_y(left_wall_x)
            right_wall_y = right_model.predict_y(right_wall_x)

            # Publish markers.
            self.publish_visualization_marker(left_wall_x, left_wall_y, Marker.LINE_STRIP, "line_left", (0.2, 1.0, 0.2))
            self.publish_visualization_marker(right_wall_x, right_wall_y, Marker.LINE_STRIP, "line_right", (0.2, 0.2, 1.0))
            self.publish_visualization_marker(left_points[left_inliers, 0], left_points[left_inliers, 1], Marker.POINTS, "left_inliers", (0.5, 1.0, 0.5))
            self.publish_visualization_marker(right_points[right_inliers, 0], right_points[right_inliers, 1], Marker.POINTS, "right_inliers", (0.5, 0.5, 1.0))
            left_outliers = left_inliers == False
            right_outliers = right_inliers == False
            self.publish_visualization_marker(left_points[left_outliers, 0], left_points[left_outliers, 1], Marker.POINTS, "left_outliers", (0.0, 0.5, 0.0))
            self.publish_visualization_marker(right_points[right_outliers, 0], right_points[right_outliers, 1], Marker.POINTS, "right_outliers", (0.0, 0.0, 0.5))

            self.rate.sleep()
Author: Birkehoj, Project: frobit_lego_transporter, Lines: 56, Source: wall_extractor_node.py


Example 2: run3

    def run3(self):
        """ Cette fonction test des alternatives à SIFT et ORB. Ne fonctionne pas."""
        for x in xrange(len(self.stack)-1):
            print('Processing image ' + str(x+1))
            im1,im2 = 255.*gaussian_filter(self.stack[x,...], sqrt(self.initial_sigma**2 - 0.25)), 255.*gaussian_filter(self.stack[x+1,...], sqrt(self.initial_sigma**2 - 0.25))
            im1,im2 = enhance_contrast(normaliser(im1), square(3)), enhance_contrast(normaliser(im2), square(3))
            im1, im2 = normaliser(im1), normaliser(im2)
            
            b = cv2.BRISK()
            #b.create("Feature2D.BRISK")
            
            k1,d1 = b.detectAndCompute(im1,None)
            k2,d2 = b.detectAndCompute(im2,None)
            
            bf = cv2.BFMatcher(cv2.NORM_HAMMING)
            matches = bf.match(d1,d2)
            
            g1,g2 = [],[]
            for i in matches:
                g1.append(k1[i.queryIdx].pt)
                g2.append(k2[i.trainIdx].pt)

            model, inliers = ransac((np.array(g1), np.array(g2)), AffineTransform, min_samples=3, residual_threshold=self.min_epsilon, max_trials=self.max_trials, stop_residuals_sum=self.min_inlier_ratio)
            
            self.stack[x+1,...] = warp(self.stack[x+1,...], AffineTransform(rotation=model.rotation, translation=model.translation), output_shape=self.stack[x+1].shape)

        self.stack = self.stack.astype(np.uint8)
Author: atbd, Project: PythonUtile, Lines: 27, Source: align.py


Example 3: run4

    def run4(self):
        """ Cette fonction recadre les images grâce à SURF et RANSAC, fonctionne bien."""
        for x in xrange(len(self.stack)-1):
            print('Processing image ' + str(x+1))
            im1,im2 = 255.*gaussian_filter(self.stack[x,...], sqrt(self.initial_sigma**2 - 0.25)), 255.*gaussian_filter(self.stack[x+1,...], sqrt(self.initial_sigma**2 - 0.25))
            im1,im2 = enhance_contrast(normaliser(im1), square(5)), enhance_contrast(normaliser(im2), square(5))
            im1, im2 = normaliser(im1), normaliser(im2)
            
            b = cv2.SURF()
            #b.create("Feature2D.BRISK")
            
            k1,d1 = b.detectAndCompute(im1,None)
            k2,d2 = b.detectAndCompute(im2,None)
            
            bf = cv2.BFMatcher()
            matches = bf.knnMatch(d1,d2, k=2)

            # Apply ratio test
            good = []
            for m,n in matches:
                if m.distance < 0.75*n.distance:
                    good.append(m)
            
            g1,g2 = [],[]
            for i in good:
                g1.append(k1[i.queryIdx].pt)
                g2.append(k2[i.trainIdx].pt)

            model, inliers = ransac((np.array(g1), np.array(g2)), AffineTransform, min_samples=3, residual_threshold=self.min_epsilon, max_trials=self.max_trials, stop_residuals_sum=self.min_inlier_ratio)
            
            self.stack[x+1,...] = warp(self.stack[x+1,...], AffineTransform(rotation=model.rotation, translation=model.translation), output_shape=self.stack[x+1].shape)

        self.stack = self.stack.astype(np.uint8)
Author: atbd, Project: PythonUtile, Lines: 33, Source: align.py


Example 4: calc_transformations

    def calc_transformations(self):
        print('Calculating each pair translation matrix')
        self.images[0].M = numpy.float32([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)

        for i in xrange(1, len(self.images)):
            image_1 = self.images[i]
            image_2 = self.images[i - 1]

            matches = bf.knnMatch(image_1.des, image_2.des, k=2)
            good = [m for m, n in matches if m.distance <
                    self.knnRatio * n.distance]

            src_pts = numpy.float32(
                [image_1.kp[m.queryIdx].pt for m in good]).reshape(-1, 2)
            dst_pts = numpy.float32(
                [image_2.kp[m.trainIdx].pt for m in good]).reshape(-1, 2)

            model_robust, _ = ransac((src_pts, dst_pts), TranslationTransform,
                                     min_samples=6,
                                     residual_threshold=self.ransacThreshold,
                                     max_trials=1000,
                                     stop_sample_num=0.9 * src_pts.shape[0])

            tx, ty = model_robust.params
            M = numpy.float32([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
            image_1.M = M.dot(image_2.M)

            tx, ty = image_1.M[0, 2], image_1.M[1, 2]
            if ty > 0 and ty > self.drift_y_down:
                self.drift_y_down = ty
            elif ty < 0 and ty < self.drift_y_up:
                self.drift_y_up = ty

            self.drift_x_max = tx
Author: tnq177, Project: cvaaa, Lines: 35, Source: translational_panorama.py


Example 5: test_ransac_is_data_valid

def test_ransac_is_data_valid():
    np.random.seed(1)

    is_data_valid = lambda data: data.shape[0] > 2
    model, inliers = ransac(np.empty((10, 2)), LineModel, 2, np.inf, is_data_valid=is_data_valid)
    assert_equal(model, None)
    assert_equal(inliers, None)
Author: almarklein, Project: scikit-image, Lines: 7, Source: test_fit.py


Example 6: test_ransac_is_model_valid

def test_ransac_is_model_valid():
    def is_model_valid(model, data):
        return False
    model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
                            is_model_valid=is_model_valid, random_state=1)
    assert_equal(model, None)
    assert_equal(inliers, None)
Author: Cadair, Project: scikit-image, Lines: 7, Source: test_fit.py


Example 7: test_ransac_is_data_valid

def test_ransac_is_data_valid():

    is_data_valid = lambda data: data.shape[0] > 2
    model, inliers = ransac(np.empty((10, 2)), LineModelND, 2, np.inf,
                            is_data_valid=is_data_valid, random_state=1)
    assert_equal(model, None)
    assert_equal(inliers, None)
Author: noahstier, Project: scikit-image, Lines: 7, Source: test_fit.py


Example 8: robustEstimate

def robustEstimate(ptsA, ptsB):
	"""
	Perform robust estimation on the given
	correspondences using RANSAC.

	Args:
	----
		ptsA: A 2 x N matrix of points.
		ptsB: A 2 x N matrix of points.

	Returns:
	-------
		The number of inliers within the points.
	"""
	src, dst, N = [], [], ptsA.shape[1]
	for i in xrange(N):
		src.append((ptsA[0, i], ptsA[1, i]))
		dst.append((ptsB[0, i], ptsB[1, i]))

	src, dst = np.asarray(src), np.asarray(dst)

	model = ProjectiveTransform()
	model.estimate(src, dst)
	model_robust, inliers = ransac((src, dst), ProjectiveTransform, min_samples=3, residual_threshold=2, max_trials=100)

	return inliers
Author: ev0, Project: k-db, Lines: 26, Source: homography.py


Example 9: auto_find_center_rings

def auto_find_center_rings(avg_img, sigma=1, no_rings=4, min_samples=3,
                           residual_threshold=1, max_trials=1000):
    """This will find the center of the speckle pattern and the radii of the
    most intense rings.

    Parameters
    ----------
    avg_img : 2D array
        the averaged image of the speckle pattern
    sigma : float, optional
        Standard deviation of the Gaussian filter.
    no_rings : int, optional
        number of rings
    min_samples : int, optional
        The minimum number of data points to fit a model to.
    residual_threshold : float, optional
        Maximum distance for a data point to be classified as an inlier.
    max_trials : int, optional
        Maximum number of iterations for random sample selection.

    Returns
    -------
    center : tuple
        center co-ordinates of the speckle pattern
    image : 2D array
        Indices of pixels that belong to the rings,
        directly index into an array
    radii : list
        values of the radii of the rings

    Note
    ----
    scikit-image ransac
    method (http://www.imagexd.org/tutorial/lessons/1_ransac.html) is used to
    automatically find the center and the most intense rings.
    """

    image = img_as_float(color.rgb2gray(avg_img))
    edges = feature.canny(image, sigma)
    coords = np.column_stack(np.nonzero(edges))
    edge_pts_xy = coords[:, ::-1]
    radii = []

    for i in range(no_rings):
        model_robust, inliers = ransac(edge_pts_xy, CircleModel, min_samples,
                                       residual_threshold,
                                       max_trials=max_trials)
        if i == 0:
            center = int(model_robust.params[0]), int(model_robust.params[1])
        radii.append(model_robust.params[2])

        rr, cc = draw.circle_perimeter(center[1], center[0],
                                       int(model_robust.params[2]),
                                       shape=image.shape)
        image[rr, cc] = i + 1
        edge_pts_xy = edge_pts_xy[~inliers]

    return center, image, radii
Author: souravsingh, Project: scikit-beam, Lines: 58, Source: roi.py


Example 10: get_best_matches

def get_best_matches(k1, k2, matches):
    src = k1[matches[:,0]][:,::-1]
    dst = k2[matches[:,1]][:,::-1]
    # if there are not enough matches, this fails
    model_robust, inliers = ransac((src, dst), AffineTransform,
                                   min_samples=20, residual_threshold=1,
                                   max_trials=40)

    return model_robust, inliers
Author: johannah, Project: iceview, Lines: 9, Source: models.py


Example 11: test_ransac_is_model_valid

def test_ransac_is_model_valid():
    np.random.seed(1)

    def is_model_valid(model, data):
        return False
    model, inliers = ransac(np.empty((10, 2)), LineModel, 2, np.inf,
                            is_model_valid=is_model_valid)
    assert_equal(model, None)
    assert_equal(inliers, None)
Author: Autodidact24, Project: scikit-image, Lines: 9, Source: test_fit.py


Example 12: main

def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)

  # Read features.
  locations_1, _, descriptors_1, _, _ = feature_io.ReadFromFile(
      cmd_args.features_1_path)
  num_features_1 = locations_1.shape[0]
  tf.logging.info("Loaded image 1's %d features" % num_features_1)
  locations_2, _, descriptors_2, _, _ = feature_io.ReadFromFile(
      cmd_args.features_2_path)
  num_features_2 = locations_2.shape[0]
  tf.logging.info("Loaded image 2's %d features" % num_features_2)

  # Find nearest-neighbor matches using a KD tree.
  d1_tree = cKDTree(descriptors_1)
  _, indices = d1_tree.query(
      descriptors_2, distance_upper_bound=_DISTANCE_THRESHOLD)

  # Select feature locations for putative matches.
  locations_2_to_use = np.array([
      locations_2[i,]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])
  locations_1_to_use = np.array([
      locations_1[indices[i],]
      for i in range(num_features_2)
      if indices[i] != num_features_1
  ])

  # Perform geometric verification using RANSAC.
  _, inliers = ransac(
      (locations_1_to_use, locations_2_to_use),
      AffineTransform,
      min_samples=3,
      residual_threshold=20,
      max_trials=1000)

  tf.logging.info('Found %d inliers' % sum(inliers))

  # Visualize correspondences, and save to file.
  _, ax = plt.subplots()
  img_1 = mpimg.imread(cmd_args.image_1_path)
  img_2 = mpimg.imread(cmd_args.image_2_path)
  inlier_idxs = np.nonzero(inliers)[0]
  plot_matches(
      ax,
      img_1,
      img_2,
      locations_1_to_use,
      locations_2_to_use,
      np.column_stack((inlier_idxs, inlier_idxs)),
      matches_color='b')
  ax.axis('off')
  ax.set_title('DELF correspondences')
  plt.savefig(cmd_args.output_image)
Author: 812864539, Project: models, Lines: 56, Source: match_images.py


Example 13: landmark_registration

def landmark_registration(points_file1, points_file2, out_file, residual_threshold=2, max_trials=100, delimiter="\t"):
    points1 = pd.read_csv(points_file1, delimiter=delimiter)
    points2 = pd.read_csv(points_file2, delimiter=delimiter)

    src = np.concatenate([np.array(points1['x']).reshape([-1,1]), np.array(points1['y']).reshape([-1,1])], axis=-1)
    dst = np.concatenate([np.array(points2['x']).reshape([-1,1]), np.array(points2['y']).reshape([-1,1])], axis=-1)

    model = AffineTransform()
    model_robust, inliers = ransac((src, dst), AffineTransform, min_samples=3,
                                   residual_threshold=residual_threshold, max_trials=max_trials)
    pd.DataFrame(model_robust.params).to_csv(out_file, header=None, index=False, sep="\t")
Author: ThomasWollmann, Project: galaxy-image-analysis, Lines: 11, Source: landmark_registration.py


Example 14: punch

def punch(img):
    # Identifying the tissue punches in order to crop the image correctly.
    # Canny edges and RANSAC are used to fit a circle to the punch.
    # A mask is created.

    distance = 0
    r = 0

    float_im, orig, ihc = create_bin(img)
    gray = rgb2grey(orig)
    smooth = gaussian(gray, sigma=3)

    shape = np.shape(gray)
    l = shape[0]
    w = shape[1]

    x = l - 20
    y = w - 20

    rows = np.array([[x, x, x], [x + 1, x + 1, x + 1]])
    columns = np.array([[y, y, y], [y + 1, y + 1, y + 1]])

    corner = gray[rows, columns]

    thresh = np.mean(corner)
    print thresh
    binar = (smooth < thresh - 0.01)

    bin = remove_small_holes(binar, min_size=100000, connectivity=2)
    bin1 = remove_small_objects(bin, min_size=5000, connectivity=2)
    bin2 = gaussian(bin1, sigma=3)
    bin3 = (bin2 > 0)

    # eosin = IHC[:, :, 2]
    edges = canny(bin3)
    coords = np.column_stack(np.nonzero(edges))

    model, inliers = ransac(coords, CircleModel, min_samples=4, residual_threshold=1, max_trials=1000)

    # rr, cc = circle_perimeter(int(model.params[0]),
    #                          int(model.params[1]),
    #                          int(model.params[2]),
    #                          shape=im.shape)

    a, b = model.params[0], model.params[1]
    r = model.params[2]
    ny, nx = bin3.shape
    ix, iy = np.meshgrid(np.arange(nx), np.arange(ny))
    distance = np.sqrt((ix - b)**2 + (iy - a)**2)

    mask = np.ma.masked_where(distance > r, bin3)

    return distance, r, float_im, orig, ihc, bin3
Author: AidanRoss, Project: histology, Lines: 53, Source: ihc_analysis.py


Example 15: test_ransac_invalid_input

def test_ransac_invalid_input():
    with testing.raises(ValueError):
        ransac(np.zeros((10, 2)), None, min_samples=2,
               residual_threshold=0, max_trials=-1)
    with testing.raises(ValueError):
        ransac(np.zeros((10, 2)), None, min_samples=2,
               residual_threshold=0, stop_probability=-1)
    with testing.raises(ValueError):
        ransac(np.zeros((10, 2)), None, min_samples=2,
               residual_threshold=0, stop_probability=1.01)
Author: Cadair, Project: scikit-image, Lines: 10, Source: test_fit.py


Example 16: fit

    def fit(self, line_img):
        """Estimate the tranform matrix self.M based on a binary image
        with line detected.
        - `line_img`: image with two lines detected, representing the 
        left and right boundaries of lanes. In the transformed
        bird-eye view, the two boundaries should be roughly parallel.
        """
        # image shape
        H, W = line_img.shape[:2]
        # find line coordinates
        ys, xs = np.where(line_img > 0)
        # clustering of two lines
        cluster2 = KMeans(2)
        cluster2.fit(np.c_[xs, ys])
        # build robust linear model for each line
        linear_models = []
        for c in [0, 1]:
            i = (cluster2.labels_ == c)

            robust_model, inliers = ransac(np.c_[xs[i], ys[i]], LineModel, 
                                        min_samples=2, residual_threshold=1., max_trials=500)
            linear_models.append(robust_model)
        # get the vertices of a trapezoid as source points
        if self.forward_distance is None:
            middle_h = H/2 + 100#160
        else:
            middle_h = H - self.forward_distance
        line0 = [(linear_models[0].predict_x(H), H), (linear_models[0].predict_x(middle_h), middle_h)]
        line1 = [(linear_models[1].predict_x(H), H), (linear_models[1].predict_x(middle_h), middle_h)]
        src_pts = np.array(line0 + line1, dtype=np.float32)
        # get the vertices of destination points
        # here simply map it to a rect with same width/length from bottom
        bottom_x1, bottom_x2 = line0[0][0], line1[0][0]
        
        v = np.array(line0[1]) - np.array(line0[0])
        # it must be the same as source trapzoid length otherwise y_mpp will change
        L = H#int(np.sqrt(np.sum( v*v ))) #H
        dst_pts = np.array([(bottom_x1, H), (bottom_x1, H-L),
                           (bottom_x2, H), (bottom_x2, H-L)], 
                          dtype=np.float32)
        # estimate the transform matrix
        self.M =  cv2.getPerspectiveTransform(src_pts, dst_pts)
        self.invM = cv2.getPerspectiveTransform(dst_pts, src_pts)
        # estimate meter-per-pixel in the transformed image
        self.x_mpp = 3 / np.abs(bottom_x1-bottom_x2)
        self.y_mpp = self.estimate_ympp(line_img)
        return self
Author: dvu4, Project: CarND-Advanced-Lane-Lines, Lines: 47, Source: transform.py


Example 17: refineBoundaries

def refineBoundaries(img_orig, plate):
    np.random.seed(7)

    minr, minc, maxr, maxc = plate.bbox
    img_window = img_orig[minr: maxr, minc: maxc]

    plate_points = [(minc, minr), (maxc, minr), (maxc, maxr), (minc, maxr)]
    plate_x = minc
    plate_y = minr

    img_window = np.absolute(filters.prewitt_v(img_window))
    thresh = filters.threshold_otsu(img_window)
    img_window = img_window <= thresh
    labels = measure.label(img_window)

    points = []

    for region in measure.regionprops(labels):
        minr, minc, maxr, maxc = region.bbox
        ratio = float(maxr - minr) / float(maxc - minc)
        heigh = maxr - minr
        area = region.area

        if (ratio > 1 and area > 10 and heigh > 10):
            points.append((minc, minr, maxc, maxr))

    if len(points) > 1:
        points = np.array(points)
        x1 = np.min(points[:, 0])
        x2 = np.max(points[:, 2])

        ransac_model, inliers = measure.ransac(points[:, 0:2], measure.LineModel, 5, 3, max_trials=30)
        points = points[inliers]

        if ransac_model.params[1] != 0:
            average_heigh = int(np.mean(points[:, 3]) - np.mean(points[:, 1]))
            pad_t = average_heigh / 2
            pad_b = average_heigh + (average_heigh / 3)

            y1 = ransac_model.predict_y(x1)
            y2 = ransac_model.predict_y(x2)

            refined_points = [(x1, y1 - pad_t), (x2, y2 - pad_t), (x2, y2 + pad_b), (x1, y1 + pad_b)]
            refined_points = [(x + plate_x, y + plate_y) for (x, y) in refined_points]
            return refined_points

    return plate_points
Author: allansp84, Project: license-plate, Lines: 47, Source: functions.py


Example 18: NextGID

    def NextGID(self,image):
        """ Calculates the next Group ID for the input image """
        NewImg = self.LoadImage(image,Greyscale=True,scale=0.25)
        self.orb.detect_and_extract(NewImg)
        NewImgKeyDescr = (self.orb.keypoints, self.orb.descriptors)

        for PreImgKeyDescr in reversed(self.ImagesKeypointsDescriptors):
            # Check for overlap
            matcheOfDesc = match_descriptors(PreImgKeyDescr[1], NewImgKeyDescr[1], cross_check=True)

            # Select keypoints from the source (image to be registered)
            # and target (reference image)
            src = NewImgKeyDescr[0][matcheOfDesc[:, 1]][:, ::-1]
            dst = PreImgKeyDescr[0][matcheOfDesc[:, 0]][:, ::-1]

            model_robust, inliers = ransac((src, dst), ProjectiveTransform,
                                           min_samples=4, residual_threshold=1, max_trials=300)                
                
            NumberOfTrueMatches = np.sum(inliers)  #len(inliers[inliers])

            if NumberOfTrueMatches > 100 :
                # Image has overlap
                logger.debug('Image {0} found a match! (No: of Matches={1})'.format(image,NumberOfTrueMatches))
                break
            else :
                logger.debug('Image {0} not matching..(No: of Matches={1})'.format(image,NumberOfTrueMatches))
                continue

        else:
            # None of the images in the for loop has any overlap...So this is a new Group
            self.ImagesKeypointsDescriptors = [] # Erase all previous group items
            # self.ImagesWithOverlap = [] 

            # Increment Group ID
            self.CurrentGroupID += 1
            logger.debug('Starting a new Panorama group (GID={0})'.format(self.CurrentGroupID))

        # Append the latest image to the current group
        self.ImagesKeypointsDescriptors.append(NewImgKeyDescr) 
        # self.ImagesWithOverlap.append(NewImg)

        # Return the current  group ID
        return self.CurrentGroupID
Author: indiajoe, Project: PhotographyScripts, Lines: 43, Source: GroupImages.py


Example 19: test_ransac_geometric

def test_ransac_geometric():
    np.random.seed(1)

    # generate original data without noise
    src = 100 * np.random.random((50, 2))
    model0 = AffineTransform(scale=(0.5, 0.3), rotation=1, translation=(10, 20))
    dst = model0(src)

    # add some faulty data
    outliers = (0, 5, 20)
    dst[outliers[0]] = (10000, 10000)
    dst[outliers[1]] = (-100, 100)
    dst[outliers[2]] = (50, 50)

    # estimate parameters of corrupted data
    model_est, inliers = ransac((src, dst), AffineTransform, 2, 20)

    # test whether estimated parameters equal original parameters
    assert_almost_equal(model0._matrix, model_est._matrix)
    assert np.all(np.nonzero(inliers == False)[0] == outliers)
Author: almarklein, Project: scikit-image, Lines: 20, Source: test_fit.py


Example 20: _orb_ransac_shift

    def _orb_ransac_shift(self, im1, im2, template):
        descriptor_extractor = ORB() #n_keypoints=self.parameters['n_keypoints'])
        key1, des1 = self._find_key_points(descriptor_extractor, im1)
        key2, des2 = self._find_key_points(descriptor_extractor, im2)
        matches = match_descriptors(des1, des2, cross_check=True)

        # estimate affine transform model using all coordinates
        src = key1[matches[:, 0]]
        dst = key2[matches[:, 1]]

        # robustly estimate affine transform model with RANSAC
        model_robust, inliers = ransac((src, dst), AffineTransform,
                                       min_samples=3, residual_threshold=1,
                                       max_trials=100)
#        diff = []
#        for p1, p2 in zip(src[inliers], dst[inliers]):
#            diff.append(p2-p1)
#        return np.mean(diff, axis=0)

        return model_robust.translation
Author: DiamondLightSource, Project: Savu, Lines: 20, Source: projection_shift.py



Note: The skimage.measure.ransac examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution or use must follow the corresponding project's license. Do not reproduce without permission.

