Python morphology.skeletonize Function: Code Examples


This article collects typical usage examples of the Python function skimage.morphology.skeletonize. If you are unsure how skeletonize is called, how it is used in practice, or what working examples look like, the hand-picked code samples below should help.



The following 20 code examples of the skeletonize function are shown below, sorted by popularity by default. Each example is taken from an open-source project, with the original author and source file credited at the end of the snippet.
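Before the examples, here is a minimal sketch of the basic call pattern (the synthetic input below is made up for illustration): skeletonize expects a binary 2-D image and returns a boolean array of the same shape in which every foreground object is thinned to a one-pixel-wide skeleton.

import numpy as np
from skimage.morphology import skeletonize

# Hypothetical binary input: a filled rectangle inside an empty image.
image = np.zeros((50, 50), dtype=bool)
image[10:40, 10:40] = True

# skeletonize() thins each foreground object to a 1-pixel-wide skeleton
# and returns a boolean array of the same shape as the input.
skeleton = skeletonize(image)
print(skeleton.shape, skeleton.dtype, int(skeleton.sum()))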

Example 1: edge_detect

def edge_detect(depth, color):
    # Get the gradient direction from the depth image
    graddir = grad_dir(depth)
    # plt.imshow(graddir)
    # plt.show()

    # kernel for dilation
    kernel = np.ones((5, 5), np.uint8)

    # Threshold the image so it is in the RGB color space
    bw2 = (((graddir - graddir.min()) / (graddir.max() - graddir.min())) * 255.9).astype(np.uint8)

    # removes the salt and pepper noise
    # by replacing pixels with
    # the median value of the area
    median = cv2.medianBlur(bw2, 9)

    # find edges with the canny edge detector
    bw2 = auto_canny(median)
    dilation2 = cv2.dilate(bw2, kernel, iterations=1)
    skel2 = morphology.skeletonize(dilation2 > 0)

    # Now run canny edge detector on the colour image
    # create a CLAHE object (Arguments are optional).
    # this does adaptive histogram equalization on the image
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    cl1 = clahe.apply(color)
    # median = cv2.medianBlur(bw2,5)
    # bw1 = cv2.GaussianBlur(cl1, (3,3), 0)

    # Perform canny edge detection on colour image, twice
    # 1. Detect outlines and fill with close function
    # 2. Detect outlines of now filled contoured image
    bw1 = auto_canny(cl1)
    closing = cv2.morphologyEx(bw1, cv2.MORPH_CLOSE, kernel, iterations=6)
    # dilation1 = cv2.dilate(bw1,kernel,iterations = 1)
    # skel1 = morphology.skeletonize(dilation1 > 0)
    bw1 = auto_canny(closing)

    # combine the edges from the color image and the depth image
    orop = (np.logical_or(bw1, skel2)).astype('uint8')

    # display results
    plt.subplot(1, 2, 1), plt.imshow(graddir, cmap='jet')
    plt.title('gradient dir'), plt.xticks([]), plt.yticks([])
    plt.subplot(1, 2, 2), plt.imshow(median, cmap='gray')
    plt.title('blurred image'), plt.xticks([]), plt.yticks([])
    plt.show()

    # dilate and skeletonize on combined image
    kernel = np.ones((5, 5), np.uint8)
    dilation = cv2.dilate(orop, kernel, iterations=1)
    img_out = morphology.skeletonize(dilation > 0)

    return img_out
Author: Jordan-Zhu | Project: EdgeSegmentFitting | Lines: 55 | Source: new_main.py


Example 2: get_mask

def get_mask(img_org):
    noise_ratio = 0.3
    img = img_org > noise_ratio * img_org.max()
    skeleton = skeletonize(img)
    
    mask = dilation(skeleton > 0, disk(2))
    return mask
Author: ymnliu | Project: cyto_lib | Lines: 7 | Source: local_feature.py


Example 3: get_descriptor_lvX

 def get_descriptor_lvX(self,img):
     ori = img
     #img = cv2.bitwise_not(numpy.array(img))
     #img = threshold_adaptive(numpy.array(img), 40)
     #img = cv2.bitwise_not(img*255.)
     img = skeletonize(numpy.array(img)/255.)*255.
     '''figure()
     gray()
     subplot(221)
     imshow(ori)
     subplot(222)
     imshow(img)
     show()'''
     #e = stats.entropy(img.flatten())
     #if math.isnan(e) or math.isinf(e):
     #    return 0
     #else:
     #    return e
     descs = hog(numpy.array(img), orientations=4, pixels_per_cell=(10, 10),cells_per_block=(3, 3),visualise=False)
     '''figure()
     gray()
     imshow(img)
     figure()
     imshow(hpgimg)
     show()'''
     return descs
Author: caoym | Project: odr | Lines: 26 | Source: odr.py


Example 4: polylinesFromBinImage

def polylinesFromBinImage(img, minimum_cluster_size=6,
                          remove_small_obj_size=3,
                          reconnect_size=3,
                          max_n_contours=None, max_len_contour=None,
                          copy=True):
    '''
    return a list of arrays of un-branching contours

    img -> (boolean) array 

    optional:
    ---------
    minimum_cluster_size -> minimum number of pixels connected together to build a contour

    ##search_kernel_size -> TODO
    ##min_search_kernel_moment -> TODO

    numeric:
    -------------
    max_n_contours -> maximum number of possible contours in img
    max_len_contour -> maximum contour length

    '''
    assert minimum_cluster_size > 1
    assert reconnect_size % 2, 'ksize needs to be odd'

    # assert search_kernel_size == 0 or search_kernel_size > 2 and search_kernel_size%2, 'kernel size needs to be odd'
    # assume array size parameters, is not given:
    if max_n_contours is None:
        max_n_contours = max(img.shape)
    if max_len_contour is None:
        max_len_contour = sum(img.shape[:2])
    # array containing coord. of all contours:
    contours = np.zeros(shape=(max_n_contours, max_len_contour, 2),
                        dtype=np.uint16)  # if not search_kernel_size else np.float32)

    if img.dtype != np.bool:
        img = img.astype(bool)
    elif copy:
        img = img.copy()

    if remove_small_obj_size:
        remove_small_objects(img, remove_small_obj_size,
                             connectivity=2, in_place=True)
    if reconnect_size:
        # remove gaps
        maximum_filter(img, reconnect_size, output=img)
        # reduce contour width to 1
        img = skeletonize(img)

    n_contours = _populateContoursArray(img, contours, minimum_cluster_size)
    contours = contours[:n_contours]

    l = []
    for c in contours:
        ind = np.zeros(shape=len(c), dtype=bool)
        _getValidInd(c, ind)
        # remove all empty spaces:
        l.append(c[ind])
    return l
Author: radjkarl | Project: imgProcessor | Lines: 60 | Source: polylinesFromBinImage.py


Example 5: bottleneck_distribution

def bottleneck_distribution(image):
    """
    Count the distribution of bottlenecks

    :param image: data (binary)
    :type image: :py:class:`numpy.ndarray`

    :return: count of bottlenecks of size 4 and size 2
    :rtype: tuple(int)
    """
    skel = morphology.skeletonize(image)
    # get the distances
    dists = ndimage.distance_transform_edt(image)
    # ok for all the nonzero in the skeleton, we get the distances
    x_nz, y_nz = skel.nonzero() # get all the nonzero indices
    width4 = 0
    width2 = 0
    for i in range(len(x_nz)):
        x = x_nz[i]
        y = y_nz[i]

        dist = dists[x,y]
        if dist <= 4:
            width4 += 1
        if dist <= 2:
            width2 += 1

    return width4, width2
Author: JoshuaSBrown | Project: langmuir | Lines: 28 | Source: ga_analyze.py


Example 6: skeletonize_mitochondria

def skeletonize_mitochondria(mch_channel):
    mch_collector = np.max(mch_channel, axis=0)  # TODO: check max projection v.s. sum
    skeleton_labels = np.zeros(mch_collector.shape, dtype=np.uint8)

    # thresh = np.max(mch_collector)/2.
    thresh = threshold_otsu(mch_collector)
    # use adaptative threshold? => otsu seems to be sufficient in this case

    skeleton_labels[mch_collector > thresh] = 1
    skeleton2 = skeletonize(skeleton_labels)
    skeleton, distance = medial_axis(skeleton_labels, return_distance=True)
    active_threshold = np.mean(mch_collector[skeleton_labels]) * 5

    # print active_threshold
    transform_filter = np.zeros(mch_collector.shape, dtype=np.uint8)
    transform_filter[np.logical_and(skeleton > 0, mch_collector > active_threshold)] = 1
    skeleton = transform_filter * distance

    skeleton_ma = np.ma.masked_array(skeleton, skeleton > 0)
    skeleton_convolve = ndi.convolve(skeleton_ma, np.ones((3, 3)), mode='constant', cval=0.0)
    divider_convolve = ndi.convolve(transform_filter, np.ones((3, 3)), mode='constant', cval=0.0)
    skeleton_convolve[divider_convolve > 0] = skeleton_convolve[divider_convolve > 0] / \
                                              divider_convolve[divider_convolve > 0]
    new_skeleton = np.zeros_like(skeleton)
    new_skeleton[skeleton2] = skeleton_convolve[skeleton2]
    skeleton = new_skeleton

    return skeleton_labels, mch_collector, skeleton, transform_filter
Author: chiffa | Project: Chromo_vision | Lines: 28 | Source: layered_zstack_processing.py


Example 7: ruler_scale_factor

def ruler_scale_factor(image, distance):
    """Returns the scale factor to convert from image coordinates to real world coordinates

    Args:
        image: BGR image of shape n x m x 3.
        distance: The real world size of the smallest graduation spacing
    Returns:
        float: Unitless scale factor from image coordinates to real world coordinates.

    """

    height, width = image.shape[:2]
    image, mask = find_ruler(image)
    binary_image = mask * threshold(image, mask)

    if binary_image[mask].mean() > 0.5:
        binary_image[mask] = ~binary_image[mask]
    remove_large_components(binary_image, max(height, width))
    edges = skeletonize(binary_image)
    hspace, angles, distances = hough_transform(edges)
    features = hspace_features(hspace, splits=16)
    angle_index = best_angles(np.array(features))

    max_graduation_size = int(max(image.shape))
    line_separation_pixels = find_grid(hspace[:, angle_index], max_graduation_size)

    logging.info('Line separation: {:.3f}'.format(line_separation_pixels))
    return distance / line_separation_pixels
Author: jrdurrant | Project: vision | Lines: 28 | Source: find_scale.py


Example 8: skeletonize

def skeletonize(mask):
    """Reduces binary objects to 1 pixel wide representations (skeleton)

    Inputs:
    mask       = Binary image data

    Returns:
    skeleton   = skeleton image

    :param mask: numpy.ndarray
    :return skeleton: numpy.ndarray
    """
    # Store debug
    debug = params.debug
    params.debug = None

    # Convert mask to boolean image, rather than 0 and 255 for skimage to use it
    skeleton = skmorph.skeletonize(mask.astype(bool))

    skeleton = skeleton.astype(np.uint8) * 255

    # Reset debug mode
    params.debug = debug
    # Auto-increment device
    params.device += 1

    if params.debug == 'print':
        print_image(skeleton, os.path.join(params.debug_outdir, str(params.device) + '_skeleton.png'))
    elif params.debug == 'plot':
        plot_image(skeleton, cmap='gray')

    return skeleton
Author: danforthcenter | Project: plantcv | Lines: 32 | Source: skeletonize.py


Example 9: skeletonize_mitochondria

def skeletonize_mitochondria(mCh_channel):

    mch_collector = np.max(mCh_channel, axis=0)  # TODO: check how max affects v.s. sum
    labels = np.zeros(mch_collector.shape, dtype=np.uint8)

    # thresh = np.max(mch_collector)/2.
    thresh = threshold_otsu(mch_collector)
    # TODO: use adaptative threshold? => otsu seems to be sufficient in this case
    # http://scikit-image.org/docs/dev/auto_examples/xx_applications/plot_thresholding.html#sphx
    # -glr-auto-examples-xx-applications-plot-thresholding-py
    #  log-transform? => Nope, does not work
    # TODO: hessian/laplacian of gaussian blob detection?

    labels[mch_collector > thresh] = 1
    skeleton2 = skeletonize(labels)
    skeleton, distance = medial_axis(labels, return_distance=True)
    active_threshold = np.mean(mch_collector[labels]) * 5

    # print active_threshold
    transform_filter = np.zeros(mch_collector.shape, dtype=np.uint8)
    transform_filter[np.logical_and(skeleton > 0, mch_collector > active_threshold)] = 1
    skeleton = transform_filter * distance

    skeleton_ma = np.ma.masked_array(skeleton, skeleton > 0)
    skeleton_convolve = ndi.convolve(skeleton_ma, np.ones((3, 3)), mode='constant', cval=0.0)
    divider_convolve = ndi.convolve(transform_filter, np.ones((3, 3)), mode='constant', cval=0.0)
    skeleton_convolve[divider_convolve > 0] = skeleton_convolve[divider_convolve > 0] \
                                              / divider_convolve[divider_convolve > 0]
    new_skeleton = np.zeros_like(skeleton)
    new_skeleton[skeleton2] = skeleton_convolve[skeleton2]
    skeleton = new_skeleton

    return labels, mch_collector, skeleton, transform_filter
Author: chiffa | Project: Chromo_vision | Lines: 33 | Source: Linhao_masks_logic.py
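
Examples 6 and 9 pair skeletonize with medial_axis, whose return_distance=True option also yields the distance transform, so each skeleton pixel can be weighted by the local object half-width. A minimal sketch of that pattern, using a made-up synthetic blob:

import numpy as np
from skimage.morphology import skeletonize, medial_axis

# Hypothetical binary blob: a bar that is thicker in the middle.
blob = np.zeros((40, 80), dtype=bool)
blob[15:25, 5:75] = True
blob[10:30, 30:50] = True

skeleton = skeletonize(blob)                      # plain 1-pixel-wide skeleton
med_axis, distance = medial_axis(blob, return_distance=True)

# Weight each skeleton pixel by the local radius (half the local width).
width_map = distance * skeleton
print(float(width_map.max()))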


Example 10: get_thinned

def get_thinned(binaryArr):
    """
    Return thinned output
    Parameters
    ----------
    binaryArr : Numpy array
        2D or 3D binary numpy array

    Returns
    -------
    result : boolean Numpy array
        2D or 3D binary thinned numpy array of the same shape
    """
    assert np.max(binaryArr) in [0, 1], "input must always be a binary array"
    voxCount = np.sum(binaryArr)
    if binaryArr.sum() == 0:
        return binaryArr.astype(bool)
    elif len(binaryArr.shape) == 2:
        return skeletonize(binaryArr).astype(bool)
    else:
        start_skeleton = time.time()
        zOrig, yOrig, xOrig = np.shape(binaryArr)
        orig = np.lib.pad(binaryArr, 1, 'constant')
        result = cy_get_thinned3D(np.uint64(orig))
        print("thinned %i number of pixels in %0.2f seconds" % (voxCount, time.time() - start_skeleton))
        return result[1:zOrig + 1, 1: yOrig + 1, 1: xOrig + 1].astype(bool)
Author: 3Scan | Project: 3scan-skeleton | Lines: 26 | Source: thinVolume.py


Example 11: calculate

 def calculate(self, image: np.ndarray, disk_size: int=9,
               mean_threshold: int=100, min_object_size: int=750) -> float:
     # Find edges that have a strong vertical direction
     vertical_edges = sobel_v(image)
     # Separate out the areas where there is a large amount of vertically-oriented stuff
     segmentation = self._segment_edge_areas(vertical_edges, disk_size, mean_threshold, min_object_size)
     # Draw a line that follows the center of the segments at each point, which should be roughly vertical
     # We should expect this to give us four approximately-vertical lines, possibly with many gaps in
     # each line
     skeletons = skeletonize(segmentation)
     # Use the Hough transform to get the closest lines that approximate those four lines
     hough = transform.hough_line(skeletons, np.arange(-constants.FIFTEEN_DEGREES_IN_RADIANS,
                                                       constants.FIFTEEN_DEGREES_IN_RADIANS,
                                                       0.0001))
     # Create a list of the angles (in radians) of all of the lines the Hough transform produced, with 0.0
     # being completely vertical
     # These angles correspond to the angles of the four sides of the channels, which we need to
     # correct for
     angles = [angle for _, angle, dist in zip(*transform.hough_line_peaks(*hough))]
     if not angles:
         raise ValueError("Image rotation could not be calculated. Check the images to see if they're weird.")
     else:
         # Get the average angle and convert it to degrees
         offset = sum(angles) / len(angles) * 180.0 / math.pi
         if offset > constants.ACCEPTABLE_SKEW_THRESHOLD:
             log.warn("Image is heavily skewed. Check that the images are valid.")
         return offset
Author: jimrybarski | Project: fylm_critic | Lines: 27 | Source: rotate.py


Example 12: label_image

def label_image(image):
    
    ROI = np.zeros((470,400,3), dtype=np.uint8)
    for c in range(3):
        for i in range(50,520):
            for j in range(240,640):
                ROI[i-50,j-240,c] = image[i,j,c]

    
    gray_ROI = cv2.cvtColor(ROI,cv2.COLOR_BGR2GRAY)
    
    ROI_flou = cv2.medianBlur((ROI).astype('uint8'),3)
    
    Laser = Detecte_laser.Detect_laser(ROI_flou)
    
    open_laser = cv2.morphologyEx(Laser, cv2.MORPH_DILATE, disk(3))
    
    skel = skeletonize(open_laser > 0)
    
    tranche = Detecte_laser.tranche(skel,90,30)    
    
    ret, thresh = cv2.threshold(gray_ROI*tranche.astype('uint8'),0,1,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    thresh01 = thresh<1.0
    
    open_thresh = cv2.morphologyEx(thresh01.astype('uint8'), cv2.MORPH_OPEN, disk(10))
    
    labelised = (label(open_thresh,8,0))+1
    
    return gray_ROI,labelised
Author: elekhac | Project: Projet_Polype | Lines: 29 | Source: feature_extraction_2.py


Example 13: label_particles_edge

def label_particles_edge(im, sigma=2, closing_size=0, **extra_args):
    """ Segment image using Canny edge-finding filter.

        parameters
        ----------
        im : image in which to find particles
        sigma : size of the Canny filter
        closing_size : size of the closing filter

        returns
        -------
        labels : an image array of uniquely labeled segments
    """
    from skimage.morphology import square, binary_closing, skeletonize
    if skimage_version < StrictVersion('0.11'):
        from skimage.filter import canny
    else:
        from skimage.filters import canny
    edges = canny(im, sigma=sigma)
    if closing_size > 0:
        edges = binary_closing(edges, square(closing_size))
    edges = skeletonize(edges)
    labels = sklabel(edges)
    print "found {} segments".format(labels.max())
    # in ma.array mask, False is True, and vice versa
    labels = np.ma.array(labels, mask=edges == 0)
    return labels
Author: leewalsh | Project: square-tracking | Lines: 27 | Source: positions.py


Example 14: enhance_edges

    def enhance_edges(self):
        """ GNIRS_edge_detector method to enhance the footprint edges using the Sobel
            kernel. Generate two binary images, one with the left edges
            and the other showing the right edges only. This is because the
            MDF information about the footprints location is not well 
            determined.
        """
        sdata=nd.sobel(self.image,axis=self.axis)
        std = np.std(sdata)
        bdata=np.where(sdata>std,1,0)
 
        # Make the edges one pixel wide
        self.left_bin_image = skeletonize(bdata)

        bdata=np.where(sdata < -std,1,0)
        self.right_bin_image = skeletonize(bdata)
Author: pyrrho314 | Project: recipesystem | Lines: 16 | Source: segmentation.py


Example 15: test_skeletonize_already_thinned

 def test_skeletonize_already_thinned(self):
     im = np.zeros((5, 5), np.uint8)
     im[3, 1:-1] = 1
     im[2, -1] = 1
     im[4, 0] = 1
     result = skeletonize(im)
     numpy.testing.assert_array_equal(result, im)
Author: ChrisBeaumont | Project: scikit-image | Lines: 7 | Source: test_skeletonize.py


Example 16: main

def main():
    c.clear_temp()

    img = dataset_manager.get_training_image()
    recognizer = captcha_recognizer.CaptchaRecognizer()
    mpimg.imsave(c.temp_path('00.origin.png'), img)

    # 1
    img_01 = time_func(
        'remove_noise_with_hsv',
        lambda: recognizer.remove_noise_with_hsv(img)
    )
    mpimg.imsave(c.temp_path('01.hsv.png'), img_01, cmap=cm_greys)

    # 2
    img_02 = time_func(
        'remove_noise_with_neighbors',
        lambda: repeat(recognizer.remove_noise_with_neighbors, 2)(img_01)
    )
    mpimg.imsave(c.temp_path('02.neighbor.png'), img_02, cmap=cm_greys)

    img_03a = time_func(
        'skeletonize',
        lambda: morph.skeletonize(img_02)
    )
    mpimg.imsave(c.temp_path('03a.skeleton.png'), img_03a, cmap=cm_greys)
Author: xieyanfu | Project: bilibili-captcha | Lines: 26 | Source: feature_study.py


Example 17: test_skeletonize_num_neighbours

    def test_skeletonize_num_neighbours(self):
        # an empty image
        image = np.zeros((300, 300))

        # foreground object 1
        image[10:-10, 10:100] = 1
        image[-100:-10, 10:-10] = 1
        image[10:-10, -100:-10] = 1

        # foreground object 2
        rs, cs = draw.bresenham(250, 150, 10, 280)
        for i in range(10):
            image[rs + i, cs] = 1
        rs, cs = draw.bresenham(10, 150, 250, 280)
        for i in range(20):
            image[rs + i, cs] = 1

        # foreground object 3
        ir, ic = np.indices(image.shape)
        circle1 = (ic - 135)**2 + (ir - 150)**2 < 30**2
        circle2 = (ic - 135)**2 + (ir - 150)**2 < 20**2
        image[circle1] = 1
        image[circle2] = 0
        result = skeletonize(image)

        # there should never be a 2x2 block of foreground pixels in a skeleton
        mask = np.array([[1,  1],
                         [1,  1]], np.uint8)
        blocks = correlate(result, mask, mode='constant')
        assert not numpy.any(blocks == 4)
Author: ChrisBeaumont | Project: scikit-image | Lines: 30 | Source: test_skeletonize.py


Example 18: calculate_rotation

def calculate_rotation(image):
    # sometimes we snag corners, by cropping the left and right 10% of the image we focus only on the
    # vertical bars formed by the structure
    height, width = image.shape
    crop = int(width * 0.1)
    cropped_image = image[:, crop: width - crop]
    # Find edges that have a strong vertical direction
    vertical_edges = sobel_v(cropped_image)
    # Separate out the areas where there is a large amount of vertically-oriented stuff
    segmentation = segment_edge_areas(vertical_edges)
    # Draw a line that follows the center of the segments at each point, which should be roughly vertical
    # We should expect this to give us four approximately-vertical lines, possibly with many gaps in
    # each line
    skeletons = skeletonize(segmentation)
    # Use the Hough transform to get the closest lines that approximate those four lines
    hough = transform.hough_line(skeletons, np.arange(-constants.FIFTEEN_DEGREES_IN_RADIANS,
                                                      constants.FIFTEEN_DEGREES_IN_RADIANS,
                                                      0.0001))
    # Create a list of the angles (in radians) of all of the lines the Hough transform produced, with 0.0
    # being completely vertical
    # These angles correspond to the angles of the four sides of the channels, which we need to
    # correct for
    angles = [angle for _, angle, dist in zip(*transform.hough_line_peaks(*hough))]
    if not angles:
        raise ValueError("Image rotation could not be calculated. Check the images to see if they're weird.")
    else:
        # Get the average angle and convert it to degrees
        offset = sum(angles) / len(angles) * 180.0 / math.pi
        if offset > constants.ACCEPTABLE_SKEW_THRESHOLD:
            log.warn("Image is heavily skewed. Check that the images are valid.")
        return offset
Author: jimrybarski | Project: fylm_critic | Lines: 31 | Source: tiff_to_hdf5.py


Example 19: sample_points

def sample_points(img, n_points=100):
    """Sample points along edges in a binary image.

    Returns an array of shape ``(n_points, 2)`` in image coordinates.

    If there are several disconnected contours, they are sampled
    seperately and appended in order of their minimum distance to the
    origin of ``img`` in NumPy array coordinates.

    """
    # FIXME: what if contour crosses itself? for example: an infinity
    # symbol?
    assert img.ndim == 2
    assert n_points > 0

    boundaries = skeletonize(find_boundaries(img))

    # reorder along curves; account for holes and disconnected lines
    # with connected components.
    labels, n_labels = ndimage.label(boundaries, structure=np.ones((3, 3)))
    n_labeled_pixels = labels.sum()
    all_labels = range(1, n_labels + 1)
    curve_n_pixels = list((labels == lab).sum() for lab in all_labels)
    curve_n_points = list(int(np.ceil((n / n_labeled_pixels) * n_points))
                          for n in curve_n_pixels)

    # sample a linear subset of each connected curve
    samples = list(_sample_single_contour(labels == lab, n_points)
                   for lab, n_points in zip(all_labels, curve_n_points))

    # append them together. They should be in order, because
    # ndimage.label() labels in order.
    points = list(itertools.chain(*samples))
    return np.vstack(points)
Author: kemaleren | Project: brainstem | Lines: 34 | Source: shape_context.py


Example 20: formatTS

def formatTS(start, stop, r, useIntersects=True):
    aStarts = {}
    X = []
    Y = []
    coords = []
    sigNum = []
    strcArr = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
    for sig in xrange(start, stop):
        if sig % 50 == 0:
            print sig
        path = np.array(sigs[sig])
        imgO = imread(getFilePath(sig), as_grey=True)
        # xs = [n for n in xrange()]  # unused in the original; xrange() requires an argument, so this line is commented out
        plt.subplot(1, 2, 1)
        plt.imshow(imgO)
        plt.subplot(1, 2, 2)
        plt.imshow(gaussian_filter(imgO, 1))
        plt.show()
        thresh = 0.9
        img = gaussian_filter(imgO, 1) < thresh
        imgX, imgY = np.nonzero(img)
        imgW = imgX.max() - imgX.min()
        imgH = imgY.max() - imgY.min()
        img = skeletonize(img)
        img = img[imgX.min() : imgX.max(), imgY.min() : imgY.max()]
        skltnSegs, nSkltnSegs = label(img, structure=strcArr)
        # print nSkltnSegs
        # plt.imshow(skltnSegs)
        # plt.show()
        imgO = imgO[imgX.min() : imgX.max(), imgY.min() : imgY.max()]
        sumArr = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]])
        summed = convolve(img, sumArr, mode="constant", cval=0)
        corners = (summed == 2) & (img == 1)
        startx = path[0][0] * img.shape[1]
        starty = path[0][1] * img.shape[0]
        aStarts[sig] = [startx, starty]
        if useIntersects:
            intersects = (summed >= 4) & (img == 1)
            labeled, nLabels = label(intersects, structure=strcArr)
            itrscts = []
            for l in xrange(1, nLabels + 1):
                intersect = np.array((labeled == l), dtype=int)
                posX, posY = np.nonzero(intersect)
                xC, yC = np.array([[np.sum(posX) / posX.size], [np.sum(posY) / posY.size]])
                itrscts.append([xC[0], yC[0]])
            itrscts = np.array(itrscts)
        corners = np.transpose(np.array(np.nonzero(corners)))
        if useIntersects:
            try:
                corners = np.vstack((itrscts, corners))
            except:
                print "something went wrong at", sig
        corners[:, [0, 1]] = corners[:, [1, 0]]
        for i, corner in enumerate(corners):
            x, y = corner[0], corner[1]
            x = getFeatures(imgO, x, y, r, imgH, imgW, skltnSegs)
            X.append(x)
            sigNum.append(sig)
            coords.append([x, y])
    return np.array(X), np.array(sigNum), np.array(coords)
Author: Newmu | Project: Stroke-Prediction | Lines: 60 | Source: explore.py



Note: the skimage.morphology.skeletonize examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.

