Python color.label2rgb Function Code Examples


This article collects typical usage examples of the Python function skimage.color.label2rgb. If you are wondering how label2rgb is used in practice, how to call it, or what real-world examples look like, the hand-picked code samples below should help.



A total of 20 code examples of the label2rgb function are shown below, sorted by popularity by default.
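
Before the examples, here is a minimal, self-contained sketch of the two calling patterns that recur throughout them: the default 'overlay' mode, which blends a color per label over the image, and kind='avg', which fills each labeled region with its mean color. The sample image and parameter values below are illustrative choices, not taken from any of the projects listed here.

import matplotlib.pyplot as plt
from skimage import color, data, segmentation

img = data.astronaut()                                # built-in sample RGB image
labels = segmentation.slic(img, n_segments=100, compactness=10)

overlay = color.label2rgb(labels, img)                # translucent color overlay (kind='overlay')
averaged = color.label2rgb(labels, img, kind='avg')   # each region filled with its mean color

fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(overlay)
ax1.set_title("kind='overlay' (default)")
ax2.imshow(averaged)
ax2.set_title("kind='avg'")
for ax in (ax1, ax2):
    ax.axis('off')
plt.show()

Several of the examples below additionally pass bg_label=0 so that label 0 is treated as background and left uncolored.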

Example 1: plot_cup

def plot_cup(img, bbox):
    img = FC.resize_image(img)
    lab_img = FC.apply_color_model(img)

    labels1, labels2 = FC.segment_image(img, lab_img)

    cup_labels = FC.select_cup_segment(labels2, bbox)
#     out3 = skic.label2rgb(cup_labels, img, kind='avg')
    out3 = cup_labels

    neg_segments = FC.select_negative_segments(cup_labels, labels2)
    out4 = skic.label2rgb(neg_segments, img, kind='avg')

    plt.figure()
    plt.imshow(img)
    plt.title("Image")
    plt.figure()
    plt.imshow(skic.label2rgb(labels1, img, kind='avg'))
    plt.title("Plain SLIC result")
    plt.figure()
    plt.imshow(skic.label2rgb(labels2, img, kind='avg'))
    plt.title("Merged segments")
    plt.figure()
    plt.imshow(out3)
    if np.max(out3.ravel()) == 1:
        plt.title("cup only")
    else:
        plt.title("rejected cup")

    plt.figure()
    plt.imshow(out4)
    plt.title("Negative segments")
Developer: groakat, Project: cup_detection, Lines: 32, Source: cup.py


Example 2: logo_iterate

def logo_iterate(labels, image, fns=d + 'logo-%03i.png'):
    height, width = labels.shape
    background = (labels == 0)
    foreground = ~background
    counter = it.count()

    # part one: just foreground/background
    colorcombos = it.permutations(colors, 2)
    lab2 = np.zeros(labels.shape, np.uint8)
    lab2[foreground] = 1
    for cs in colorcombos:
        img = color.label2rgb(lab2, image, colors=cs)
        io.imsave(fns % next(counter), img)

    # part two: background split
    splits = np.arange(500, 1600, 100).astype(int)
    colorcombos = it.permutations(colors, 3)
    for s, cs in it.product(splits, colorcombos):
        im, lab = _split_img_horizontal(image, lab2, background, s)
        img = color.label2rgb(lab, im, colors=cs)
        io.imsave(fns % next(counter), img)

    # part three: foreground split
    colorcombos = it.permutations(colors, 3)
    for cs in colorcombos:
        img = color.label2rgb(labels, image, colors=cs)
        io.imsave(fns % next(counter), img)

    # part four: both split
    colorcombos = it.permutations(colors, 4)
    for s, cs in it.product(splits, colorcombos):
        im, lab = _split_img_horizontal(image, labels, background, s)
        img = color.label2rgb(lab, im, colors=cs)
        io.imsave(fns % next(counter), img)
Developer: DaniUPC, Project: gala, Lines: 34, Source: logo.py


Example 3: normcut_segmentations

def normcut_segmentations(img):

    #labels1 = segmentation.slic(img, compactness=3, n_segments=50)
    labels1 = segmentation.slic(img,compactness=3,n_segments=20)
    out1 = color.label2rgb(labels1, img)#, kind='avg')
    #return labels1
    g = graph.rag_mean_color(img, labels1, mode='similarity')
    labels2 = graph.cut_normalized(labels1, g)
    out2 = color.label2rgb(labels2, img,image_alpha=0.2)#, kind='avg')
    return (labels1,labels2)
Developer: prateek-s, Project: cartmandetect, Lines: 10, Source: segmentation.py


Example 4: number_nucleus

def number_nucleus(image):

    elevation_map = sobel(image)
    markers = np.zeros_like(image)
    markers[image < 250] = 1
    markers[image > 2000] = 2

    segmentation = watershed(elevation_map, markers)
    label_img = label(segmentation)
    prop = regionprops(label_img)

    width, height = plt.rcParams['figure.figsize']
    plt.rcParams['image.cmap'] = 'gray'

    image_label_overlay = label2rgb(label_img, image=image)

    fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(15, 8))
    ax1.imshow(image_label_overlay)
    ax2.imshow(image, cmap=plt.cm.gray, interpolation='nearest')

    # keep only regions with area > 5000
    image_labeled = [region for region in prop if region.area > 5000]


    return len(image_labeled)
Developer: cespenel, Project: image_processing, Lines: 25, Source: blobs_per_cell.py


Example 5: show_all

def show_all(fname,images,titles,numsegs=1):
    
    num_images = len(images)
    num_titles = len(titles)
    titles += ['']*(num_images-num_titles)
    
    fig, axes = plt.subplots(ncols=num_images, figsize=(9, 2.5))

    im = images[0]

    for i in range(numsegs):
        axes[i].imshow(images[i])
        axes[i].set_title(titles[i])
        print(titles[i])

    for i in range(numsegs,num_images) :
        j=i #numsegs+i
        segimg = label2rgb(images[j], image=im, image_alpha=0.5)
        axes[j].imshow(segimg, interpolation='nearest')
        axes[j].set_title(titles[j])
        print(titles[j])

    for ax in axes:
        ax.axis('off')
    fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
    plt.show()
    #plt.savefig(fname+"_seg.jpg")
    print(fname + "_seg.jpg")
    return True
Developer: prateek-s, Project: cartmandetect, Lines: 29, Source: segmentation.py


Example 6: build_region

	def build_region(self):
		start_time = time.time();
		labels = segmentation.slic(self.q_frame,self.num_superpixels, self.compactness,convert2lab=True,multichannel=True)
		_num_superpixels = np.max(labels) + 1;
		self.s_frame = color.label2rgb(labels,self.q_frame, kind='avg')
		self.freq = np.array([np.sum(labels==label) for label in range(_num_superpixels)])
		self.mean = np.array([region['centroid'] for region in regionprops(labels+1)],dtype=np.int16);	
		
		self.color_data = np.array([np.sum(self.q_frame[np.where(labels==label)],0) for label in range(_num_superpixels)])		
		_inv_freq = 1/(self.freq+0.0000001);  self.color_data = self.color_data*_inv_freq[:,None]
		gray_frame = cv2.cvtColor(self.q_frame,cv2.COLOR_RGB2GRAY)
		def texture_prop(label,patch_size = 5):
			_mean_min = self.mean[label]-patch_size;
			_mean_max = self.mean[label]+patch_size;
			glcm = greycomatrix(gray_frame[_mean_min[0]:_mean_max[0],_mean_min[1]:_mean_max[1]],
						[3], [0], 256, symmetric=True, normed=True)
			_dis = greycoprops(glcm, 'dissimilarity')[0, 0];
			_cor = greycoprops(glcm, 'correlation')[0, 0];
			return (_dis,_cor);
		self.texture_data = np.array([texture_prop(label) for label in range(_num_superpixels)])
		self.data = np.hstack((self.color_data,self.texture_data))
		
		cv2.imwrite('outs.png',self.s_frame);				
		print "Build region (preprocess) : ",time.time()-start_time
		return (labels,_num_superpixels);
Developer: sudhargk, Project: video-annotator, Lines: 25, Source: saliency_gmm.py


Example 7: _apply

 def _apply(self, img_msg, label_msg):
     bridge = cv_bridge.CvBridge()
     img = bridge.imgmsg_to_cv2(img_msg)
     label_img = bridge.imgmsg_to_cv2(label_msg)
     # publish only valid label region
     applied = img.copy()
     applied[label_img == 0] = 0
     applied_msg = bridge.cv2_to_imgmsg(applied, encoding=img_msg.encoding)
     applied_msg.header = img_msg.header
     self.pub_img.publish(applied_msg)
     # publish visualized label
     if img_msg.encoding in {'16UC1', '32SC1'}:
         # do dynamic scaling to make it look nicely
         min_value, max_value = img.min(), img.max()
         img = (img - min_value) / (max_value - min_value) * 255
         img = gray2rgb(img)
     label_viz_img = label2rgb(label_img, img, bg_label=0)
     label_viz_img = mark_boundaries(label_viz_img, label_img, (1, 0, 0))
     label_viz_img = (label_viz_img * 255).astype(np.uint8)
     label_viz_msg = bridge.cv2_to_imgmsg(label_viz_img, encoding='rgb8')
     label_viz_msg.header = img_msg.header
     self.pub_label_viz.publish(label_viz_msg)
     # publish mask
     if self._publish_mask:
         bg_mask = (label_img == 0)
         fg_mask = ~bg_mask
         bg_mask = (bg_mask * 255).astype(np.uint8)
         fg_mask = (fg_mask * 255).astype(np.uint8)
         fg_mask_msg = bridge.cv2_to_imgmsg(fg_mask, encoding='mono8')
         fg_mask_msg.header = img_msg.header
         bg_mask_msg = bridge.cv2_to_imgmsg(bg_mask, encoding='mono8')
         bg_mask_msg.header = img_msg.header
         self.pub_fg_mask.publish(fg_mask_msg)
         self.pub_bg_mask.publish(bg_mask_msg)
Developer: Horisu, Project: jsk_recognition, Lines: 34, Source: label_image_decomposer.py


Example 8: getRegions

def getRegions():
    """Geocode address and retreive image centered
    around lat/long"""
    address = request.args.get('address')
    results = Geocoder.geocode(address)
    lat, lng = results[0].coordinates
    zip_code = results[0].postal_code

    map_url = 'https://maps.googleapis.com/maps/api/staticmap?center={0},{1}&size=640x640&zoom=19&sensor=false&maptype=roadmap&&style=visibility:simplified|gamma:0.1'
    request_url = map_url.format(lat, lng)
    req = urllib.urlopen(request_url)
    img = io.imread(req.geturl(),flatten=True)
    labels, numobjects = ndimage.label(img)
    image = filter.canny(img, sigma=3)
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)
Developer: frenchja, Project: SunnySideUp, Lines: 29, Source: views.py


Example 9: __build_region__

	def __build_region__(self,q_frame):
		start_time = time.time();
		regions = segmentation.slic(q_frame,self.props.num_superpixels, self.props.compactness,
				convert2lab=self.props.useLAB,multichannel=True)
		num_regions = len(np.unique(regions));
		s_frame = color.label2rgb(regions,q_frame, kind='avg')
		mean = np.array([region['centroid'] for region in regionprops(regions+1)])
		freq = np.array([np.sum(regions==region) for region in range(num_regions)])
		region_props = (mean,freq);
		
		if self.props.useColor:
			color_data = self.__extract_color__(q_frame,regions,region_props);		
		if self.props.useTexture:
			texture_data = self.__extract_texture__(q_frame,regions,region_props);
			
		if self.props.useTexture and self.props.useColor:
			data = np.hstack((color_data,texture_data))
		elif self.props.useTexture:
			data = texture_data
		else :
			data = color_data
				
		if self.props.doProfile:
			cv2.imwrite(self.PROFILE_PATH+self.method+'_s.png',s_frame);					
			print "Build region (preprocess) : ",time.time()-start_time
	
		return (num_regions,regions,region_props,data);
Developer: sudhargk, Project: video-annotator, Lines: 27, Source: __init__.py


Example 10: detectOpticDisc

def detectOpticDisc(image):
    labels = segmentation.slic(image, n_segments = 70)
    out = color.label2rgb(labels, image, kind='avg')
    gray = cv2.cvtColor(out, cv2.COLOR_RGB2GRAY)
    minimum = np.max(gray)
    image[gray==minimum] = 255
    return image
Developer: fvermeij, Project: cad-doge, Lines: 7, Source: opticdiscdetection.py


Example 11: roofRegion

def roofRegion(edge):
    """Estimate region based on edges of roofRegion
    """
    # apply threshold
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)

    for region in regionprops(label_image):

        # skip small images
        if region.area < 100:
            continue

        # draw rectangle around segmented coins
        minr, minc, maxr, maxc = region.bbox
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)
        ax.add_patch(rect)

    plt.show()
Developer: frenchja, Project: SunnySideUp, Lines: 33, Source: gmaps.py


Example 12: SuperPixel

 def SuperPixel(self, Image):
     segments = slic(Image, n_segments=20, sigma=5)
     # show the output of SLIC
     segments = segments + 1
     # So that no labelled region is 0 and ignored by regionprops
     label_rgb = color.label2rgb(segments, Image, kind='avg')
     return label_rgb
Developer: pazagra, Project: catkin_ws, Lines: 7, Source: listv3.0.py


Example 13: detectOpticDisc

def detectOpticDisc(image):
    kernel = octagon(10, 10)
    thresh = threshold_otsu(image[:,:,1])
    binary = image > thresh
    print(binary.dtype)
    luminance = convertToHLS(image)[:,:,2]
    t = threshold_otsu(luminance)
    t = erosion(luminance, kernel)
    
    
    labels = segmentation.slic(image[:,:,1], n_segments = 3)
    out = color.label2rgb(labels, image[:,:,1], kind='avg')
    skio.imshow(out)
    
    x, y = computeCentroid(t)
    print(x, y)
    rows, cols, _ = image.shape
    p1 = closing(image[:,:,1],kernel)
    p2 = opening(p1, kernel)
    p3 = reconstruction(p2, p1, 'dilation')
    p3 = p3.astype(np.uint8)
    #g = dilation(p3, kernel)-erosion(p3, kernel)
    #g = rank.gradient(p3, disk(5))
    g = cv2.morphologyEx(p3, cv2.MORPH_GRADIENT, kernel)
    #markers = rank.gradient(p3, disk(5)) < 10
    markers = drawCircle(rows, cols, x, y, 85)
    #markers = ndimage.label(markers)[0]
    #skio.imshow(markers)
    g = g.astype(np.uint8)
    #g = cv2.cvtColor(g, cv2.COLOR_GRAY2RGB)
    w = watershed(g, markers)
    print(np.max(w), np.min(w))
    w = w.astype(np.uint8)
    #skio.imshow(w)
    return w
Developer: fvermeij, Project: cad-doge, Lines: 35, Source: opticDiscVesselDetection.py


Example 14: view_dataset

 def view_dataset(self):
     for datum in self.val:
         rgb, label = self.load_datum(datum, train=False)
         label_viz = label2rgb(label, rgb, bg_label=-1)
         label_viz[label == 0] = 0
         plt.imshow(label_viz)
         plt.show()
Developer: barongeng, Project: fcn, Lines: 7, Source: apc2016.py


Example 15: get_cells

def get_cells(image):
    '''
    Get cells from the polygon.
    '''
    # apply threshold    
    thresh = threshold_otsu(image)
    binary = image > thresh
    bw=binary
    plt.imshow(bw)

    # Remove connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = skimage.measure.label(cleared)
    #find_contours
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    #extract the regions and get a polygon per region
    polygons=[]
    for i,region in enumerate(regionprops(label_image)):
        # skip small images
        if region.area < 100:
            continue
        a=np.zeros([len(region.coords),2])
        #a=np.zeros(
        plt.imshow(bw)
        for i in range(len(region.coords)):
            a[i,:]=[region.coords[i][0],region.coords[i][1]]
        polygons.append(a)
    return polygons     
Developer: kerenl, Project: cell_analysis, Lines: 34, Source: utils2.py


Example 16: plot_preprocessed_image

    def plot_preprocessed_image(self):
        """
        plots pre-processed image. The plotted image is the same as obtained at the end
        of the get_text_candidates method.
        """
        image = restoration.denoise_tv_chambolle(self.image, weight=0.1)
        thresh = threshold_otsu(image)
        bw = closing(image > thresh, square(2))
        cleared = bw.copy()

        label_image = measure.label(cleared)
        borders = np.logical_xor(bw, cleared)

        label_image[borders] = -1
        image_label_overlay = label2rgb(label_image, image=image)

        fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
        ax.imshow(image_label_overlay)

        for region in regionprops(label_image):
            if region.area < 10:
                continue

            minr, minc, maxr, maxc = region.bbox
            rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      fill=False, edgecolor='red', linewidth=2)
            ax.add_patch(rect)

        plt.show()
Developer: kmiddleton, Project: ImageTextRecognition, Lines: 29, Source: userimageski.py


Example 17: get_cells

def get_cells(image):
    '''
    Get cells from the polygon.
    '''
    new_image=np.ones([3,image.shape[0],image.shape[1]],dtype=float)
    # apply threshold
    thresh = threshold_otsu(image)
    bw=image

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    #skimage.measure.label
    #find_contours
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    #extract the regions and get a polygon per region
    polygons=[]
    for i,region in enumerate(regionprops(label_image)):
        # skip small images
        if region.area < 100:
            continue
        #polygons.append(matplotlib.path.Path(region.coords))
        print (region.coords.shape)
        a=np.zeros(region.coords.shape)
        a[:,0]=region.coords[:,1]
        a[:,1]=region.coords[:,0]
        polygons.append(a)   
    return polygons
Developer: kerenl, Project: cell_analysis, Lines: 34, Source: medial_axis_skeletonization_keren_v2.py


Example 18: _callback

    def _callback(self, img_msg, mask_msg):
        bridge = cv_bridge.CvBridge()
        bgr_img = bridge.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')
        mask_img = bridge.imgmsg_to_cv2(mask_msg, desired_encoding='mono8')
        if mask_img.size < 1:
            logwarn_throttle(10, 'Too small sized image')
            return
        logwarn_throttle(10, '[FCNMaskForLabelNames] >> Start Processing <<')
        if mask_img.ndim == 3 and mask_img.shape[2] == 1:
            mask_img = mask_img.reshape(mask_img.shape[:2])
        if mask_img.shape != bgr_img.shape[:2]:
            jsk_logwarn('Size of mask and color image is different.'
                        'Resizing.. mask {0} to {1}'
                        .format(mask_img.shape, bgr_img.shape[:2]))
            mask_img = resize(mask_img, bgr_img.shape[:2],
                              preserve_range=True).astype(np.uint8)

        blob = bgr_img - self.mean_bgr
        blob = blob.transpose((2, 0, 1))

        x_data = np.array([blob], dtype=np.float32)
        if self.gpu != -1:
            x_data = cuda.to_gpu(x_data, device=self.gpu)
        x = Variable(x_data, volatile=True)
        self.model(x)
        pred_datum = cuda.to_cpu(self.model.score.data[0])

        candidate_labels = [self.target_names.index(name)
                            for name in self.tote_contents]
        label_pred_in_candidates = pred_datum[candidate_labels].argmax(axis=0)
        label_pred = np.zeros_like(label_pred_in_candidates)
        for idx, label_val in enumerate(candidate_labels):
            label_pred[label_pred_in_candidates == idx] = label_val
        label_pred[mask_img == 0] = 0  # set bg_label

        label_viz = label2rgb(label_pred, bgr_img, bg_label=0)
        label_viz = (label_viz * 255).astype(np.uint8)
        debug_msg = bridge.cv2_to_imgmsg(label_viz, encoding='rgb8')
        debug_msg.header = img_msg.header
        self.pub_debug.publish(debug_msg)

        output_mask = np.ones(mask_img.shape, dtype=np.uint8)
        output_mask *= 255
        for label_val, label_name in enumerate(self.target_names):
            if label_name in self.label_names:
                assert label_name == 'kleenex_paper_towels'
                assert label_val == 21
                label_mask = ((label_pred == label_val) * 255).astype(np.uint8)
                contours, hierachy = cv2.findContours(
                    label_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                cv2.drawContours(output_mask, contours, -1, 255, -1)
                # output_mask[label_pred == label_val] = False
        # output_mask = output_mask.astype(np.uint8)
        # output_mask[output_mask == 1] = 255
        output_mask[mask_img == 0] = 0
        output_mask_msg = bridge.cv2_to_imgmsg(output_mask, encoding='mono8')
        output_mask_msg.header = img_msg.header
        self.pub.publish(output_mask_msg)
        logwarn_throttle(10, '[FCNMaskForLabelNames] >> Finished processing <<')
Developer: Affonso-Gui, Project: jsk_apc, Lines: 59, Source: fcn_mask_for_label_names.py


Example 19: validate

 def validate(self):
     """Validate training with data."""
     log_templ = ('{i_iter}: type={type}, loss={loss}, acc={acc}, '
                  'acc_cls={acc_cls}, iu={iu}, fwavacc={fwavacc}')
     type = 'val'
     self.model.train = False
     N_data = len(self.dataset.val)
     result = defaultdict(list)
     desc = '{0}: validating'.format(self.i_iter)
     for indice in tqdm.tqdm(xrange(N_data), ncols=80, desc=desc):
         loss, acc, acc_cls, iu, fwavacc = self._iterate_once(
             type=type, indices=[indice])
         result['loss'].append(loss)
         result['acc'].append(acc)
         result['acc_cls'].append(acc_cls)
         result['iu'].append(iu)
         result['fwavacc'].append(fwavacc)
     # visualize predicted label
     blob = cuda.to_cpu(self.model.x.data)[0]
     label_true = cuda.to_cpu(self.model.t.data)[0]
     img = self.dataset.datum_to_img(blob)
     label_true_viz = label2rgb(label_true, img, bg_label=0)
     label_true_viz[label_true == 0] = 0
     label_true_viz = (label_true_viz * 255).astype(np.uint8)
     label = cuda.to_cpu(self.model.score.data)[0].argmax(axis=0)
     label_viz = label2rgb(label, img, bg_label=0)
     label_viz[label == 0] = 0
     label_viz = (label_viz * 255).astype(np.uint8)
     hline = np.zeros((5, img.shape[1], 3), dtype=np.uint8)
     hline.fill(255)
     imsave(
         osp.join(self.log_dir, 'visualize_{0}.jpg'.format(self.i_iter)),
         np.vstack([img, hline, label_true_viz, hline, label_viz, hline]))
     log = dict(
         i_iter=self.i_iter,
         type=type,
         loss=np.array(result['loss']).mean(),
         acc=np.array(result['acc']).mean(),
         acc_cls=np.array(result['acc_cls']).mean(),
         iu=np.array(result['iu']).mean(),
         fwavacc=np.array(result['fwavacc']).mean(),
     )
     print(log_templ.format(**log))
     self.logfile.write(
         '{i_iter},{type},{loss},{acc},{acc_cls},{iu},{fwavacc}\n'
         .format(**log))
Developer: barongeng, Project: fcn, Lines: 46, Source: trainer.py


Example 20: forward

 def forward(self, bottom, top):
     # bottom[0]: images N*3*W*H
     # bottom[1]: prediction N*1*W*H
     n = bottom[0].data.shape[0]
     for i in range(n):
         labels = segmentation.slic( bottom[0].data[i].transpose((1,2,0)), 
                 compactness=self.compactness, n_segments=self.n_segs)
         top[0].data[i, ...] = color.label2rgb(labels, bottom[1].data[i].transpose((1,2,0)), kind='avg').transpose((2,0,1)) #.reshape(top[0].data[i].shape)
Developer: AmirooR, Project: coco_transformations, Lines: 8, Source: superpixelisation_layer.py



Note: The skimage.color.label2rgb examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.

