Python exposure.rescale_intensity Function Code Examples


This article collects typical usage examples of the Python function skimage.exposure.rescale_intensity. If you are wondering what exactly rescale_intensity does, how to call it, or what working code looks like, the curated examples below should help.



The following section presents 20 code examples of the rescale_intensity function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
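
Before the project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) of what rescale_intensity itself does: it linearly maps pixel intensities from an input range (in_range, by default the image's own min/max) to an output range (out_range, by default the full range of the image dtype), clipping values that fall outside in_range.

import numpy as np
from skimage import exposure

img = np.array([51, 102, 153], dtype=np.uint8)

# Default: stretch the image's own min/max to the full uint8 range.
print(exposure.rescale_intensity(img))                           # [  0 127 255]

# Explicit input range: values at or above 102 saturate to 255.
print(exposure.rescale_intensity(img, in_range=(0, 102)))

# Float output range instead of the dtype range.
print(exposure.rescale_intensity(img * 1.0, out_range=(0, 1)))   # [0.  0.5 1. ]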

Example 1: autolevels

def autolevels(image,minPercent=2,maxPercent=98,funcName='mean',perChannel=False):
    '''
    Rescale intensity of an image. For RGB images, the new limits are calculated 
    per channel and then mean or median of these limits are applied to the whole 
    image (if perChannel option is False).
    '''
    # dictionary of functions
    funcs = {'mean':np.mean,'median':np.median,'min':np.min,'max':np.max}
    
    # calculate percentiles (returns 3 values for RGB pictures or vectors, 1 for grayscale images)
    if image.shape[1] == 3:
        pMin,pMax = np.percentile(image,(minPercent, maxPercent),axis=0)
    else:
        pMin,pMax = np.percentile(image,(minPercent, maxPercent),axis=(0,1))

    # Apply normalisation
    if not perChannel: # finds new min and max using selected function applied to all channels
        newMin = funcs[funcName](pMin)
        newMax = funcs[funcName](pMax)
        auto = exposure.rescale_intensity(image,in_range=(newMin,newMax)) 

    else: # applies a rescale on each channel separately
        r_channel = exposure.rescale_intensity(image[:,:,0], in_range=(pMin[0],pMax[0])) 
        g_channel = exposure.rescale_intensity(image[:,:,1], in_range=(pMin[1],pMax[1])) 
        b_channel = exposure.rescale_intensity(image[:,:,2], in_range=(pMin[2],pMax[2])) 
        auto = np.stack((r_channel,g_channel,b_channel),axis=2)

    return auto 
Developer: jobar8 | Project: graphics | Lines: 28 | Source: graphics.py
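
A quick usage sketch for the autolevels helper above (hypothetical, not part of the original project; it assumes the numpy/exposure imports used by the snippet are in scope and uses a sample image shipped with scikit-image):

from skimage import data

rgb = data.astronaut()                 # sample RGB image (uint8, 512 x 512 x 3)
auto_global = autolevels(rgb)          # one common (min, max) pair applied to all channels
auto_perchan = autolevels(rgb, minPercent=1, maxPercent=99, perChannel=True)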


Example 2: mod_zedge

def mod_zedge(composite, mod_id, algorithm, **kwargs):

    zedge_channel, zedge_channel_created = composite.channels.get_or_create(name="-zedge")

    for t in range(composite.series.ts):
        print("step02 | processing mod_zedge t{}/{}...".format(t + 1, composite.series.ts), end="\r")

        zdiff_mask = composite.masks.get(channel__name__contains=kwargs["channel_unique_override"], t=t).load()
        zbf = exposure.rescale_intensity(composite.gons.get(channel__name="-zbf", t=t).load() * 1.0)
        zedge = zbf.copy()

        binary_mask = zdiff_mask > 0
        outside_edge = distance_transform_edt(dilate(edge_image(binary_mask), iterations=4))
        outside_edge = 1.0 - exposure.rescale_intensity(outside_edge * 1.0)
        zedge *= outside_edge * outside_edge

        zedge_gon, zedge_gon_created = composite.gons.get_or_create(
            experiment=composite.experiment, series=composite.series, channel=zedge_channel, t=t
        )
        zedge_gon.set_origin(0, 0, 0, t)
        zedge_gon.set_extent(composite.series.rs, composite.series.cs, 1)

        zedge_gon.array = zedge.copy()
        zedge_gon.save_array(composite.series.experiment.composite_path, composite.templates.get(name="source"))
        zedge_gon.save()
Developer: apollo-dev | Project: apollo | Lines: 25 | Source: algorithms.py


Example 3: edge

def edge():

	#plt.switch_backend('MacOSX')
	image = io.imread(path + "bibme0.png")
	print(type(image))
	print(image.shape)
#	edge_roberts = roberts(image)
#	edge_sobel = sobel(image)

	fig = plt.figure(figsize=(14, 7))
	ax_each = fig.add_subplot(121, adjustable='box-forced')
	ax_hsv = fig.add_subplot(122, sharex=ax_each, sharey=ax_each,
	                         adjustable='box-forced')

	# We use 1 - sobel_each(image)
	# but this will not work if image is not normalized
	ax_each.imshow(rescale_intensity(1 - sobel_gray(image)), cmap=plt.cm.gray)
	#ax_each.imshow(sobel_each(image))
	ax_each.set_xticks([]), ax_each.set_yticks([])
	ax_each.set_title("Sobel filter computed\n on individual RGB channels")
	
	
	# We use 1 - sobel_hsv(image) but this will not work if image is not normalized
	ax_hsv.imshow(rescale_intensity(1 - sobel_gray(image)), cmap=plt.cm.gray)
	ax_hsv.set_xticks([]), ax_hsv.set_yticks([])
	ax_hsv.set_title("Sobel filter computed\n on (V)alue converted image (HSV)")
	
	fig.savefig(out_path + 'sobel_gray.png')
	plt.show()
Developer: eason001 | Project: imBot | Lines: 29 | Source: imgpro.py


Example 4: _write_image

    def _write_image(self, img_data, filename, img_format=None, dtype=None):
        """
        Output image data to a file, in a given image format.
        Assumes that the output directory exists (must be checked before).

        @param img_data :: image data in the usual numpy representation
        @param filename :: file name, including directory and extension
        @param img_format :: image file format
        @param dtype :: can be used to force a pixel type, otherwise the type
                        of the input data is used

        Returns:: name of the file saved
        """
        if not img_format:
            img_format = self.default_out_format
        filename = filename + '.' + img_format

        if dtype and img_data.dtype != dtype:
            img_data = np.array(img_data, dtype=dtype)

        if img_format == 'tiff' and _USING_PLUGIN_TIFFFILE:
            img_data = exposure.rescale_intensity(img_data, out_range='uint16')
            skio.imsave(filename, img_data, plugin='tifffile')
        else:
            img_data = exposure.rescale_intensity(img_data, out_range='uint16')
            skio.imsave(filename, img_data)

        return filename
Developer: spaceyatom | Project: mantid | Lines: 28 | Source: energy_bands_aggregator.py


Example 5: rgb2he2

def rgb2he2(img):
    # This implementation follows http://web.hku.hk/~ccsigma/color-deconv/color-deconv.html

    assert (img.ndim == 3)
    assert (img.shape[2] == 3)

    height, width, _ = img.shape

    img = -np.log((img + 1.0) / img.max())

    # the following lines are replaced with the final result,
    # to speed up computations
    #
    # he = np.array([0.550, 0.758, 0.351]); he /= norm(he)
    # eo = np.array([0.398, 0.634, 0.600]); eo /= norm(eo)
    # bg = np.array([0.754, 0.077, 0.652]); bg /= norm(bg)
    #
    # M = np.hstack((he.reshape(3,1), eo.reshape(3,1), bg.reshape(3,1)))
    # D = alg.inv(M)
    #
    D = np.array([[ 1.92129515,  1.00941672, -2.34107612],
                  [-2.34500192,  0.47155124,  2.65616872],
                  [ 1.21495282, -0.99544467,  0.2459345 ]])

    rgb = img.swapaxes(2, 0).reshape((3, height*width))
    heb = np.dot(D, rgb)
    res_img = heb.reshape((3, width, height)).swapaxes(0, 2)

    return rescale_intensity(res_img[:,:,0], out_range=(0,1)), \
           rescale_intensity(res_img[:,:,1], out_range=(0,1)), \
           rescale_intensity(res_img[:,:,2], out_range=(0,1))
Developer: gitter-badger | Project: WSItk | Lines: 31 | Source: he.py


Example 6: handle

	def handle(self, *args, **options):
		# vars
		experiment_name = options['expt']
		series_name = options['series']
		t = options['t']

		if experiment_name!='' and series_name!='':
			experiment = Experiment.objects.get(name=experiment_name)
			series = experiment.series.get(name=series_name)

			# select composite
			composite = series.composites.get()

			zmean = exposure.rescale_intensity(composite.gons.get(channel__name='-zmean', t=t).load() * 1.0)
			zmod = exposure.rescale_intensity(composite.gons.get(channel__name='-zmod', t=t).load() * 1.0)

			zdiff = np.zeros(zmean.shape)
			for unique in np.unique(zmod):
				print(unique, len(np.unique(zmod)))
				zdiff[zmod==unique] = np.mean(zmean[zmod==unique]) / np.sum(zmean)

			plt.imshow(zdiff, cmap='Greys_r')
			plt.show()

			# imsave('zdiff.tiff', zdiff)

		else:
			print('Please enter an experiment')
Developer: apollo-dev | Project: img-base | Lines: 28 | Source: test_zdiff.py


Example 7: juntarcanais

def juntarcanais(c1, c2):


    h = exposure.rescale_intensity(c1, out_range=(0, 1))
    d = exposure.rescale_intensity(c2, out_range=(0, 1))
    zdh = np.dstack((np.zeros_like(h), d, h))

    return zdh
Developer: ssscassio | Project: PathoSpotter | Lines: 8 | Source: extrairmagenta.py


Example 8: handle

	def handle(self, *args, **options):
		# vars
		experiment_name = options['expt']
		series_name = options['series']
		t = options['t']

		R = 1
		delta_z = -8
		# sigma = 5

		if experiment_name!='' and series_name!='':
			experiment = Experiment.objects.get(name=experiment_name)
			series = experiment.series.get(name=series_name)

			# select composite
			composite = series.composites.get()

			# load gfp
			gfp_gon = composite.gons.get(t=t, channel__name='0')
			gfp_start = exposure.rescale_intensity(gfp_gon.load() * 1.0)
			print('loaded gfp...')

			# load bf
			bf_gon = composite.gons.get(t=t, channel__name='1')
			bf = exposure.rescale_intensity(bf_gon.load() * 1.0)
			print('loaded bf...')

			for sigma in [0, 5, 10, 20]:
				gfp = gf(gfp_start, sigma=sigma) # <<< SMOOTHING
				for level in range(gfp.shape[2]):
					print('level {} {}...'.format(R, level))
					gfp[:,:,level] = convolve(gfp[:,:,level], np.ones((R,R)))

				# initialise images
				Z = np.zeros(composite.series.shape(d=2), dtype=int)
				Zmean = np.zeros(composite.series.shape(d=2))
				Zbf = np.zeros(composite.series.shape(d=2))

				Z = np.argmax(gfp, axis=2) + delta_z

				# outliers
				Z[Z<0] = 0
				Z[Z>composite.series.zs-1] = composite.series.zs-1

				for level in range(bf.shape[2]):
					print('level {}...'.format(level))
					bf_level = bf[:,:,level]
					Zbf[Z==level] = bf_level[Z==level]

				Zmean = 1 - np.mean(gfp, axis=2) / np.max(gfp, axis=2)

				imsave('zbf_R-{}_sigma-{}_delta_z{}.png'.format(R, sigma, delta_z), Zbf)

			# plt.imshow(Zbf, cmap='Greys_r')
			# plt.show()

		else:
			print('Please enter an experiment')
Developer: apollo-dev | Project: img-base | Lines: 58 | Source: test_zmod.py


Example 9: equalize_adapthist

def equalize_adapthist(image, ntiles_x=8, ntiles_y=8, clip_limit=0.01,
                       nbins=256):
    """Contrast Limited Adaptive Histogram Equalization.

    Parameters
    ----------
    image : array-like
        Input image.
    ntiles_x : int, optional
        Number of tile regions in the X direction.  Ranges between 2 and 16.
    ntiles_y : int, optional
        Number of tile regions in the Y direction.  Ranges between 2 and 16.
    clip_limit : float: optional
        Clipping limit, normalized between 0 and 1 (higher values give more
        contrast).
    nbins : int, optional
        Number of gray bins for histogram ("dynamic range").

    Returns
    -------
    out : ndarray
        Equalized image.

    Notes
    -----
    * The algorithm relies on an image whose rows and columns are even
      multiples of the number of tiles, so the extra rows and columns are left
      at their original values, thus  preserving the input image shape.
    * For color images, the following steps are performed:
       - The image is converted to LAB color space
       - The CLAHE algorithm is run on the L channel
       - The image is converted back to RGB space and returned
    * For RGBA images, the original alpha channel is removed.

    References
    ----------
    .. [1] http://tog.acm.org/resources/GraphicsGems/gems.html#gemsvi
    .. [2] https://en.wikipedia.org/wiki/CLAHE#CLAHE
    """
    args = [None, ntiles_x, ntiles_y, clip_limit * nbins, nbins]
    if image.ndim > 2:
        lab_img = color.rgb2lab(skimage.img_as_float(image))
        l_chan = lab_img[:, :, 0]
        l_chan /= np.max(np.abs(l_chan))
        l_chan = skimage.img_as_uint(l_chan)
        args[0] = rescale_intensity(l_chan, out_range=(0, NR_OF_GREY - 1))
        new_l = _clahe(*args).astype(float)
        new_l = rescale_intensity(new_l, out_range=(0, 100))
        lab_img[:new_l.shape[0], :new_l.shape[1], 0] = new_l
        image = color.lab2rgb(lab_img)
        image = rescale_intensity(image, out_range=(0, 1))
    else:
        image = skimage.img_as_uint(image)
        args[0] = rescale_intensity(image, out_range=(0, NR_OF_GREY - 1))
        out = _clahe(*args)
        image[:out.shape[0], :out.shape[1]] = out
        image = rescale_intensity(image)
    return image
Developer: 4rozenwolves | Project: scikit-image | Lines: 58 | Source: _adapthist.py


Example 10: _color_correction

 def _color_correction(self, band, band_id, low, coverage):
     self.output("Color correcting band %s" % band_id, normal=True, color='green', indent=1)
     p_low, cloud_cut_low = self._percent_cut(band, low, 100 - (coverage * 3 / 4))
     temp = numpy.zeros(numpy.shape(band), dtype=numpy.uint16)
     cloud_divide = 65000 - coverage * 100
     mask = numpy.logical_and(band < cloud_cut_low, band > 0)
     temp[mask] = rescale_intensity(band[mask], in_range=(p_low, cloud_cut_low), out_range=(256, cloud_divide))
     temp[band >= cloud_cut_low] = rescale_intensity(band[band >= cloud_cut_low], out_range=(cloud_divide, 65535))
     return temp
Developer: spgriffin | Project: landsat-util | Lines: 9 | Source: image.py


Example 11: equalize_adapthist

def equalize_adapthist(image, ntiles_x=8, ntiles_y=8, clip_limit=0.01,
                       nbins=256):
    args = [None, ntiles_x, ntiles_y, clip_limit * nbins, nbins]
    image = skimage.img_as_uint(image)
    args[0] = rescale_intensity(image, out_range=(0, NR_OF_GREY - 1))
    out = _clahe(*args)
    image[:out.shape[0], :out.shape[1]] = out
    image = rescale_intensity(image)
    return image
Developer: karthik | Project: scikitimage | Lines: 9 | Source: Pre-Process_Full.py


Example 12: equalize_adapthist

def equalize_adapthist(image, ntiles_x=8, ntiles_y=8, clip_limit=0.01,
                       nbins=256):
    """Contrast Limited Adaptive Histogram Equalization (CLAHE).

    An algorithm for local contrast enhancement, that uses histograms computed
    over different tile regions of the image. Local details can therefore be
    enhanced even in regions that are darker or lighter than most of the image.

    Parameters
    ----------
    image : array-like
        Input image.
    ntiles_x : int, optional
        Number of tile regions in the X direction.  Ranges between 1 and 16.
    ntiles_y : int, optional
        Number of tile regions in the Y direction.  Ranges between 1 and 16.
    clip_limit : float: optional
        Clipping limit, normalized between 0 and 1 (higher values give more
        contrast).
    nbins : int, optional
        Number of gray bins for histogram ("dynamic range").

    Returns
    -------
    out : ndarray
        Equalized image.

    See Also
    --------
    equalize_hist, rescale_intensity

    Notes
    -----
    * For color images, the following steps are performed:
       - The image is converted to HSV color space
       - The CLAHE algorithm is run on the V (Value) channel
       - The image is converted back to RGB space and returned
    * For RGBA images, the original alpha channel is removed.
    * The CLAHE algorithm relies on image blocks of equal size.  This may
      result in extra border pixels that would not be handled.  In that case,
      we pad the image with a repeat of the border pixels, apply the
      algorithm, and then trim the image to original size.

    References
    ----------
    .. [1] http://tog.acm.org/resources/GraphicsGems/gems.html#gemsvi
    .. [2] https://en.wikipedia.org/wiki/CLAHE#CLAHE
    """
    image = skimage.img_as_uint(image)
    image = rescale_intensity(image, out_range=(0, NR_OF_GREY - 1))
    out = _clahe(image, ntiles_x, ntiles_y, clip_limit * nbins, nbins)
    image[:out.shape[0], :out.shape[1]] = out
    image = skimage.img_as_float(image)
    return rescale_intensity(image)
Developer: JeanKossaifi | Project: scikit-image | Lines: 54 | Source: _adapthist.py
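
Examples 9, 11 and 12 above are older internal versions of scikit-image's own CLAHE code. Recent scikit-image releases expose the same functionality directly as exposure.equalize_adapthist (the ntiles_x/ntiles_y arguments were later replaced by a kernel_size parameter); below is a minimal sketch of the public API, with rescale_intensity stretching the float result back to an 8-bit range:

from skimage import data, exposure

img = data.moon()                                           # low-contrast grayscale sample
clahe = exposure.equalize_adapthist(img, clip_limit=0.03)   # float image in [0, 1]
img8 = exposure.rescale_intensity(clahe, out_range=(0, 255)).astype('uint8')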


Example 13: _get_scalebar

 def _get_scalebar(self):
     """Get the length in pixels of the image scale bar"""
     box=(0,419,519,520) #row where scalebar exists
     im=self.crop_image(box=box, copy=True)
     im=skimage.img_as_float(im)
     im=exposure.rescale_intensity(im,in_range=(0.49,0.5)) #saturate black and white pixels
     im=exposure.rescale_intensity(im) #make sure they're black and white
     im=np.diff(im[0]) #1d numpy array, differences
     lim=[np.where(im>0.9)[0][0],
          np.where(im<-0.9)[0][0]] #first occurance of both cases
     assert len(lim)==2, 'Couldn\'t find scalebar'
     return lim[1]-lim[0]
Developer: gb119 | Project: kermit | Lines: 12 | Source: core.py


Example 14: watershed

def watershed(image):
    """ the watershed algorithm """
    if len(image.shape) != 2:
        raise TypeError("The input image must be gray-scale ")

    h, w = image.shape
    image = cv2.equalizeHist(image)
    image = denoise_bilateral(image, sigma_range=0.1, sigma_spatial=10)
    image = rescale_intensity(image)
    image = img_as_ubyte(image)
    image = rescale_intensity(image)
    # com.debug_im(image)

    _, thres = cv2.threshold(image, 80, 255, cv2.THRESH_BINARY_INV)

    distance = ndi.distance_transform_edt(thres)
    local_maxi = peak_local_max(distance, indices=False,
                                labels=thres,
                                min_distance=5)

    # com.debug_im(thres)
    # implt = plt.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
    # plt.show()

    markers = ndi.label(local_maxi, np.ones((3, 3)))[0]
    labels = ws(-distance, markers, mask=thres)
    labels = np.uint8(labels)
    # result = np.round(255.0 / np.amax(labels) * labels).astype(np.uint8)
    # com.debug_im(result)

    segments = []
    for idx in range(1, np.amax(labels) + 1):

        indices = np.where(labels == idx)
        left = np.amin(indices[1])
        right = np.amax(indices[1])
        top = np.amin(indices[0])
        down = np.amax(indices[0])

        # region = labels[top:down, left:right]
        # m = (region > 0) & (region != idx)
        # region[m] = 0
        # region[region >= 1] = 1
        region = image[top:down, left:right]
        cont = Contour(mask=region)
        cont.lt = [left, top]
        cont.rb = [right, down]
        segments.append(cont)

    return segments
Developer: dangkhoasdc | Project: CellCounter | Lines: 50 | Source: watershed.py


Example 15: proc_mbi

def proc_mbi(imgarray):
    # Normalize image:
    img = img_as_float(imgarray,force_copy=True)
    # Image equalization (Contrast stretching):
    p2,p98 = np.percentile(img, (2,98))
    img = exposure.rescale_intensity(img, in_range=(p2, p98), out_range=(0, 1))
    # Gamma correction:
    #img = exposure.adjust_gamma(img, 0.5)
    # Or Sigmoid correction:
    img = exposure.adjust_sigmoid(img)
    
    print "Init Morph Proc..."
    sizes = range(2,40,5)
    angles = [0,18,36,54,72,90,108,126,144,162]
    szimg = img.shape
    all_thr = np.zeros((len(sizes),szimg[0], szimg[1])).astype('float64')
    all_dmp = np.zeros((len(sizes) - 1,szimg[0], szimg[1])).astype('float64')
    
    idx = 0
    for sz in sizes:
        print(sz)
        builds_by_size = np.zeros(szimg).astype('float64')
        for ang in angles:
            print(ang)
            stel = ia870.iaseline(sz, ang)
            oprec = opening_by_reconstruction(img, stel)
            thr = np.absolute(img-oprec)
            builds_by_size += thr
        all_thr[idx,:,:] = (builds_by_size / len(angles))
        if idx>0:
            all_dmp[idx-1,:,:] = all_thr[idx,:,:] - all_thr[idx-1,:,:]
        idx += 1
    mbi = np.mean(all_dmp, axis=0)
    return mbi
Developer: jorgeop27 | Project: geospatial_analysis_toolbox | Lines: 34 | Source: mbi.py


Example 16: warp_rect

    def warp_rect(self, u_cont):
        pts = u_cont.reshape(4, 2)
        rect = np.zeros((4, 2), dtype="float32")

        s = pts.sum(axis=1)
        rect[0] = pts[np.argmin(s)]
        rect[2] = pts[np.argmax(s)]

        diff = np.diff(pts, axis=1)
        rect[1] = pts[np.argmin(diff)]
        rect[3] = pts[np.argmax(diff)]

        rect *= self.ratio

        (tl, tr, br, bl) = rect
        width_a = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
        width_b = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
        height_a = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
        height_b = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
        max_w = max(int(width_a), int(width_b))
        max_h = max(int(height_a), int(height_b))

        dst = np.array([
            [0, 0], [max_w - 1, 0], [max_w - 1, max_h - 1], [0, max_h - 1]],
            dtype="float32")

        m = cv2.getPerspectiveTransform(rect, dst)
        warp = cv2.warpPerspective(self.orig, m, (max_w, max_h))
        warp = exposure.rescale_intensity(warp, out_range=(0, 255))
        bop = 15
        light = 15
        return cv2.copyMakeBorder(warp, bop, bop, light, light, cv2.BORDER_CONSTANT, value=(255, 255, 0))
Developer: frc5431 | Project: 2016StrongholdAll | Lines: 32 | Source: image_proc_3.py


Example 17: print_hog_image

def print_hog_image(image):
    """
    image is expected to be in it's original format

    function prints hog image
    """
    print(image.shape)
    image = color.rgb2gray(image)

    fd, hog_image = hog(image, orientations=8, pixels_per_cell=(4, 4),
                        cells_per_block=(1, 1), visualise=True, normalise=True)
    print "finished hog..."
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True, sharey=True)

    ax1.axis('off')
    ax1.imshow(image, cmap=plt.cm.gray)
    ax1.set_title('Input image')
    ax1.set_adjustable('box-forced')

    # Rescale histogram for better display
    hog_image_rescaled = exposure.rescale_intensity(hog_image, in_range=(0, 0.02))

    ax2.axis('off')
    ax2.imshow(hog_image_rescaled, cmap=plt.cm.gray)
    ax2.set_title('Histogram of Oriented Gradients')
    ax1.set_adjustable('box-forced')
    plt.show()
Developer: phrayezzen | Project: COMP540 | Lines: 27 | Source: hogify.py


Example 18: embed

 def embed(self, img, payload, k = 6, tv_denoising_weight = 4, rescale = True):
     if len(payload) > self.max_payload:
         raise ValueError("payload too long")
     padded = bytearray(payload) + b"\x00" * (self.max_payload - len(payload))
     encoded = self.rscodec.encode(padded)
     
     if img.ndim == 2:
         output = self._embed(img, encoded, k)
     elif img.ndim == 3:
         output = numpy.zeros(img.shape, dtype=float)
         for i in range(img.shape[2]):
             output[:,:,i] = self._embed(img[:,:,i], encoded, k)
         #y, cb, cr = rgb_to_ycbcr(img)
         #y2 = self._embed(y, encoded, k)
         #cb = self._embed(cb, encoded, k)
         #cr = self._embed(cr, encoded, k)
         #y2 = rescale_intensity(y2, out_range = (numpy.min(y), numpy.max(y)))
         #Cb2 = rescale_intensity(Cb2, out_range = (numpy.min(Cb), numpy.max(Cb)))
         #Cr2 = rescale_intensity(Cr2, out_range = (numpy.min(Cr), numpy.max(Cr)))
         #output = ycbcr_to_rgb(y2, cb, cr)
     else:
         raise TypeError("img must be a 2d or 3d array")
     
     #if tv_denoising_weight > 0:
     #    output = tv_denoise(output, tv_denoising_weight)
     if rescale:
         output = rescale_intensity(output, out_range = (numpy.min(img), numpy.max(img)))
     #return toimage(output,cmin=0,cmax=255)
     return output
Developer: KWMalik | Project: tau | Lines: 29 | Source: watermarker.py


Example 19: plot_aop_rgb

def plot_aop_rgb(rgbArray,ext,ls_pct=5,plot_title=''):
    
    ''' read in and plot 3 bands of a reflectance array as an RGB image
    --------
    Parameters
    --------
        rgbArray: ndarray (m x n x 3)
            3-band array of reflectance values, created from stack_rgb
        ext: tuple
            Extent of reflectance data to be plotted (xMin, xMax, yMin, yMax) 
            Stored in metadata['spatial extent'] from aop_h5refl2array function
        ls_pct: integer or float, optional
            linear stretch percent
        plot_title: string, optional
            image title

    Returns 
    --------
        plots RGB image of 3 bands of reflectance data
    --------

    Examples:
    --------
    >>> plot_aop_rgb(SERCrgb,
                     sercMetadata['spatial extent'],
                     plot_title = 'SERC RGB')'''
    
    pLow, pHigh = np.percentile(rgbArray[~np.isnan(rgbArray)], (ls_pct,100-ls_pct))
    img_rescale = exposure.rescale_intensity(rgbArray, in_range=(pLow,pHigh))
    plt.imshow(img_rescale,extent=ext)
    plt.title(plot_title + '\n Linear ' + str(ls_pct) + '% Contrast Stretch'); 
    ax = plt.gca(); ax.ticklabel_format(useOffset=False, style='plain') 
    rotatexlabels = plt.setp(ax.get_xticklabels(),rotation=90) 
Developer: NEONInc | Project: NEON-Data-Skills | Lines: 33 | Source: neon_aop_hyperspectral.py


Example 20: saveimage_16bit

def saveimage_16bit(image,
                    fname='Test.tif',
                    folder=None,
                    rescale=True,
                    dtype=np.uint16,
                    imager=None):
    '''
    Saves an images as a 16 bit tiff
    '''

    # rotate the reverse direction
    image = tf.rotate(image, -1 * _imager_rot[imager])

    # if scaled to 0,1 then rescale back to 16 bit
    if rescale:
        # print 'rescaled'
        image = rescale_intensity(
            image, in_range=(0, 1), out_range=(0, 2**16 - 1))

    # Ensureing all the values are integers
    image = image.astype(dtype)

    folder = folder or ''

    image = io.imsave(
        os.path.join(folder, fname), image)
Developer: MK8J | Project: PV_analysis | Lines: 26 | Source: IO.py



Note: The skimage.exposure.rescale_intensity examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Refer to the corresponding project's license before redistributing or reusing the code; do not reproduce without permission.

