
Python numpy.uint8 Function Code Examples


This article collects and summarizes typical usage examples of the numpy.uint8 function in Python. If you have been wondering what exactly uint8 does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.



The following presents 20 code examples of the uint8 function, drawn from open-source projects and ordered by popularity.
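Before turning to the project examples, here is a minimal standalone sketch (written for this article, not taken from any of the projects below) of what calling np.uint8 does: it converts scalars and arrays to the unsigned 8-bit integer type, whose values span 0 to 255 and whose arithmetic wraps modulo 256.

    import numpy as np

    flag = np.uint8(1)                        # scalar conversion, e.g. the HDF5 attributes in Example 1
    print(flag, flag.dtype)                   # 1 uint8

    img = np.array([[0.0, 0.5], [0.9, 1.0]])
    img_u8 = np.uint8(img * 255)              # cast a [0, 1] float image to bytes, as in Examples 8, 10 and 14
    print(img_u8)                             # [[  0 127] [229 255]] -- truncates toward zero

    a = np.array([250, 250], dtype=np.uint8)
    print(a + np.uint8(10))                   # [4 4] -- uint8 arithmetic wraps modulo 256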

Example 1: write

    def write(self, struct_name, data_dict):
        """write data_dict under the group struct_name in the open hdf5 file

        :param struct_name: the identification of the structure to write in the hdf5 file
        :param data_dict: the Python dictionary containing the information to write
        """
        if self.file is None:
            info = "No file currently open"
            logger.info(info)
            return

        group_l1 = self.file.create_group(struct_name)
        group_l1.attrs['OCTAVE_GLOBAL'] = np.uint8(1)
        group_l1.attrs['OCTAVE_NEW_FORMAT'] = np.uint8(1)
        group_l1.create_dataset("type", data=np.string_('scalar struct'), dtype="|S14")
        group_l2 = group_l1.create_group('value')
        for ftparams in data_dict:
            group_l3 = group_l2.create_group(ftparams)
            group_l3.attrs['OCTAVE_NEW_FORMAT'] = np.uint8(1)
            if type(data_dict[ftparams]) == str:
                group_l3.create_dataset("type", (), data=np.string_('sq_string'), dtype="|S10")
                if self.octave_targetted_version < 3.8:
                    group_l3.create_dataset("value", data=np.string_(data_dict[ftparams] + '0'))
                else:
                    group_l3.create_dataset("value", data=np.string_(data_dict[ftparams]))
            else:
                group_l3.create_dataset("type", (), data=np.string_('scalar'), dtype="|S7")
                group_l3.create_dataset("value", data=data_dict[ftparams])
Developer: vasole, Project: silx, Lines of code: 28, Source file: octaveh5.py


Example 2: __init__

    def __init__(self, mode='dummy', address=None, high_duration=0.001,
                 verbose=None):
        self._stamp_trigger = self._parallel_trigger
        if mode == 'parallel':
            if 'Linux' in platform.system():
                address = '/dev/parport0' if address is None else address
                import parallel as _p
                self._port = _p.Parallel(address)
                self._set_data = self._port.setData
            elif 'Windows' in platform.system():
                from ctypes import windll
                if not hasattr(windll, 'inpout32'):
                    raise SystemError('Must have inpout32 installed')

                addr = '0x0378' if address is None else address  # keep as a string so the '0x' check below works
                base = int(addr, 16) if addr[:2] == '0x' else addr
                self._port = windll.inpout32
                mask = np.uint8(1 << 5 | 1 << 6 | 1 << 7)
                # Use ECP to put the port into byte mode
                val = int((self._port.Inp32(base + 0x402) & ~mask) | (1 << 5))
                self._port.Out32(base + 0x402, val)

                # Now to make sure the port is in output mode we need to make
                # sure that bit 5 of the control register is not set
                val = int(self._port.Inp32(base + 2) & ~np.uint8(1 << 5))
                self._port.Out32(base + 2, val)

                def _set_data(data):
                    return self._port.Out32(base, data)
                self._set_data = _set_data
            else:
                raise NotImplementedError
        else:  # mode == 'dummy':
            self._stamp_trigger = self._dummy_trigger
        self.high_duration = high_duration
Developer: kdmarrett, Project: expyfun, Lines of code: 35, Source file: _trigger_controllers.py


Example 3: process

    def process(self):
        # capture
        ret, frame = self.cam.read()
        #frame = self.first
        frame = cv.resize(frame, None, fx=0.5, fy=0.5)
        frame = cv.flip(frame, 1)
        self.orig_frame = frame

        for click in (c for c in list(zip(self.col_defs, self.col_poss,
                                          self.col_disps))
                    if (c[0] != self.undefined_col).all()
                        and (c[1] != self.undefined_pos).all()):
            col = click[0]
            pos = click[1]
            disp = click[2]
            thresh = self.thresh_slide.get()
            hlower = (col[0, 0, 0] - thresh) % 180
            hupper = (col[0, 0, 0] + thresh) % 180

            # in case we have gone under 0 or over 180
            invert = hlower > hupper
            if invert:
                hlower, hupper = hupper, hlower
            lower = np.uint8([[[hlower, 20, 20]]])
            upper = np.uint8([[[hupper, 255, 255]]])

            hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
            mask = cv.inRange(hsv, lower, upper)
            if not invert:
                mask = cv.bitwise_not(mask)
            frame = cv.bitwise_and(frame, frame, mask=mask)
        self.frame = frame

        self.root.after(50, self.process)
Developer: jcuroboclub, Project: Robot-Tracker, Lines of code: 34, Source file: main.py


Example 4: testInt

    def testInt(self):
        num = np.int(2562010)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(127)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(2562010)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(2562010)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.int64(2562010)
        self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(255)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(2562010)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(2562010)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        num = np.uint64(2562010)
        self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Developer: paddymul, Project: pandas, Lines of code: 27, Source file: test_ujson.py
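A side note on Example 4: unlike the sized types (np.int8 through np.uint64), np.int was only an alias for Python's built-in int; it was deprecated in NumPy 1.20 and removed in 1.24, so on current NumPy the first two lines of this test need plain int instead. The uint8 assertions are unaffected.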


Example 5: crop_waffle

def crop_waffle(img):
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    greyscale = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    lower_yellow = np.array([0,50,50])
    upper_yellow = np.array([70,255,255])
    mask = cv2.inRange(hsv, np.uint8(lower_yellow), np.uint8(upper_yellow))
    kernel = np.ones((9,9),np.uint8)
    closed_mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    masked_img = cv2.bitwise_and(greyscale,greyscale,mask = closed_mask)
    [contours,hierarchy] = cv2.findContours(masked_img,cv.CV_RETR_EXTERNAL,cv.CV_CHAIN_APPROX_SIMPLE)
    #now find the largest contour
    max_area = 0
    max_contour = None
    for c in contours:
        #we change datatypes from numpy arrays to cv arrays and back because contour area only takes cv arrays.
        c = cv.fromarray(c)
        if cv.ContourArea(c) > max_area:
            max_contour = c
            max_area = cv.ContourArea(c)
    max_contour = np.asarray(max_contour)
    shape = img.shape
    largest_blob_mask = np.zeros((shape[0],shape[1],1),np.uint8)
    cv2.fillPoly(largest_blob_mask, pts =[max_contour], color=(255,255,255))
    print_rgb_hist(img,largest_blob_mask)
    return cv2.bitwise_and(img,img, mask= largest_blob_mask)
Developer: greenteawarrior, Project: readml-cv, Lines of code: 25, Source file: ExtractWaffles.py


Example 6: write_sequence_file

def write_sequence_file(awgData, fileName, miniLLRepeat=1):
	'''
	Main function to pack channel LLs into an APS h5 file.
	'''
	#Preprocess the sequence data to handle APS restrictions
	LLs12, repeat12, wfLib12 = preprocess(awgData['ch12']['linkList'],
		                                      awgData['ch12']['wfLib'],
		                                      awgData['ch12']['correctionT'])
	LLs34, repeat34, wfLib34 = preprocess(awgData['ch34']['linkList'],
		                                      awgData['ch34']['wfLib'],
		                                      awgData['ch34']['correctionT'])
	assert repeat12 == repeat34, 'Failed to unroll sequence'
	if repeat12 != 0:
		miniLLRepeat *= repeat12

	#Merge the marker data into the IQ linklists
	merge_APS_markerData(LLs12, awgData['ch1m1']['linkList'], 1)
	merge_APS_markerData(LLs12, awgData['ch2m1']['linkList'], 2)
	merge_APS_markerData(LLs34, awgData['ch3m1']['linkList'], 1)
	merge_APS_markerData(LLs34, awgData['ch4m1']['linkList'], 2)

	#Open the HDF5 file
	if os.path.isfile(fileName):
		os.remove(fileName)
	with h5py.File(fileName, 'w') as FID:

		#List of which channels we have data for
		#TODO: actually handle incomplete channel data
		channelDataFor = [1,2] if LLs12 else []
		channelDataFor += [3,4] if LLs34 else []
		FID['/'].attrs['Version'] = 2.1
		FID['/'].attrs['channelDataFor'] = np.uint16(channelDataFor)
		FID['/'].attrs['miniLLRepeat'] = np.uint16(miniLLRepeat - 1)

		#Create the waveform vectors
		wfInfo = []
		for wfLib in (wfLib12, wfLib34):
			wfInfo.append(create_wf_vector({key:wf.real for key,wf in wfLib.items()}))
			wfInfo.append(create_wf_vector({key:wf.imag for key,wf in wfLib.items()}))

		LLData = [LLs12, LLs34]
		repeats = [0, 0]
		#Create the groups and datasets
		for chanct in range(4):
			chanStr = '/chan_{0}'.format(chanct+1)
			chanGroup = FID.create_group(chanStr)
			chanGroup.attrs['isIQMode'] = np.uint8(1)
			#Write the waveformLib to file
			FID.create_dataset('{0}/waveformLib'.format(chanStr), data=wfInfo[chanct][0])

			#For A channels (1 & 3) we write link list data if we actually have any
			if (np.mod(chanct,2) == 0) and LLData[chanct//2]:
				groupStr = chanStr+'/linkListData'
				LLGroup = FID.create_group(groupStr)
				LLDataVecs, numEntries = create_LL_data(LLData[chanct//2], wfInfo[chanct][1], os.path.basename(fileName))
				LLGroup.attrs['length'] = numEntries
				for key,dataVec in LLDataVecs.items():
					FID.create_dataset(groupStr+'/' + key, data=dataVec)
			else:
				chanGroup.attrs['isLinkListData'] = np.uint8(0)
Developer: ahelsing, Project: QGL, Lines of code: 60, Source file: APSPattern.py


Example 7: partition_FOV_KMeans

    def partition_FOV_KMeans(self,tradeoff_weight=.5,fx=.25,fy=.25,n_clusters=4,max_iter=500):
        """
        Partition the FOV in clusters that are grouping pixels close in space and in mutual correlation

        Parameters
        ------------------------------
        tradeoff_weight: between 0 and 1; weights the relative contributions of distance and correlation in the overall metric
        fx,fy: downsampling factor to apply to the movie
        n_clusters,max_iter: KMeans algorithm parameters

        Outputs
        -------------------------------
        fovs: 2D array encoding the partitions of the FOV
        mcoef: matrix of pairwise correlation coefficients
        distanceMatrix: matrix of pixel distances

        Example

        """

        _,h1,w1=self.shape
        self.resize(fx,fy)
        T,h,w=self.shape
        Y=np.reshape(self,(T,h*w))
        mcoef=np.corrcoef(Y.T)
        idxA,idxB =  np.meshgrid(list(range(w)),list(range(h)));
        coordmat=np.vstack((idxA.flatten(),idxB.flatten()))
        distanceMatrix=euclidean_distances(coordmat.T);
        distanceMatrix=old_div(distanceMatrix,np.max(distanceMatrix))
        estim=KMeans(n_clusters=n_clusters,max_iter=max_iter);
        kk=estim.fit(tradeoff_weight*mcoef-(1-tradeoff_weight)*distanceMatrix)
        labs=kk.labels_
        fovs=np.reshape(labs,(h,w))
        fovs=cv2.resize(np.uint8(fovs),(w1,h1),old_div(1.,fx),old_div(1.,fy),interpolation=cv2.INTER_NEAREST)
        return np.uint8(fovs), mcoef, distanceMatrix
Developer: agiovann, Project: Constrained_NMF, Lines of code: 35, Source file: movies.py


Example 8: on_epoch_end

    def on_epoch_end(self, callback_data, model, epoch):
        # convert to numpy arrays
        data_batch = model.data_batch.get()
        noise_batch = model.noise_batch.get()
        # value transform
        data_batch = self._value_transform(data_batch)
        noise_batch = self._value_transform(noise_batch)
        # shape transform
        data_canvas = self._shape_transform(data_batch)
        noise_canvas = self._shape_transform(noise_batch)
        # plotting options
        im_args = dict(interpolation="nearest", vmin=0., vmax=1.)
        if self.nchan == 1:
            im_args['cmap'] = plt.get_cmap("gray")
        fname = self.filename+'_data_'+'{:03d}'.format(epoch)+'.png'
        Image.fromarray(np.uint8(data_canvas*255)).convert('RGB').save(fname)
        fname = self.filename+'_noise_'+'{:03d}'.format(epoch)+'.png'
        Image.fromarray(np.uint8(noise_canvas*255)).convert('RGB').save(fname)

        # plot logged WGAN costs if logged
        if model.cost.costfunc.func == 'wasserstein':
            giter = callback_data['gan/gen_iter'][:]
            nonzeros = np.where(giter)
            giter = giter[nonzeros]
            cost_dis = callback_data['gan/cost_dis'][:][nonzeros]
            w_dist = medfilt(np.array(-cost_dis, dtype='float64'), kernel_size=101)
            plt.figure(figsize=(400/self.dpi, 300/self.dpi), dpi=self.dpi)
            plt.plot(giter, -cost_dis, 'k-', lw=0.25)
            plt.plot(giter, w_dist, 'r-', lw=2.)
            plt.title(self.filename, fontsize=self.font_size)
            plt.xlabel("Generator Iterations", fontsize=self.font_size)
            plt.ylabel("Wasserstein estimate", fontsize=self.font_size)
            plt.margins(0, 0, tight=True)
            plt.savefig(self.filename+'_training.png', bbox_inches='tight')
            plt.close()
Developer: NervanaSystems, Project: neon, Lines of code: 35, Source file: plotting_callbacks.py
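A caution that applies to this example and the later ones that cast float images the same way (Examples 10, 14, 17 and 19): np.uint8(x * 255) truncates and wraps out-of-range values rather than saturating. A short sketch of the safer clipping pattern (np.clip is standard NumPy; the sample values are illustrative):

    import numpy as np

    x = np.array([0.5, 1.2])                          # 1.2 is out of range for a [0, 1] image
    print(np.uint8(x * 255))                          # [127  50] -- 306 wraps to 50 on typical platforms
    print(np.clip(x * 255, 0, 255).astype(np.uint8))  # [127 255] -- saturates instead of wrapping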


Example 9: calc_chans

    def calc_chans(self):
        """Calculate lockout and inclusion chan neighbourhoods, max number of chans to use,
        and define the spike record dtype"""
        sort = self.sort
        self.enabledSiteLoc = {}
        for chan in self.chans: # for all enabled chans
            self.enabledSiteLoc[chan] = sort.stream.probe.SiteLoc[chan] # get its (x, y)
        # distance matrix for the chans enabled for this search, sorted by chans:
        self.dm = DistanceMatrix(self.enabledSiteLoc)
        # dict of neighbourhood of chanis for each chani
        self.locknbhdi = {} # for lockout around a spike
        self.inclnbhdi = {} # for inclusion of wavedata as part of a spike
        maxnchansperspike = 0
        for chani, distances in enumerate(self.dm.data): # iterate over rows of distances
            # at what col indices does the returned row fall within lockr?:
            lockchanis, = np.uint8(np.where(distances <= self.lockr))
            # at what col indices does the returned row fall within inclr?:
            inclchanis, = np.uint8(np.where(distances <= self.inclr))
            self.locknbhdi[chani] = lockchanis
            self.inclnbhdi[chani] = inclchanis
            maxnchansperspike = max(maxnchansperspike, len(inclchanis))
        self.maxnchansperspike = maxnchansperspike

        self.SPIKEDTYPE = [('id', np.int32), ('nid', np.int16),
                           ('chan', np.uint8), ('nchans', np.uint8),
                           ('chans', np.uint8, self.maxnchansperspike),
                           ('chani', np.uint8),
                           ('t', np.int64), ('t0', np.int64), ('t1', np.int64),
                           ('dt', np.int16), # time between peaks, in us
                           ('tis', np.uint8, (self.maxnchansperspike, 2)), # peak positions
                           ('aligni', np.uint8),
                           ('V0', np.float32), ('V1', np.float32), ('Vpp', np.float32),
                           ('x0', np.float32), ('y0', np.float32),
                           ('sx', np.float32), ('sy', np.float32),
                           ]
Developer: nhazar, Project: spyke, Lines of code: 35, Source file: detect.py


Example 10: explain

def explain(model, img, topLabels, numSamples, numFeatures, hideRest, hideColor, positiveOnly):
            
    img, oldImg = transform_img_fn(img)
    img = img*(1./255)
    prediction = model.predict(img)
    explainer = lime_image.LimeImageExplainer()
    img = np.squeeze(img)
    explanation = explainer.explain_instance(img, model.predict, top_labels=topLabels, hide_color=hideColor, num_samples=numSamples)
    temp, mask = explanation.get_image_and_mask(getTopPrediction(prediction[0]), positive_only=positiveOnly, num_features=numFeatures, hide_rest=hideRest)
    tempMask = mask * 255
    temp = Image.fromarray(np.uint8(tempMask))
    temp = temp.resize((oldImg.width, oldImg.height))
    temp = image.img_to_array(temp)
    temp = temp * 1./255
    temp = temp.astype(np.int64)
    temp = np.squeeze(temp)
    oldImgArr = image.img_to_array(oldImg)
    oldImgArr = oldImgArr * (1./255)
    oldImgArr = oldImgArr.astype(np.float64)
    imgExplained = mark_boundaries(oldImgArr, temp)
    imgFinal = np.uint8(imgExplained*255)
    img = Image.fromarray(imgFinal)
    imgByteArr = io.BytesIO()
    img.save(imgByteArr, format='JPEG')
    imgByteArr = imgByteArr.getvalue()


    return imgByteArr
Developer: tobiasbaur, Project: nova, Lines of code: 28, Source file: ImageExplainerLime.py


Example 11: grabCut

def grabCut(img, rect=None, mask=None, ite=5):
    height, width, channels = img.shape
    # if no arguments, try to segment using a large rectangle
    if rect is None and mask is None:
        rect = (int(width*0.15), 15, int(width*0.85), height-15)
        initOpt = cv2.GC_INIT_WITH_RECT
    # if rectangle argument but no mask, init mask with rectangle
    elif mask is None:
        mask = np.zeros((height, width), np.uint8)
        initOpt = cv2.GC_INIT_WITH_RECT
    # if mask argument but no rectangle, use the mask and set rect to the full image
    elif rect is None:
        initOpt = cv2.GC_INIT_WITH_MASK
        rect = (0, 0, width, height)
        mask = np.uint8(mask)
    # if mask argument and rectangle, set pixels outside the mask as background
    else:
        mask = np.uint8(mask)
        rect = rectangleutil.checkRectangleBounds(rect, mask.shape)
        maskRect = rectangleutil.rectangle2mask(rect, mask.shape)
        mask[maskRect == 0] = cv2.GC_BGD
        initOpt = cv2.GC_INIT_WITH_MASK
    #imageblured = np.zeros(img.shape, img.dtype)
    #cv2.smooth(img, imageblured, cv.CV_GAUSSIAN, 5)
    tmp1 = np.zeros((1, 13 * 5))
    tmp2 = np.zeros((1, 13 * 5))
    cv2.grabCut(img, mask, rect, tmp1, tmp2, ite, initOpt)
    mask[mask == cv2.GC_BGD] = 0
    mask[mask == cv2.GC_PR_BGD] = 0
    mask[mask == cv2.GC_FGD] = 255
    mask[mask == cv2.GC_PR_FGD] = 255
    return mask
Developer: Tug, Project: image-extraction-server, Lines of code: 32, Source file: labelize.py


Example 12: __init__

	def __init__(self):
		self.stack = np.zeros(STACK_SIZE, dtype=np.uint8)
		self.sp = np.uint8(0)
		self.pc = np.uint8(0)
		self.s_regs = np.zeros(8, dtype=np.uint8)
		self.m_regs = np.zeros(8, dtype=np.uint8)
		self.progmem = np.zeros(PROG_SIZE, dtype=np.uint8)
Developer: mitre-cyber-academy, Project: 2013-grabbag-500, Lines of code: 7, Source file: vm.py


Example 13: get_depth

def get_depth():
    """
    Returns numpy ndarrays representing the raw and ranged depth images.
    Outputs:
        dmap:= distancemap in mm, 1L ndarray, dtype=uint16, min=0, max=2**12-1
        d4d := depth for dislay, 3L ndarray, dtype=uint8, min=0, max=255    
    Note1: 
        fromstring is faster than asarray or frombuffer
    Note2:     
        .reshape(120,160) #smaller image for faster response 
                OMAP/ARM default video configuration
        .reshape(240,320) # Used to MATCH RGB Image (OMAP/ARM)
                Requires .set_video_mode
    """
    dmap = np.fromstring(depth_stream.read_frame().get_buffer_as_uint16(),dtype=np.uint16).reshape(h,w)  # Works & It's FAST
    d4d  = dmap.astype(float) * 255 / (2**12 - 1) # Correct the range. Depth images are 12bits
    d4d  = cv2.cvtColor(np.uint8(d4d),cv2.COLOR_GRAY2RGB)
    
    temp1 = np.zeros(dmap.shape,dtype=np.uint8)
    temp1 = dmap - 2**8 #most significant: 2**8-2**15

    temp2 = dmap.copy() #least significant: 2**0-2**7
    temp1[temp1<0]=0
    temp2[temp2>255] = 0
    dmap = np.uint8(np.dstack((temp2,temp1,temp2)))
    #print dmap.shape, type(dmap), dmap.dtype
    return dmap, d4d
Developer: elmonkey, Project: MICU, Lines of code: 27, Source file: dev3_videos.py


Example 14: save_frame_and_response_map

def save_frame_and_response_map(frame, bbox, fig_n, crop_x, score, writer, fig):
    # fig = plt.figure(fig_n)
    plt.clf()
    ax = fig.add_subplot(131)
    ax.set_title('Tracked sequence')
    r = patches.Rectangle((bbox[0],bbox[1]), bbox[2], bbox[3], linewidth=2, edgecolor='r', fill=False)
    ax.imshow(np.uint8(frame))
    ax.add_patch(r)
    ax2 = fig.add_subplot(132)
    ax2.set_title('Context region')
    ax2.imshow(np.uint8(crop_x))
    ax2.spines['left'].set_position('center')
    ax2.spines['right'].set_color('none')
    ax2.spines['bottom'].set_position('center')
    ax2.spines['top'].set_color('none')
    ax2.set_yticklabels([])
    ax2.set_xticklabels([])
    ax3 = fig.add_subplot(133)
    ax3.set_title('Response map')
    ax3.spines['left'].set_position('center')
    ax3.spines['right'].set_color('none')
    ax3.spines['bottom'].set_position('center')
    ax3.spines['top'].set_color('none')
    ax3.set_yticklabels([])
    ax3.set_xticklabels([])
    ax3.imshow(np.uint8(score))

    # ax3.grid()
    writer.grab_frame()
Developer: vero1925, Project: Pytorch-SiamFC, Lines of code: 29, Source file: visualization.py


Example 15: array_colorkey

def array_colorkey (surface):
    """pygame.numpyarray.array_colorkey (Surface): return array

    copy the colorkey values into a 2d array

    Create a new array with the colorkey transparency value from each
    pixel. If the pixel matches the colorkey it will be fully
    transparent; otherwise it will be fully opaque.

    This will work on any type of Surface format. If the image has no
    colorkey a solid opaque array will be returned.

    This function will temporarily lock the Surface as pixels are
    copied.
    """
    colorkey = surface.get_colorkey ()
    if colorkey is None:
        # No colorkey, return a solid opaque array.
        array = numpy.empty (surface.get_width () * surface.get_height (),
                             numpy.uint8)
        array.fill (0xff)
        array.shape = surface.get_width (), surface.get_height ()
        return array

    # Taken from Alex Holkner's pygame-ctypes package. Thanks a
    # lot.
    array = array2d (surface)
    # Check each pixel value for the colorkey and mark it as opaque or
    # transparent as needed.
    val = surface.map_rgb (colorkey)
    array = numpy.choose (numpy.equal (array, val),
                          (numpy.uint8 (0xff), numpy.uint8 (0)))
    array.shape = surface.get_width (), surface.get_height ()
    return array
Developer: 123jefferson, Project: MiniBloq-Sparki, Lines of code: 34, Source file: _numpysurfarray.py


Example 16: esquelet

def esquelet (image):

    img=np.uint8(image)
    mask = np.zeros(img.shape, dtype=np.uint8)
    img1 = np.uint8(img)
    size = np.size(img1)
    skel = np.zeros(img1.shape,np.uint8)
    element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
    done=False

    #skeletonization        

    while(not done):

        eroded = cv2.erode(img1,element)
        temp = cv2.dilate(eroded,element)
        temp = cv2.subtract(img1,temp)
        skel = cv2.bitwise_or(skel,temp)
        img1 = eroded.copy()

        zeros = size - cv2.countNonZero(img1)

        if zeros==size:

            done = True

    return [skel,img]
Developer: EMI2016, Project: LIDAR, Lines of code: 27, Source file: todo6.py


Example 17: draw_mask_on_image_array

def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
  """Draws mask on an image.

  Args:
    image: uint8 numpy array with shape (img_height, img_width, 3)
    mask: a uint8 numpy array of shape (img_height, img_width) with
      values of either 0 or 1.
    color: color to draw the keypoints with. Default is red.
    alpha: transparency value between 0 and 1. (default: 0.4)

  Raises:
    ValueError: On incorrect data type for image or masks.
  """
  if image.dtype != np.uint8:
    raise ValueError('`image` not of type np.uint8')
  if mask.dtype != np.uint8:
    raise ValueError('`mask` not of type np.uint8')
  if np.any(np.logical_and(mask != 1, mask != 0)):
    raise ValueError('`mask` elements should be in [0, 1]')
  if image.shape[:2] != mask.shape:
    raise ValueError('The image has spatial dimensions %s but the mask has '
                     'dimensions %s' % (image.shape[:2], mask.shape))
  rgb = ImageColor.getrgb(color)
  pil_image = Image.fromarray(image)

  solid_color = np.expand_dims(
      np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
  pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
  pil_mask = Image.fromarray(np.uint8(255.0*alpha*mask)).convert('L')
  pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
  np.copyto(image, np.array(pil_image.convert('RGB')))
Developer: zhangjiulong, Project: models, Lines of code: 31, Source file: visualization_utils.py


Example 18: test_valid

    def test_valid(self):
        prop = bcpp.Int()

        assert prop.is_valid(None)

        assert prop.is_valid(0)
        assert prop.is_valid(1)

        assert prop.is_valid(np.int8(0))
        assert prop.is_valid(np.int8(1))
        assert prop.is_valid(np.int16(0))
        assert prop.is_valid(np.int16(1))
        assert prop.is_valid(np.int32(0))
        assert prop.is_valid(np.int32(1))
        assert prop.is_valid(np.int64(0))
        assert prop.is_valid(np.int64(1))
        assert prop.is_valid(np.uint8(0))
        assert prop.is_valid(np.uint8(1))
        assert prop.is_valid(np.uint16(0))
        assert prop.is_valid(np.uint16(1))
        assert prop.is_valid(np.uint32(0))
        assert prop.is_valid(np.uint32(1))
        assert prop.is_valid(np.uint64(0))
        assert prop.is_valid(np.uint64(1))

        # TODO (bev) should fail
        assert prop.is_valid(False)
        assert prop.is_valid(True)
Developer: jakirkham, Project: bokeh, Lines of code: 28, Source file: test_primitive.py


Example 19: pred

def pred():
    keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
    image = tf.placeholder(tf.float32, shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, 3], name="input_image")
    annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_HEIGHT, IMAGE_WIDTH, 1], name="annotation")

    pred_annotation, logits = inference(image, keep_probability)
    test_dataset_reader = TestDataset('data/testlist.mat')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
        saver = tf.train.Saver()
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("Model restored...")
        itr = 0
        test_images, test_annotations, test_orgs = test_dataset_reader.next_batch()
        #print('getting', test_annotations[0, 200:210, 200:210])
        if len(test_annotations) > 0:
            feed_dict = {image: test_images, annotation: test_annotations, keep_probability: 0.5}
            preds = sess.run(pred_annotation, feed_dict=feed_dict)
            org0_im = Image.fromarray(np.uint8(test_orgs[0]))
            org0_im.save('res/org0.jpg')
            org1_im = Image.fromarray(np.uint8(test_orgs[1]))
            org1_im.save('res/org1.jpg')
            save_alpha_img(test_orgs[0], test_annotations[0], 'res/ann0')
            save_alpha_img(test_orgs[1], test_annotations[1], 'res/ann1')
            save_alpha_img(test_orgs[0], preds[0], 'res/pre0')
            save_alpha_img(test_orgs[1], preds[1], 'res/pre1')
Developer: Selimam, Project: AutoPortraitMatting, Lines of code: 28, Source file: FCN.py


Example 20: shift2

def shift2(im):
    # swap the quadrants of a 2D image (an FFT shift for even dimensions)
    w,h=im.shape
    w2=np.uint8(w/2)    # caution: the uint8 cast wraps for dimensions >= 512
    h2=np.uint8(h/2)
    im1=np.vstack((im[w2:,:],im[:w2,:]))    # swap top and bottom halves
    im2=np.hstack((im1[:,h2:],im1[:,:h2])) # swap left and right halves
    return im2
Developer: xingnix, Project: learning, Lines of code: 7, Source file: frequency.py
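For even image dimensions, shift2 performs the same quadrant swap as NumPy's own np.fft.fftshift, which gives a quick way to sanity-check it (a sketch, assuming the shift2 definition above is in scope; the np.uint8 cast caps the half-size at 255, so this only holds for dimensions below 512):

    import numpy as np

    im = np.arange(16).reshape(4, 4)
    assert np.array_equal(shift2(im), np.fft.fftshift(im))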



Note: the numpy.uint8 function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by many developers, and copyright remains with the original authors; consult the corresponding project's license before redistributing or reusing the code, and do not repost without permission.

