
Python numpy.ndarray Function Code Examples


This article collects and organizes typical usage examples of the numpy.ndarray function in Python. If you are wondering how exactly to use Python's ndarray function, how to call it, or what real-world usage looks like, the curated code examples here may help.



The following presents 20 code examples of the ndarray function, ordered roughly by popularity.
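Before working through the examples, a quick reminder of what numpy.ndarray itself does may be useful: it is NumPy's low-level array constructor, which allocates an array of a given shape and dtype without initializing its contents, or wraps an existing memory buffer. The short sketch below is written for this article (it is not taken from any of the projects cited later) and shows both forms.

import numpy as np

# Allocate an uninitialized 3x4 float32 array; its contents are arbitrary
# until written, which is why np.zeros or np.empty are usually preferred.
buf = np.ndarray(shape=(3, 4), dtype=np.float32)
buf[:] = 0.0  # initialize explicitly before use

# Wrap an existing byte buffer instead of allocating new memory.
raw = bytes(bytearray(range(8)))
view = np.ndarray(shape=(2,), dtype=np.uint32, buffer=raw)

print(buf.shape, view.dtype, view.size)

Most of the examples below use the first form simply as a fast way to reserve an output array that is then filled in element by element.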

Example 1: produce_optimization_sets

    def produce_optimization_sets(self, train, test_samples=None):

        if test_samples == 0:
            return [train, numpy.ndarray([0, 0]), 0]

        test_size = int(round(train.shape[0] / 10))

        if test_samples is None:
            test_samples = random.sample(xrange(0, train.shape[0] - 1), test_size)

        train_index = 0
        test_index = 0

        train_result = numpy.ndarray([train.shape[0] - test_size, train.shape[1]], dtype=theano.config.floatX)
        test_result = numpy.ndarray([test_size, train.shape[1]], dtype=theano.config.floatX)

        for i in xrange(train.shape[0]):

            if i in test_samples:
                test_result[test_index, :] = train[i, :]
                test_index += 1
            else:
                train_result[train_index, :] = train[i, :]
                train_index += 1

        return [train_result, test_result, test_samples]
Developer: aviveise, Project: double_encoder, Lines: 26, Source: dataset_base.py


Example 2: __init__

    def __init__(self, n_in, n_out, weights=None,
                 activation='sigmoid', is_classifier_layer=False):

        # Get activation function from string
        self.activation_string = activation
        self.activation = Activation.get_activation(self.activation_string)
        self.activation_derivative = Activation.get_derivative(
                                    self.activation_string)

        self.n_in = n_in
        self.n_out = n_out

        self.inp = np.ndarray(n_in + 1)
        self.inp[0] = 1
        self.outp = np.ndarray(n_out)
        self.deltas = np.zeros(n_out)

        # You can have better initialization here
        if weights is None:
            self.weights = np.random.rand(n_in + 1, n_out) / 10 - 0.05

            # Adjust weights to zero mean
            for i in range(n_out):
                self.weights[:, i] -= self.weights[:, i].mean()
        else:
            assert(weights.shape == (n_in + 1, n_out))
            self.weights = weights

        self.is_classifier_layer = is_classifier_layer

        # Some handy properties of the layers
        self.size = self.n_out
        self.shape = self.weights.shape
Developer: ISibboI, Project: NNPraktikum, Lines: 33, Source: logistic_layer.py


Example 3: generate_performance_results

 def generate_performance_results(self, data, configs):
     num_folds = len(self.fold_data)
     num_pred = len(self.fold_data[0].test_predicted_values)
     num_actual = len(self.fold_data[0].test_actual_values)
     train_perf = np.ndarray(num_folds)
     test_perf = np.ndarray(num_folds)
     test_predicted = np.ones((num_folds,num_pred,))
     test_actual = np.ones((num_folds,num_actual,))
     for index, fold in enumerate(self.fold_data):
         # train_targets = self.train_targets[index]
         # test_targets = self.test_targets[index]
         train_predicted = fold.train_predicted_values
         train_actual = fold.train_actual_values
         test_predicted[index] = fold.test_predicted_values
         test_actual[index] = fold.test_actual_values
         results_loss_function = configs.results_loss_function
         train_target_ids = data.get_target_ids(fold.train_inds)
         test_target_ids = data.get_target_ids(fold.test_inds)
         train_perf[index] = LossFunction.compute_loss_function(train_predicted,
                                                                train_actual,
                                                                train_target_ids,
                                                                results_loss_function)
         test_perf[index] = LossFunction.compute_loss_function(test_predicted[index],
                                                               test_actual[index],
                                                               test_target_ids,
                                                               results_loss_function)
         self.fold_data[index].train_error = train_perf[index]
         self.fold_data[index].test_error = test_perf[index]
     self.test_actual = test_actual
     self.train_actual = train_actual
     self.test_predicted = test_predicted
     self.train_predicted = train_predicted
Developer: eracah, Project: CASP-ML-UC-DAVIS, Lines: 32, Source: Results.py


Example 4: get_synthetic_warped_circle

def get_synthetic_warped_circle(nslices):
    #get a subsampled circle
    fname_cicle = get_data('reg_o')
    circle = np.load(fname_cicle)[::4,::4].astype(floating)

    #create a synthetic invertible map and warp the circle
    d, dinv = vfu.create_harmonic_fields_2d(64, 64, 0.1, 4)
    d = np.asarray(d, dtype=floating)
    dinv = np.asarray(dinv, dtype=floating)
    mapping = DiffeomorphicMap(2, (64, 64))
    mapping.forward, mapping.backward = d, dinv
    wcircle = mapping.transform(circle)

    if(nslices == 1):
        return circle, wcircle

    #normalize and form the 3d by piling slices
    circle = (circle-circle.min())/(circle.max() - circle.min())
    circle_3d = np.ndarray(circle.shape + (nslices,), dtype=floating)
    circle_3d[...] = circle[...,None]
    circle_3d[...,0] = 0
    circle_3d[...,-1] = 0

    #do the same with the warped circle
    wcircle = (wcircle-wcircle.min())/(wcircle.max() - wcircle.min())
    wcircle_3d = np.ndarray(wcircle.shape + (nslices,), dtype=floating)
    wcircle_3d[...] = wcircle[...,None]
    wcircle_3d[...,0] = 0
    wcircle_3d[...,-1] = 0

    return circle_3d, wcircle_3d
Developer: JohnGriffiths, Project: dipy, Lines: 31, Source: test_imwarp.py


Example 5: AllocateRAM

	def AllocateRAM (self, verbose = True):

		ramsz = np.prod(self.dims) * self.ds / MB

		if (verbose):
			print '  Allocating %.1f MB of RAM' % ramsz

			print '    Data (dims: %(a)d %(b)d %(c)d %(d)d %(e)d %(f)d %(g)d %(h)d %(i)d %(j)d %(k)d %(l)d %(m)d %(n)d %(o)d %(p)d)' % {
				"a": self.dims[ 0], "b": self.dims[ 1], "c": self.dims[ 2], "d": self.dims[ 3], "e": self.dims[ 4], "f": self.dims[ 5],
				"g": self.dims[ 6],	"h": self.dims[ 7], "i": self.dims[ 8], "j": self.dims[ 9], "k": self.dims[10], "l": self.dims[11],
				"m": self.dims[12], "n": self.dims[13],	"o": self.dims[14], "p": self.dims[15]}
			
			print '    Noise (dims: %(a)d %(b)d %(c)d %(d)d %(e)d %(f)d %(g)d %(h)d %(i)d %(j)d %(k)d %(l)d %(m)d %(n)d %(o)d %(p)d)' % {
				"a": self.noisedims[ 0], "b": self.noisedims[ 1], "c": self.noisedims[ 2], "d": self.noisedims[ 3], "e": self.noisedims[ 4], "f": self.noisedims[ 5],
				"g": self.noisedims[ 6],	"h": self.noisedims[ 7], "i": self.noisedims[ 8], "j": self.noisedims[ 9], "k": self.noisedims[10], "l": self.noisedims[11],
				"m": self.noisedims[12], "n": self.noisedims[13],	"o": self.noisedims[14], "p": self.noisedims[15]}
			
			if (self.syncdims[0]):
				print '    SyncData (dims: %(a)d %(b)d )' % {"a": self.syncdims[0], "b": self.syncdims[1]}

		self.data = np.ndarray(shape=self.dims, dtype=self.dt)
		self.sync = np.ndarray(shape=self.syncdims, dtype=self.sddt)
		self.noise = np.ndarray(shape=filter(lambda x:x>0,self.noisedims), dtype=self.nddt)

		if (verbose):
			print '    ... done.\n'

		return
Developer: kvahed, Project: PyRawReader, Lines: 28, Source: rawparser.py


Example 6: load

def load(data_folders, min_num_images, max_num_images):
  dataset = np.ndarray(
    shape=(max_num_images, image_size, image_size), dtype=np.float32)
  labels = np.ndarray(shape=(max_num_images), dtype=np.int32)
  label_index = 0
  image_index = 0
  for folder in data_folders:
    print(folder)
    for image in os.listdir(folder):
      if image_index >= max_num_images:
        raise Exception('More images than expected: %d >= %d' % (
          image_index, max_num_images))
      image_file = os.path.join(folder, image)
      try:
        image_data = (ndimage.imread(image_file).astype(float) -
                      pixel_depth / 2) / pixel_depth
        if image_data.shape != (image_size, image_size):
          raise Exception('Unexpected image shape: %s' % str(image_data.shape))
        dataset[image_index, :, :] = image_data
        labels[image_index] = label_index
        image_index += 1
      except IOError as e:
        print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
    label_index += 1
  num_images = image_index
  dataset = dataset[0:num_images, :, :]
  labels = labels[0:num_images]
  if num_images < min_num_images:
    raise Exception('Many fewer images than expected: %d < %d' % (
        num_images, min_num_images))
  print('Full dataset tensor:', dataset.shape)
  print('Mean:', np.mean(dataset))
  print('Standard deviation:', np.std(dataset))
  print('Labels:', labels.shape)
  return dataset, labels
Developer: shubhamchaudhary, Project: ud730, Lines: 35, Source: 1_notmnist.py


Example 7: dual_plot

 def dual_plot(self, n):
     fig = plt.figure()
     ax1 = fig.add_subplot(121, axisbg='k')
     ax1.set_xlabel("Theta")
     ax1.set_ylabel("R")
     ax1.imshow(self.data, cmap=cm.hot,origin="lower", \
                    extent=[0,2*np.pi,self.rmin,self.rmax])
     r = np.arange(self.rmin,self.rmax,(self.rmax-self.rmin)/self.nrad)
     t = np.arange(0.,2.*np.pi,2.*np.pi/self.nsec)
     x = np.ndarray([self.nrad*self.nsec], dtype = float)
     y = np.ndarray([self.nrad*self.nsec], dtype = float)
     z = np.ndarray([self.nrad*self.nsec], dtype = float)
     k = 0
     for i in range(self.nrad):
         for j in range(self.nsec):
             x[k] = r[i]*np.cos(t[j])
             y[k] = r[i]*np.sin(t[j])
             z[k] = self.data[i,j]
             k +=1
     xx = np.arange(-self.rmax, self.rmax, (self.rmax-self.rmin)/n)
     yy = np.arange(-self.rmax, self.rmax, (self.rmax-self.rmin)/n)
     zz = griddata(x,y,z,xx,yy)
     ax2 = fig.add_subplot(122, axisbg='k')
     ax2.set_xlabel("X")
     ax2.set_ylabel("Y")
     ax2.imshow(zz, cmap=cm.hot,origin="lower" \
                    , extent=[-self.rmax,self.rmax,-self.rmax,self.rmax])
     plt.show()
Developer: adamdempsey90, Project: fargo3d, Lines: 28, Source: grid_plot.py


Example 8: get_label_voxels

    def get_label_voxels(self):
        #the voxel coordinates of fg and bg labels
        if not self.opLabelArray.NonzeroBlocks.ready():
            return (None,None)

        nonzeroSlicings = self.opLabelArray.NonzeroBlocks[:].wait()[0]
        
        coors1 = [[], [], []]
        coors2 = [[], [], []]
        for sl in nonzeroSlicings:
            a = self.opLabelArray.Output[sl].wait()
            w1 = numpy.where(a == 1)
            w2 = numpy.where(a == 2)
            w1 = [w1[i] + sl[i].start for i in range(1,4)]
            w2 = [w2[i] + sl[i].start for i in range(1,4)]
            for i in range(3):
                coors1[i].append( w1[i] )
                coors2[i].append( w2[i] )
        
        for i in range(3):
            if len(coors1[i]) > 0:
                coors1[i] = numpy.concatenate(coors1[i],0)
            else:
                coors1[i] = numpy.ndarray((0,), numpy.int32)
            if len(coors2[i]) > 0:
                coors2[i] = numpy.concatenate(coors2[i],0)
            else:
                coors2[i] = numpy.ndarray((0,), numpy.int32)
        return (coors2, coors1)
Developer: CVML, Project: ilastik, Lines: 29, Source: opCarving.py


Example 9: shape_from_header

    def shape_from_header(self, hdr):
        '''Read the shape of the array described by the header.
        The file position after this call is unspecified.
        '''
        mclass = hdr.mclass
        if mclass == mxFULL_CLASS:
            shape = tuple(map(int, hdr.dims))
        elif mclass == mxCHAR_CLASS:
            shape = tuple(map(int, hdr.dims))
            if self.chars_as_strings:
                shape = shape[:-1]
        elif mclass == mxSPARSE_CLASS:
            dt = hdr.dtype
            dims = hdr.dims

            if not (len(dims) == 2 and dims[0] >= 1 and dims[1] >= 1):
                return ()

            # Read only the row and column counts
            self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)
            rows = np.ndarray(shape=(1,), dtype=dt,
                              buffer=self.mat_stream.read(dt.itemsize))
            self.mat_stream.seek(dt.itemsize * (dims[0] - 1), 1)
            cols = np.ndarray(shape=(1,), dtype=dt,
                              buffer=self.mat_stream.read(dt.itemsize))

            shape = (int(rows), int(cols))
        else:
            raise TypeError('No reader for class code %s' % mclass)

        if self.squeeze_me:
            shape = tuple([x for x in shape if x != 1])
        return shape
Developer: dyao-vu, Project: meta-core, Lines: 33, Source: mio4.py


Example 10: generate_batch

def generate_batch(config, data, unique_neg_data):
    global batch_idx, data_idx

    batch_size = config['batch_size']
    neg_sample_size = config['neg_sample_size']

    batch = data.values()[batch_idx]
    neg_batch = unique_neg_data.values()[batch_idx]
    idx = data_idx[batch_idx]

    data_pos_x = np.ones(batch_size) * batch_idx
    data_pos_y = np.ndarray(shape=batch_size, dtype=np.int32)
    data_neg_y = np.ndarray(shape=neg_sample_size, dtype=np.int32)

    for i in xrange(batch_size):
        data_pos_y[i] = batch[idx]
        idx = (idx + 1) % len(batch)

    for i, neg_y_idx in enumerate(random.sample(set(neg_batch), neg_sample_size)):
        data_neg_y[i] = neg_y_idx

    data_idx[batch_idx] = idx
    batch_idx = (batch_idx + 1) % len(data)

    return data_pos_x, data_pos_y, data_neg_y
Developer: chagge, Project: practice-tensorflow, Lines: 25, Source: main2.py


Example 11: load

def load(data_folders, min_num_images, max_num_images):
    dataset = np.ndarray(
        shape=(max_num_images, image_size, image_size), dtype=np.float32)
    labels = np.ndarray(shape=(max_num_images), dtype=np.int32)
    label_index = 0
    image_index = 0
    for folder in data_folders:
        print folder
        for image in os.listdir(folder):
            if image_index >= max_num_images:
                raise Exception('More images than expected: %d > %d' %
                        (image_index, max_num_images))
            image_file = os.path.join(folder, image)
            try:
                # Loads the file. There seems to be some sort of translation on it.
                image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth
                if image_data.shape != (image_size, image_size):
                    raise Exception('Unexpected image shape: %s' % str(image_data.shape))
                dataset[image_index, :, :] = image_data
                labels[image_index] = label_index
                image_index += 1
            except IOError as e:
                print 'Could not read: ', image_file, ': ', e, '- skipped.'
        label_index += 1
    num_images = image_index
    dataset = dataset[0:num_images, :, :]
    labels = labels[0:num_images]
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d' %
            (num_images, min_num_images))
    print 'Full dataset tensor: ', dataset.shape
    print 'Mean: ', np.mean(dataset)
    print 'Stdev: ', np.std(dataset)
    print 'Labels: ', labels.shape
    return dataset, labels
Developer: kstory, Project: tensorflow_sandbox, Lines: 35, Source: assignment1.py


Example 12: cv

def cv(xs,ys):
	errorsums = np.zeros(11)
	bestdeg = 0

	
	def sqerror(y, yhat):
		return (y-yhat)**2

	segment = len(xs)/10
	for d in xrange(0,11): #each K
		for i in xrange(0,10): #each segment
			trp = 0 #points next position in train arrays
			tep = 0 #points next position in test arrays
			tsi = segment*i #Testdata start index
			xtrain = np.ndarray(len(xs)-segment)
			ytrain = np.ndarray(len(xs)-segment)
			xtest = np.ndarray(segment)
			ytest = np.ndarray(segment)
			for j in xrange(0,len(xs)): #divide data
				if j<tsi or j>=tsi+segment:
					xtrain[trp] = xs[j]
					ytrain[trp] = ys[j]
					trp+=1
				else:
					xtest[tep] = xs[j]
					ytest[tep] = ys[j]
					tep+=1
			polynomial = fitPolynomial(d, xtrain, ytrain)
			for k in xrange(0, len(ytest)):
				errorsums[d] += sqerror(ytest[k], polynomial(xtest[k]))
		if errorsums[d] < errorsums[bestdeg]:
			bestdeg = d
	return [bestdeg,errorsums]
Developer: ealiasannila, Project: iml, Lines: 33, Source: 014328901_e5_elias_annila.py


Example 13: load_gl_buffers

 def load_gl_buffers(self):
     num = self.n_frags
     pos = np.ndarray((num, 4), dtype=np.float32)
     seed = np.random.rand(2,num)
     pos[:,0] = seed[0,:]
     pos[:,1] = 0.0
     pos[:,2] = seed[1,:] # z pos
     pos[:,3] = 1. # velocity
     # pos[:,1] = np.sin(np.arange(0., num) * 2.001 * np.pi / (10*num))
     # pos[:,1] *= np.random.random_sample((num,)) / 3. - 0.2
     # pos[:,2] = np.cos(np.arange(0., num) * 2.001 * np.pi /(10* num))
     # pos[:,2] *= np.random.random_sample((num,)) / 3. - 0.2
     # pos[:,0] = 0. # z pos
     # pos[:,3] = 1. # velocity
     self.pos = pos
     self.pos_vbo = vbo.VBO(data=self.pos, usage=GL_DYNAMIC_DRAW, target=GL_ARRAY_BUFFER)
     # self.pos_vbo = vbo.VBO(data=self.pos_vect_frags_4_GL, usage=GL_DYNAMIC_DRAW, target=GL_ARRAY_BUFFER)
     self.pos_vbo.bind()
     self.col_vbo = vbo.VBO(data=self.col_vect_frags_4_GL, usage=GL_DYNAMIC_DRAW, target=GL_ARRAY_BUFFER)
     self.col_vbo.bind()
     self.vel = np.ndarray((self.n_frags, 4), dtype=np.float32)
     self.vel[:,2] = self.pos[:,2] * 2.
     self.vel[:,1] = self.pos[:,1] * 2.
     self.vel[:,0] = 3.
     self.vel[:,3] = np.random.random_sample((self.n_frags, ))
Developer: cerebis, Project: GRAAL, Lines: 25, Source: simulation_loader.py


Example 14: ch

def ch(X, cIDX, distance="euclidean"):
    Nclusters = cIDX.max() + 1
    Npoints = len(X)

    n = np.ndarray(shape=(Nclusters), dtype=float)

    j = 0
    for i in range(cIDX.min(), cIDX.max() + 1):
        aux = np.asarray([float(b) for b in (cIDX == i)])
        n[j] = aux.sum()
        j = j + 1

    # Clusters
    A = np.array([X[np.where(cIDX == i)] for i in range(Nclusters)])
    # Centroids
    v = np.array([np.sum(Ai, axis=0) / float(Ai.shape[0]) for Ai in A])

    ssb = 0

    for i in range(Nclusters):
        ssb = n[i] * (cdist([v[i]], [np.mean(X, axis=0)], metric=distance)[0][0] ** 2) + ssb

    z = np.ndarray(shape=(Nclusters), dtype=float)

    for i in range(cIDX.min(), cIDX.max() + 1):
        aux = np.array([(cdist([x], [v[i]], metric=distance)[0][0] ** 2) for x in X[cIDX == i]])
        z[i] = aux.sum()

    ssw = z.sum()

    return (ssb / (Nclusters - 1)) / (ssw / (Npoints - Nclusters))
Developer: mmssouza, Project: ClusterEval, Lines: 31, Source: metrics.py


Example 15: extract

    def extract(self, image, segments):
        fs = self.feature_size
        bg = 255
        regions = numpy.ndarray(shape=(0, fs), dtype=FEATURE_DATATYPE)

        for segment in segments:
            region = region_from_segment(image, segment)

            if self.stretch:
                region = cv2.resize(region, (fs, fs))
            else:
                x, y, w, h = segment
                proportion = float(min(h, w)) / max(w, h)
                new_size = (fs, int(fs * proportion)) if min(w, h) == h else (int(fs * proportion), fs)

                region = cv2.resize(region, new_size)
                s = region.shape
                new_region = numpy.ndarray((fs, fs), dtype=region.dtype)
                new_region[:, :] = bg
                new_region[:s[0], :s[1]] = region
                region = new_region

            regions = numpy.append(regions, region, axis=0)
        regions.shape = (len(segments), fs**2)

        return regions
Developer: JetMuffin, Project: urp-decaptcha, Lines: 26, Source: feature_extraction.py


Example 16: pd_to_array

def pd_to_array(inpd, dims=225):
    count_vol = numpy.ndarray([dims,dims,dims])
    ptids = vtk.vtkIdList()
    points = inpd.GetPoints()
    data_vol = []    
    # check for cell data
    cell_data = inpd.GetCellData().GetScalars()
    if cell_data:
        data_vol = numpy.ndarray([dims,dims,dims])
    # loop over lines
    inpd.GetLines().InitTraversal()
    print "<filter.py> Input number of points: ",\
        points.GetNumberOfPoints(),\
        "lines:", inpd.GetNumberOfLines() 
    # loop over all lines
    for lidx in range(0, inpd.GetNumberOfLines()):
        # progress
        #if verbose:
        #    if lidx % 1 == 0:
        #        print "<filter.py> Line:", lidx, "/", inpd.GetNumberOfLines()
        inpd.GetLines().GetNextCell(ptids)
        num_points = ptids.GetNumberOfIds()
        for pidx in range(0, num_points):
            point = points.GetPoint(ptids.GetId(pidx))
            # center so that 0,0,0 moves to 100,100,100
            point = numpy.round(numpy.array(point) + 110)         
            count_vol[point[0], point[1], point[2]] += 1
            if cell_data:
                data_vol[point[0], point[1], point[2]] += cell_data.GetTuple(lidx)[0]
    return count_vol, data_vol
Developer: RuizhiLiao, Project: whitematteranalysis, Lines: 30, Source: filter.py


Example 17: create_train_data

	def create_train_data(self):
		# Generate .npy files from the augmented training set
		i = 0
		print('-' * 30)
		print('creating train image')
		print('-' * 30)
		count = 0
		for indir in os.listdir(self.aug_merge_path):
			path = os.path.join(self.aug_merge_path, indir)
			count += len(os.listdir(path))
		imgdatas = np.ndarray((count, self.out_rows, self.out_cols, 1), dtype=np.uint8)
		imglabels = np.ndarray((count, self.out_rows, self.out_cols, 1), dtype=np.uint8)
		for indir in os.listdir(self.aug_merge_path):
			trainPath = os.path.join(self.aug_train_path, indir)
			labelPath = os.path.join(self.aug_label_path, indir)
			print(trainPath, labelPath)
			imgs = glob.glob(trainPath + '/*' + '.tif')
			for imgname in imgs:
				trainmidname = imgname[imgname.rindex('/') + 1:]
				labelimgname = imgname[imgname.rindex('/') + 1:imgname.rindex('_')] + '_label.tif'
				print(trainmidname, labelimgname)
				img = load_img(trainPath + '/' + trainmidname, grayscale=True)
				label = load_img(labelPath + '/' + labelimgname, grayscale=True)
				img = img_to_array(img)
				label = img_to_array(label)
				imgdatas[i] = img
				imglabels[i] = label
				if i % 100 == 0:
					print('Done: {0}/{1} images'.format(i, len(imgs)))
				i += 1
				print(i)
		print('loading done', imgdatas.shape)
		np.save(self.npy_path + '/imgs_train.npy', imgdatas)            # save the 30 training images and 30 label images as .npy data
		np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)
		print('Saving to .npy files done.')
Developer: USTCzxm, Project: U-net, Lines: 35, Source: data_Keras.py


Example 18: polar

 def polar(self, n, file='None'):
     r = np.arange(self.rmin,self.rmax,(self.rmax-self.rmin)/self.nrad)
     t = np.arange(0.,2.*np.pi,2.*np.pi/self.nsec)
     x = np.ndarray([self.nrad*self.nsec], dtype = float)
     y = np.ndarray([self.nrad*self.nsec], dtype = float)
     z = np.ndarray([self.nrad*self.nsec], dtype = float)
     k = 0
     for i in range(self.nrad):
         for j in range(self.nsec):
             x[k] = r[i]*np.cos(t[j])
             y[k] = r[i]*np.sin(t[j])
             z[k] = self.data[i,j]
             k +=1
     xx = np.arange(-self.rmax, self.rmax, (self.rmax-self.rmin)/n)
     yy = np.arange(-self.rmax, self.rmax, (self.rmax-self.rmin)/n)
     zz = griddata(x,y,z,xx,yy)
     fig_pol = plt.figure()
     ax1 = fig_pol.add_subplot(111, axisbg='k')
     ax1.set_xlabel("X")
     ax1.set_ylabel("Y")
     if(self.zmax!='None' and self.zmin!='None'):
         ax1.imshow(zz, cmap=cm.hot, origin="lower", \
                        extent=[-self.rmax,self.rmax, \
                                     -self.rmax,self.rmax])
     else:
         ax1.imshow(zz, cmap=cm.hot, origin="lower", \
                        extent=[-self.rmax,self.rmax, \
                                     -self.rmax,self.rmax])
     if(file!="None"):
         plt.savefig(file+".png",dpi=70, format="png" )
         print file+".png done"
     else:
         plt.show()
Developer: adamdempsey90, Project: fargo3d, Lines: 33, Source: grid_plot.py


Example 19: create_small_train_data

	def create_small_train_data(self):
		# Generate .npy files from the augmented training set
		print('-' * 30)
		print('creating small train image')
		print('-' * 30)
		imgs = glob.glob('../data_set/aug_train/0/*' + '.tif')
		count = len(imgs)
		imgdatas = np.ndarray((count, self.out_rows, self.out_cols, 1), dtype=np.uint8)
		imglabels = np.ndarray((count, self.out_rows, self.out_cols, 1), dtype=np.uint8)
		trainPath = '../data_set/aug_train/0'
		labelPath = '../data_set/aug_label/0'
		i = 0
		for imgname in imgs:
			trainmidname = imgname[imgname.rindex('/') + 1:]
			labelimgname = imgname[imgname.rindex('/') + 1:imgname.rindex('_')] + '_label.tif'
			print(trainmidname, labelimgname)
			img = load_img(trainPath + '/' + trainmidname, grayscale=True)
			label = load_img(labelPath + '/' + labelimgname, grayscale=True)
			img = img_to_array(img)
			label = img_to_array(label)
			imgdatas[i] = img
			imglabels[i] = label
			i += 1
			print(i)
		print('loading done', imgdatas.shape)
		np.save(self.npy_path + '/imgs_small_train.npy', imgdatas)  # save the 30 training images and 30 label images as .npy data
		np.save(self.npy_path + '/imgs_mask_small_train.npy', imglabels)
		print('Saving to .npy files done.')
Developer: USTCzxm, Project: U-net, Lines: 28, Source: data_Keras.py


Example 20: __init__

    def __init__(self, probs):
        prob = numpy.array(probs, numpy.float32)
        prob /= numpy.sum(prob)
        threshold = numpy.ndarray(len(probs), numpy.float32)
        values = numpy.ndarray(len(probs) * 2, numpy.int32)
        il, ir = 0, 0
        pairs = list(zip(prob, range(len(probs))))
        pairs.sort()
        for prob, i in pairs:
            p = prob * len(probs)
            while p > 1 and ir < len(threshold):
                values[ir * 2 + 1] = i
                p -= 1.0 - threshold[ir]
                ir += 1
            threshold[il] = p
            values[il * 2] = i
            il += 1
        # fill the rest
        for i in range(ir, len(probs)):
            values[i * 2 + 1] = 0

        assert((values < len(threshold)).all())
        self.threshold = threshold
        self.values = values
        self.use_gpu = False
Developer: naokiiiii, Project: chainer, Lines: 25, Source: walker_alias.py



Note: The numpy.ndarray examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Refer to each project's License before distributing or reusing the code; do not reproduce without permission.

