Python numpy.savez_compressed Function Code Examples


This article collects typical usage examples of the numpy.savez_compressed function in Python. If you are wondering how savez_compressed is used in practice, what its calls look like, or where to find real-world examples, the curated snippets below should help.



Twenty code examples of savez_compressed are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
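
Before the collected examples, here is a minimal round-trip sketch (written for this summary, not taken from any of the projects below): savez_compressed packs several arrays into a single compressed .npz archive, keyed either positionally (arr_0, arr_1, ...) or by keyword, and np.load returns a dict-like NpzFile for reading them back.

import numpy as np

a = np.arange(10)
b = np.eye(3)

# Write both arrays into one compressed archive; the keyword names become the keys.
np.savez_compressed('example.npz', a=a, b=b)

# Read them back; np.load on an .npz file returns a lazy, dict-like NpzFile.
with np.load('example.npz') as data:
    print(sorted(data.files))   # ['a', 'b']
    print(data['a'])            # [0 1 2 3 4 5 6 7 8 9]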

Example 1: save_data

 def save_data(self, fmt='npy', dest='.'):
     if not self.has_saved:
         #must specify a unique new directory name for this output
         if not os.path.exists(dest):
             os.makedirs(dest)
         sim_dir = os.path.join(dest,self.fname)
         i = 1
         while os.path.exists(sim_dir):
         sim_dir = os.path.join(dest, self.fname + '-%i' % i)  # suffix the name (fname-1, fname-2, ...) until it is unique
             i += 1
         os.mkdir(sim_dir)
         self.sim_dir = sim_dir
         self.has_saved = True
     fname = os.path.join(self.sim_dir, self.fname)
     params = {k:self.__dict__[k] for k in self.__dict__ if k not in ['cells','neuropil','t','mov','mov_nofilter','stim','mov_nojit','mov_filtered']}
     cells = [cell.get_dict() for cell in self.cells]
     npil = np.array([self.neuropil.get_dict()])
     t = self.__dict__['t']
     stim = self.__dict__['stim']
     movie = np.array([{k:self.__dict__[k] for k in ['mov','mov_nofilter','mov_nojit','mov_filtered']}])
     if fmt in ['npy','npz','numpy','n']:
         np.savez_compressed(fname, params=params, cells=cells, neuropil=npil, time=t, stim=stim, movie=movie)
     elif fmt in ['mat','matlab','m']:
         matdic = {'params':params, 'cells':cells, 'neuropil':npil, 'time':t, 'stim':stim, 'movie':movie}
         savemat(fname, matdic)
Developer: bensondaled, Project: imaging-simulation, Lines of code: 25, Source file: simulations.py


Example 2: save_npz

def save_npz(file, obj, compression=True):
    """Saves an object to the file in NPZ format.

    This is a short-cut function to save only one object into an NPZ file.

    Args:
        file (str or file-like): Target file to write to.
        obj: Object to be serialized. It must support serialization protocol.
        compression (bool): If ``True``, compression in the resulting zip file
            is enabled.

    .. seealso::
        :func:`chainer.serializers.load_npz`

    """
    if isinstance(file, six.string_types):
        with open(file, 'wb') as f:
            save_npz(f, obj, compression)
        return

    s = DictionarySerializer()
    s.save(obj)
    if compression:
        numpy.savez_compressed(file, **s.target)
    else:
        numpy.savez(file, **s.target)
Developer: unnonouno, Project: chainer, Lines of code: 26, Source file: npz.py
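
For context, chainer.serializers.save_npz is the public wrapper shown above; a small usage sketch (my addition, assuming an ordinary chainer.Link such as links.Linear) might look like:

import chainer.links as L
from chainer import serializers

model = L.Linear(3, 2)                    # any chainer.Link would do here
serializers.save_npz('model.npz', model)  # compression=True by default, i.e. numpy.savez_compressed
serializers.load_npz('model.npz', model)  # restores the parameters in place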


Example 3: save_images_to_disk

def save_images_to_disk():
	print('Disk-saving thread active...')
	n = 0
	frameTimeOutputFile = open(planOutputPath+'frameTimes.txt','w')
	frameTimeOutputFile.write('frameCount\t n\t frameCond\t frameT\t interval\n')
	currdict = im_queue.get()
	while currdict is not None:
		frameTimeOutputFile.write('%i\t %i\t %i\t %s\t %s\n' % (int(currdict['frame']),n,int(currdict['cond']),currdict['time'],currdict['interval']))
		if save_as_tiff:
			fname = '%s/frame%i.tiff' % (dataOutputPath,int(currdict['frame']))
			tiff = TIFF.open(fname, mode='w')
			tiff.write_image(currdict['im'])
			tiff.close()
	
		elif save_as_npz:
			np.savez_compressed('%s/test%d.npz' % (output_path, n), currdict['im'])
		else:
			fname = '%s/frame%i.tiff' % (dataOutputPath,int(currdict['frame']),)
			with open(fname, 'wb') as f:
				pkl.dump(currdict, f, protocol=pkl.HIGHEST_PROTOCOL)
			
#		print 'DONE SAVING FRAME: ', currdict['frame'], n #fdict
		n += 1
		currdict = im_queue.get()
		
	disk_writer_alive = False
	#frameTimeOutputFile.close()
	print('Disk-saving thread inactive...')
Developer: cechava, Project: colorGratings, Lines of code: 28, Source file: color_vs_BW_gratings.py


Example 4: do_gen

def do_gen(content, decision):
    SER_S = np.empty((tau_range.shape[0], phi_range.shape[0]))
    SER_U = np.empty((tau_range.shape[0], phi_range.shape[0]))

    for tau_idx, tau in enumerate(tau_range):
        print("tau = %.2f" % (tau))

        # symbols of (synch"ed, unsynch"ed) sender
        if content in ("same",):
            tmp_syms = np.random.randint(16, size=nsyms + 2)
            send_syms = [tmp_syms, tmp_syms]
        else:
            send_syms = np.random.randint(16, size=2 * (nsyms + 2)).reshape(2, nsyms + 2)

        send_syms_s, send_syms_u = send_syms[0][1:-1], send_syms[1][1:-1]
        send_chips = pt.map_chips(*send_syms)

        RECV_CHIPS_I = pt.detect_i(send_chips[:2], send_chips[2:], phi_range, tau, As, Au)
        RECV_CHIPS_Q = pt.detect_q(send_chips[:2], send_chips[2:], phi_range, tau, As, Au)

        for phi_idx in range(len(phi_range)):
            recv_chips = np.empty(2 * RECV_CHIPS_I.shape[0])
            recv_chips[::2] = RECV_CHIPS_I[:, phi_idx]
            recv_chips[1::2] = RECV_CHIPS_Q[:, phi_idx]

            # slice bits to simulate hard decision decoder
            if decision in ("hard",):
                recv_chips = np.sign(recv_chips)

            recv_syms = pt.detect_syms_corr(recv_chips)[1:-1]

            SER_S[tau_idx, phi_idx] = sum(recv_syms != send_syms_s) / (1.0 * len(recv_syms))
            SER_U[tau_idx, phi_idx] = sum(recv_syms != send_syms_u) / (1.0 * len(recv_syms))

    np.savez_compressed("data/ser_Au%.2f_%s_%s_v2.npz" % (Au, content, decision), SER_S=SER_S, SER_U=SER_U, **settings)
Developer: cnodadiaz, Project: collision, Lines of code: 35, Source file: gen_ser_contour.py


Example 5: main

def main():
    if len(sys.argv) < 2:
        print("Not enough arguments supplied")
        return

    datapath = sys.argv[1]
    train_image_path = os.path.join(datapath, "train_images.npz")
    test_image_path = os.path.join(datapath, "test_images.npz")
    train_cap_path = os.path.join(datapath, "train_labels.txt")
    test_cap_path = os.path.join(datapath, "test_labels.txt")

    trX, teX, trY, teY = mnist(onehot=False)

    print("Generating Appended MNIST Training...")
    train_imgs, train_caps = generate_dataset(trX, trY)
    print("Generating Appended MNIST Testing...")
    test_imgs, test_caps = generate_dataset(teX, teY)

    print("Save Training/Testing Images...")
    np.savez_compressed(train_image_path, *train_imgs)
    np.savez_compressed(test_image_path, *test_imgs)

    print("Save Training/Testing Captions...")
    with open(train_cap_path, 'w') as train_cap_file:
        train_cap_file.writelines(train_caps)
    with open(test_cap_path, 'w') as test_cap_file:
        test_cap_file.writelines(test_caps)

    print("DONE. SUMMARY")
    print("# Train Examples: " + str(len(train_imgs)))
    print("# Test Examples: " + str(len(test_imgs)))
Developer: youralien, Project: arctic-captions, Lines of code: 31, Source file: mnist_appended.py


Example 6: main

def main():
    if os.path.exists('points.npz'):
        print("Loading points from points.npz")
        points = np.load('points.npz')['points']
    else:
        points = get_points()
        print("Saving points to points.npz")
        np.savez_compressed('points.npz', points=points)

    def point_input():
        for x, y in points:
            yield '%s %s\n' % (x, y)

    b_i = []
    b_j = []
    b_x = []
    b_y = []
    t1 = time.time()
    for line in run_subprocess(('./union',), point_input()):
        i, j, x, y = line.split()
        b_i.append(int(i))
        b_j.append(int(j))
        b_x.append(float(x))
        b_y.append(float(y))
    t2 = time.time()
    b_i = np.asarray(b_i)
    b_j = np.asarray(b_j)
    b_x = np.asarray(b_x)
    b_y = np.asarray(b_y)
    print("Got %d boundary intersections" % len(b_i) +
          " in %.4f s" % (t2 - t1))
    np.savez_compressed('boundary.npz', i=b_i, j=b_j, x=b_x, y=b_y)
Developer: Mortal, Project: point-union, Lines of code: 32, Source file: extract_points.py


Example 7: save_cb

    def save_cb(self, mode):
        """Save image, figure, and plot data arrays."""

        # This just defines the basename.
        # Extension has to be explicitly defined or things can get messy.
        target = Widgets.SaveDialog(
            title='Save {0} data'.format(mode)).get_path()

        # Save cancelled
        if not target:
            return

        # TODO: This can be a user preference?
        fig_dpi = 100

        if mode == 'cuts':
            # Save as fits file
            image = self.fitsimage.get_image()
            self.fv.error_wrap(image.save_as_file, target + '.fits')

            fig, xarr, yarr = self.cuts_plot.get_data()

        elif mode == 'slit':
            fig, xarr, yarr = self.slit_plot.get_data()

        fig.savefig(target + '.png', dpi=fig_dpi)
        numpy.savez_compressed(target + '.npz', x=xarr, y=yarr)
Developer: rupak0577, Project: ginga, Lines of code: 27, Source file: Cuts.py


Example 8: creator

    def creator(path):
        archive_path = download.cached_download(url)

        train_x = numpy.empty((5, 10000, 3072), dtype=numpy.uint8)
        train_y = numpy.empty((5, 10000), dtype=numpy.uint8)
        test_y = numpy.empty(10000, dtype=numpy.uint8)

        dir_name = '{}-batches-py'.format(name)

        with tarfile.open(archive_path, 'r:gz') as archive:
            # training set
            for i in range(5):
                file_name = '{}/data_batch_{}'.format(dir_name, i + 1)
                d = pickle.load(archive.extractfile(file_name))
                train_x[i] = d['data']
                train_y[i] = d['labels']

            # test set
            file_name = '{}/test_batch'.format(dir_name)
            d = pickle.load(archive.extractfile(file_name))
            test_x = d['data']
            test_y[...] = d['labels']  # copy to array

        train_x = train_x.reshape(50000, 3072)
        train_y = train_y.reshape(50000)

        numpy.savez_compressed(path, train_x=train_x, train_y=train_y,
                               test_x=test_x, test_y=test_y)
        return {'train_x': train_x, 'train_y': train_y,
                'test_x': test_x, 'test_y': test_y}
Developer: sarikayamehmet, Project: chainer, Lines of code: 30, Source file: cifar.py


Example 9: save_vocab

    def save_vocab(self, path_count, path_vocab, word_limit=100000):
        """ Saves the master vocabulary into a file.
        """

        # reserve space for 10 special tokens
        words = OrderedDict()
        for token in SPECIAL_TOKENS:
            # store -1 instead of np.inf, which can overflow
            words[token] = -1

        # sort words by frequency
        desc_order = OrderedDict(sorted(self.master_vocab.items(),
                                 key=lambda kv: kv[1], reverse=True))
        words.update(desc_order)

        # use encoding of up to 30 characters (no token conversions)
        # use float to store large numbers (we don't care about precision loss)
        np_vocab = np.array(words.items(),
                            dtype=([('word','|S30'),('count','float')]))

        # output count for debugging
        counts = np_vocab[:word_limit]
        np.savez_compressed(path_count, counts=counts)

        # output the index of each word for easy lookup
        final_words = OrderedDict()
        for i, w in enumerate(words.keys()[:word_limit]):
            final_words.update({w:i})
        with open(path_vocab, 'w') as f:
            f.write(json.dumps(final_words, indent=4, separators=(',', ': ')))
Developer: cclauss, Project: torchMoji, Lines of code: 30, Source file: create_vocab.py


Example 10: split_dataset

def split_dataset(labels, indices_save_path, classes_save_path, train_size=0.8,
                  test_size=0.1, validation_size=0.1, min_count=3):
    classes = {}

    print('Getting YAGO labels', file=sys.stderr, flush=True)
    yago_labels = [label[1] for label in labels]

    print('Getting filtered classes', file=sys.stderr, flush=True)
    filtered_classes = {l for l, v in Counter(yago_labels).items() if v >= min_count}

    print('Getting filtered indices', file=sys.stderr, flush=True)
    filtered_indices = np.array([i for i, l in enumerate(yago_labels)
                                 if (l != 'O' and l in filtered_classes) or (l == 'O')], dtype=np.int32)

    strat_split = StratifiedSplitter(np.array(yago_labels), filtered_indices)

    print('Splitting the dataset', file=sys.stderr, flush=True)
    train_indices, test_indices, validation_indices = strat_split.get_splitted_dataset_indices(
        train_size=train_size, test_size=test_size, validation_size=validation_size)

    print('Saving indices to file %s' % indices_save_path, file=sys.stderr, flush=True)
    np.savez_compressed(indices_save_path, train_indices=train_indices, test_indices=test_indices,
                        validation_indices=validation_indices, filtered_indices=filtered_indices)

    for idx, iteration in enumerate(CL_ITERATIONS[::-1]):
        print('Getting classes for iteration %s' % iteration, file=sys.stderr, flush=True)
        replaced_labels = [label[idx] for label in labels]
        classes[iteration] = np.unique(np.array(replaced_labels)[filtered_indices], return_counts=True)

    print('Saving classes to file %s' % classes_save_path, file=sys.stderr, flush=True)
    with open(classes_save_path, 'wb') as f:
        pickle.dump(classes, f)
Developer: MIREL-UNC, Project: wikipedia-ner, Lines of code: 32, Source file: processing.py


Example 11: feature_selection

def feature_selection(dataset, features_names, matrix_file_path, features_file_path, max_features=12000):
    print('Calculating variance of dataset features', file=sys.stderr, flush=True)
    dataset = csc_matrix(dataset)
    square_dataset = dataset.copy()
    square_dataset.data **= 2
    variance = np.asarray(square_dataset.mean(axis=0) - np.square(dataset.mean(axis=0)))[0]

    print('Getting top %d features' % max_features, file=sys.stderr, flush=True)
    top_features = np.argsort(variance)[::-1][:max_features]
    min_variance = variance[top_features][-1]

    print('Min variance: %.2e. Getting features over min variance.' % min_variance, file=sys.stderr, flush=True)
    valid_indices = np.where(variance > min_variance)[0]

    print('Final features count: %d/%d' % (valid_indices.shape[0], dataset.shape[1]), file=sys.stderr, flush=True)

    print('Filtering features', file=sys.stderr, flush=True)
    dataset = csr_matrix(dataset[:, valid_indices])

    print('Saving dataset to file {}'.format(matrix_file_path), file=sys.stderr, flush=True)
    np.savez_compressed(matrix_file_path, data=dataset.data, indices=dataset.indices,
                        indptr=dataset.indptr, shape=dataset.shape)

    print('Saving filtered features names', file=sys.stderr, flush=True)
    features_names = np.array(features_names)
    filtered_features_names = list(features_names[valid_indices])

    with open(features_file_path, 'wb') as f:
        pickle.dump(filtered_features_names, f)
Developer: MIREL-UNC, Project: wikipedia-ner, Lines of code: 29, Source file: processing.py


Example 12: process_glove

def process_glove(args, vocab_list, save_path, size=4e5, random_init=True):
    """
    :param vocab_list: [vocab]
    :return:
    """
    if not gfile.Exists(save_path + ".npz"):
        glove_path = os.path.join(args.glove_dir, "glove.6B.{}d.txt".format(args.glove_dim))
        if random_init:
            glove = np.random.randn(len(vocab_list), args.glove_dim)
        else:
            glove = np.zeros((len(vocab_list), args.glove_dim))
        found = 0
        with open(glove_path, 'r') as fh:
            for line in tqdm(fh, total=size):
                array = line.lstrip().rstrip().split(" ")
                word = array[0]
                vector = list(map(float, array[1:]))
                if word in vocab_list:
                    idx = vocab_list.index(word)
                    glove[idx, :] = vector
                    found += 1
                if word.capitalize() in vocab_list:
                    idx = vocab_list.index(word.capitalize())
                    glove[idx, :] = vector
                    found += 1
                if word.upper() in vocab_list:
                    idx = vocab_list.index(word.upper())
                    glove[idx, :] = vector
                    found += 1

        print("{}/{} of word vocab have corresponding vectors in {}".format(found, len(vocab_list), glove_path))
        np.savez_compressed(save_path, glove=glove)
        print("saved trimmed glove matrix at: {}".format(save_path))
Developer: InnerPeace-Wu, Project: reading_comprehension-cs224n, Lines of code: 33, Source file: qa_data.py
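
A tiny loading sketch for the trimmed matrix written above (my addition; the file name is hypothetical and stands in for save_path + ".npz", and row i of the matrix corresponds to vocab_list[i]):

import numpy as np

# Hypothetical consumer of the archive produced by process_glove (e.g. glove_dim=100).
embeddings = np.load("glove.trimmed.100.npz")["glove"]
print(embeddings.shape)  # (len(vocab_list), glove_dim)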


Example 13: align

def align(movie_data, options, args, lrh):
    print 'PCA'
    nvoxel = movie_data.shape[0]
    nTR    = movie_data.shape[1]
    nsubjs = movie_data.shape[2]
    
    align_algo = args.align_algo
    nfeature   = args.nfeature

    if not os.path.exists(options['working_path']):
        os.makedirs(options['working_path'])

    # zscore the data
    bX = np.zeros((nsubjs*nvoxel,nTR))
    for m in range(nsubjs):
        bX[m*nvoxel:(m+1)*nvoxel,:] = stats.zscore(movie_data[:,:,m].T ,axis=0, ddof=1).T
    del movie_data

    U, s, VT = np.linalg.svd(bX, full_matrices=False)

    bW = U[:,range(nfeature)]
    ES = np.diag(s).dot(VT)
    ES = ES[:nfeature,:]

    niter = 10 
    # initialization when first time run the algorithm
    np.savez_compressed(options['working_path']+align_algo+'_'+lrh+'_'+str(niter)+'.npz',\
          bW = bW, ES=ES, niter=niter)
    return niter
Developer: snastase, Project: SRM, Lines of code: 29, Source file: pca.py


Example 14: main

def main():
    desc = 'Applies summarize_scores on a given score matrix'
    parser = argparse.ArgumentParser(description = desc)
    parser.add_argument('infile')
    parser.add_argument('idfile', help = 'File with region ids')
    parser.add_argument('outfile')
    args = parser.parse_args()
    
    data = np.load(args.infile)
    scores = data['scores']
    motif_names = data['motif_names']
    data.close()
    is_score = np.array([not re.search('_scores', m) is None for m in motif_names], np.bool)
    is_count = np.array([not s for s in is_score], np.bool)
    region_ids = []
    with open(args.idfile, 'r') as infile:
        for line in infile:
            if line.strip() == '.':
                region_ids.append(None)
            else:
                region_ids.append(line.strip())
            
    scores_tmp, new_names = summarize_scores(scores[:, is_score], region_ids, np.max)
    counts_tmp, new_names_tmp = summarize_scores(scores[:, is_count], region_ids, np.sum)
    motif_names = np.array(motif_names)
    motif_names = list(np.concatenate((motif_names[is_score], motif_names[is_count])))
    scores = np.concatenate((scores_tmp, counts_tmp), axis = 1)

    np.savez_compressed(args.outfile, scores = scores, motif_names = motif_names,
                        region_names = new_names)
Developer: sofiakp, Project: roadmap, Lines of code: 30, Source file: combineRegionScores.py


Example 15: _storeresult

def _storeresult(result):
    """Test function to try various ways to store a tuple of numpy arrays.
    """
    if False:
        # Try numpy
        npy.savez_compressed('store-npy.npz', *result)
    if False:
        # Try h5py
        import h5py
        store = h5py.File("store-h5py.hdf5", "w", compression='lzf')
        store['numoccs'] = numoccs
        store['occcounts'] = occcounts
        store['childoccfreqs'] = childoccfreqs
        store['numunique'] = numunique
        store['uniquecounts'] = childuniquefreqs
        store['childuniquefreqs'] = childuniquefreqs
    if False:
        # Try PyTables
        import tables
        store = tables.open_file(
            'store-pytables.hdf5', mode="w",
            filters=tables.Filters(complib='bzip2', complevel=6))
        def storearray(name, x):
            atom = tables.Atom.from_dtype(x.dtype)
            ds = store.createCArray(store.root, name, atom, x.shape)
            ds[:] = x
        storearray('numoccs', numoccs)
        storearray('occcounts', occcounts)
        storearray('childoccfreqs', childoccfreqs)
        storearray('numunique', numunique)
        storearray('uniquecounts', childuniquefreqs)
        storearray('childuniquefreqs', childuniquefreqs)
        store.close()
Developer: JohnReid, Project: JEMIMA, Lines of code: 33, Source file: evaluation.py


Example 16: write_npz_file

    def write_npz_file(self):
        assert len(self.examples) >= self.Nperfile

        # put Nperfile random examples at the end of the list
        for i in xrange(self.Nperfile):
            a = len(self.examples) - i - 1
            if a > 0:
              b = random.randint(0, a-1)
              self.examples[a], self.examples[b] = self.examples[b], self.examples[a]

        # pop Nperfile examples off the end of the list
        # put each component into a separate numpy batch array
        save_dict = {}
        for c in xrange(len(self.names)):
            batch_shape = (self.Nperfile,) + self.shapes[c]
            batch = np.empty(batch_shape, dtype=self.dtypes[c])
            for i in xrange(self.Nperfile):
                batch[i,:] = self.examples[-1-i][c]
            save_dict[self.names[c]] = batch

        del self.examples[-self.Nperfile:]

        filename = os.path.join(self.out_dir, "examples.%d.%d" % (self.Nperfile, self.filenum))
        #print "NPZ.RandomizingWriter: writing", filename
        np.savez_compressed(filename, **save_dict)
        self.filenum += 1
Developer: TheDuck314, Project: go-NN, Lines of code: 26, Source file: NPZ.py


Example 17: _save_results

    def _save_results(self, save_layers, sess):
        # Train loss
        np.savetxt(os.path.join(self.results_save_path, 'train_loss_record_%s.txt' % self.experiment_name),
                   np.array(self.train_loss_record, dtype=np.float32), fmt='%.3f', delimiter=',')

        # Validation accuracy
        np.savetxt(os.path.join(self.results_save_path, 'validation_accuracy_record_%s.txt' % self.experiment_name),
                   np.array(self.validation_accuracy_record, dtype=np.float32), fmt='%.3f', delimiter=',')

        # Test
        self.test_results.to_csv(os.path.join(self.results_save_path, 'test_results_%s.csv' % self.experiment_name),
                                 index=False)
        self.test_predictions_results.to_csv(
            os.path.join(self.results_save_path, 'test_predictions_%s.csv' % self.experiment_name), index=False)

        if save_layers:
            print('Saving weights and biases', file=sys.stderr)
            file_name_weights = os.path.join(self.pre_trained_weights_save_path,
                                             "%s_weights.npz" % self.experiment_name)
            file_name_biases = os.path.join(self.pre_trained_weights_save_path,
                                            "%s_biases.npz" % self.experiment_name)

            weights_dict = {}
            biases_dict = {}

            for layer_idx, (weights, biases) in enumerate(zip(self.weights, self.biases)):
                layer_name = 'hidden_layer_%02d' % layer_idx
                weights_dict[layer_name] = weights.eval(session=sess)
                biases_dict[layer_name] = biases.eval(session=sess)

            np.savez_compressed(file_name_weights, **weights_dict)
            np.savez_compressed(file_name_biases, **biases_dict)
Developer: MIREL-UNC, Project: wikipedia-ner, Lines of code: 32, Source file: mlp.py


Example 18: write_sparse_matrix

def write_sparse_matrix(matrix, filepath, compressed=True):
    """
    Write a ``scipy.sparse.csr_matrix`` or ``scipy.sparse.csc_matrix`` to disk
    at ``filepath``, optionally compressed.

    Args:
        matrix (``scipy.sparse.csr_matrix`` or ``scipy.sparse.csr_matrix``)
        filepath (str): /path/to/file on disk to which matrix objects will be written;
            if ``filepath`` does not end in ``.npz``, that extension is
            automatically appended to the name
        compressed (bool): if True, save arrays into a single file in compressed
            .npz format

    .. seealso: http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.savez.html
    .. seealso: http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.savez_compressed.html
    """
    if not isinstance(matrix, (csc_matrix, csr_matrix)):
        raise TypeError('input matrix must be a scipy sparse csr or csc matrix')
    make_dirs(filepath, 'w')
    if compressed is False:
        savez(filepath,
              data=matrix.data, indices=matrix.indices,
              indptr=matrix.indptr, shape=matrix.shape)
    else:
        savez_compressed(filepath,
                         data=matrix.data, indices=matrix.indices,
                         indptr=matrix.indptr, shape=matrix.shape)
Developer: GregBowyer, Project: textacy, Lines of code: 27, Source file: write.py
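
A complementary reader (my sketch, not part of textacy) showing how the four arrays written above reconstruct the sparse matrix:

import numpy as np
from scipy.sparse import csr_matrix

def read_sparse_matrix(filepath):
    # Rebuild the CSR matrix from the data/indices/indptr/shape arrays
    # saved by write_sparse_matrix above.
    with np.load(filepath) as npz:
        return csr_matrix((npz['data'], npz['indices'], npz['indptr']),
                          shape=tuple(npz['shape']))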


Example 19: _download_higgs_data_and_save_npz

def _download_higgs_data_and_save_npz(data_dir):
  """Download higgs data and store as a numpy compressed file."""
  input_url = os.path.join(URL_ROOT, INPUT_FILE)
  np_filename = os.path.join(data_dir, NPZ_FILE)
  if tf.gfile.Exists(np_filename):
    raise ValueError('data_dir already has the processed data file: {}'.format(
        np_filename))
  if not tf.gfile.Exists(data_dir):
    tf.gfile.MkDir(data_dir)
  # 2.8 GB to download.
  try:
    print('Data downloading..')
    temp_filename, _ = urllib.request.urlretrieve(input_url)

    # Reading and parsing 11 million csv lines takes 2~3 minutes.
    print('Data processing.. taking multiple minutes..')
    data = pd.read_csv(
        temp_filename,
        dtype=np.float32,
        names=['c%02d' % i for i in range(29)]  # label + 28 features.
    ).as_matrix()
  finally:
    os.remove(temp_filename)

  # Writing to temporary location then copy to the data_dir (0.8 GB).
  f = tempfile.NamedTemporaryFile()
  np.savez_compressed(f, data=data)
  tf.gfile.Copy(f.name, np_filename)
  print('Data saved to: {}'.format(np_filename))
Developer: IoannisKansizoglou, Project: models, Lines of code: 29, Source file: data_download.py


Example 20: testFileSeekableWithZip

 def testFileSeekableWithZip(self):
   # Note: Test case for GitHub issue 27276, issue only exposed in python 3.7+.
   filename = os.path.join(self._base_dir, "a.npz")
   np.savez_compressed(filename, {"a": 1, "b": 2})
   with gfile.GFile(filename, "rb") as f:
     info = np.load(f, allow_pickle=True)
   _ = [i for i in info.items()]
Developer: aritratony, Project: tensorflow, Lines of code: 7, Source file: file_io_test.py
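
Note that the test above passes the dict positionally, so numpy pickles it into a single object array named arr_0, which is why allow_pickle=True is needed on load. Expanding the dict into keyword arguments stores each value under its own key and avoids pickling entirely (a small sketch added here for contrast, not part of the TensorFlow test):

import numpy as np

np.savez_compressed("b.npz", **{"a": 1, "b": 2})  # each key becomes an array name
with np.load("b.npz") as info:                    # no allow_pickle needed
    print(int(info["a"]), int(info["b"]))         # 1 2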



Note: The numpy.savez_compressed examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not republish without permission.


