
Python numpy.array_split Function Code Examples


This article collects typical usage examples of the numpy.array_split function in Python. If you are struggling with questions such as how exactly to call array_split, how it behaves, or what real-world uses look like, the hand-picked code examples below should help.



Below are 20 code examples of the array_split function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
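
Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what np.array_split itself does: unlike np.split, it does not require the array to divide evenly, and it accepts an axis argument.

import numpy as np

a = np.arange(10)

# np.split would raise here because 10 is not divisible by 3;
# np.array_split gives the first l % n chunks one extra element.
chunks = np.array_split(a, 3)
print([c.tolist() for c in chunks])   # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]

# A 2-D array can be split along any axis.
m = np.arange(12).reshape(3, 4)
cols = np.array_split(m, 3, axis=1)   # column blocks of widths 2, 1, 1
print([c.shape for c in cols])        # [(3, 2), (3, 1), (3, 1)]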

Example 1: solar_position_numba

def solar_position_numba(unixtime, lat, lon, elev, pressure, temp, delta_t,
                         atmos_refract, numthreads, sst=False):
    """Calculate the solar position using the numba compiled functions
    and multiple threads. Very slow if functions are not numba compiled.
    """
    loc_args = np.array([lat, lon, elev, pressure, temp, delta_t,
                         atmos_refract, sst])
    ulength = unixtime.shape[0]
    result = np.empty((6, ulength), dtype=np.float64)
    if unixtime.dtype != np.float64:
        unixtime = unixtime.astype(np.float64)

    if ulength < numthreads:
        pvl_logger.warning('The number of threads is more than the length of' +
                           ' the time array. Only using %s threads.',
                            ulength)
        numthreads = ulength

    if numthreads <= 1:
        pvl_logger.debug('Only using one thread for calculation')
        solar_position_loop(unixtime, loc_args, result)
        return result

    split0 = np.array_split(unixtime, numthreads)
    split2 = np.array_split(result, numthreads, axis=1)
    chunks = [[a0, loc_args, split2[i]] for i, a0 in enumerate(split0)]
    # Spawn one thread per chunk
    threads = [threading.Thread(target=solar_position_loop, args=chunk)
               for chunk in chunks]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return result
Author: TylrA | Project: pvlib-python | Lines: 34 | Source: spa.py
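
The pattern above works because np.array_split returns views into its argument: the input times and a preallocated result array are split into matching chunks, and each thread fills its own slice of the shared result. A minimal sketch of the same idea, independent of pvlib (worker and the doubling operation are illustrative):

import threading

import numpy as np

def worker(src, dst):
    # dst is a view into the shared result array, so an in-place write
    # through it updates the parent without locking or copying.
    dst[:] = src * 2.0

data = np.arange(12, dtype=np.float64)
result = np.empty_like(data)
n_threads = 3

threads = [threading.Thread(target=worker, args=(s, d))
           for s, d in zip(np.array_split(data, n_threads),
                           np.array_split(result, n_threads))]
for t in threads:
    t.start()
for t in threads:
    t.join()

print(result)   # [ 0.  2.  4. ... 22.]

The chunks are disjoint, so the threads never write to the same memory and no lock is needed.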


Example 2: vocode

 def vocode(self, segment_voice, segment_gen):
     """This is the vocoder.  It multiplies the amplitudes of two seperate signals
     to produce a singular response""" 
     temp_final = []
     for j in range(self.num_channels):
         saw_spec = segment_gen[j].make_spectrum()
         input_spec = segment_voice[j].make_spectrum()
     
         input_hs = input_spec.hs
         saw_hs = saw_spec.hs
     
         saw_bands = np.array_split(saw_hs, self.num_bands)
         input_bands = np.array_split(input_hs, self.num_bands)
     
         final_bands = np.empty_like(saw_bands)
         for i in range(self.num_bands):
             amp_multi = np.abs(saw_bands[i])*np.abs(input_bands[i])
             phase_multi = np.angle(saw_bands[i])
             final_bands[i] = amp_multi*(np.cos(phase_multi)+(np.sin(phase_multi)*1j))
             
         temp_final.append(np.ma.concatenate(final_bands).data)
     final_wave = []
     for i in range(len(temp_final)):
         final_wave.append(thinkdsp.Spectrum(hs=temp_final[i], framerate = self.framerate).make_wave())
     output = final_wave[0]
     for i in range(1,len(final_wave)):
         output |= final_wave[i]
     return output
Author: jabb1123 | Project: Vocoder_project-SigSys | Lines: 28 | Source: test.py
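
The band-splitting step in this vocoder relies on array_split handling complex FFT coefficients like any other dtype. A self-contained sketch of just that step (the signal and band count are illustrative):

import numpy as np

signal = np.random.randn(1024)
hs = np.fft.rfft(signal)               # 513 complex coefficients

num_bands = 8
bands = np.array_split(hs, num_bands)  # 513 % 8 = 1, so sizes 65, 64, ..., 64
print(len(bands), bands[0].dtype)      # 8 complex128

# concatenating the bands recovers the original spectrum
assert np.allclose(np.concatenate(bands), hs)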


Example 3: distribute_nodes

 def distribute_nodes(self, path_index):
     path = self.paths[path_index]
     if path.type == 'linear':
         digits = int(np.ceil(np.log10(path.ne)))
         base = path.index * 10 ** digits
         energies = np.linspace(path.begin, path.end, path.ne)
         weights = path.weights2 + [1] * (path.ne - 6) + path.weights3
         weights = np.array(weights) * path.int_step
         nids = np.arange(path.ne) + base + 1
     
     elif path.type == 'poles':
         base = path.index * 100
         nids0 = base + 10 + np.arange(path.poles_num) + 1
         nids1 = base + 20 + np.arange(path.poles_num) + 1
         nids = np.append(nids0, nids1)
         energies0 = path.begin + (np.arange(path.poles_num) * 2
                                                     - 1) * np.pi * 1.j
         energies1 = path.end + (np.arange(path.poles_num) * 2
                                                     - 1) * np.pi * 1.j
         weights0 = [-1] * path.poles_num
         weights1 = [1] * path.poles_num
         weights = np.append(weights0, weights1)
     
     loc_nids = np.array_split(nids, self.comm.size)[self.comm.rank]
     loc_energies = np.array_split(energies,
                                      self.comm.size)[self.comm.rank]
     loc_weights = np.array_split(weights, self.comm.size)[self.comm.rank]
     return loc_nids, loc_energies, loc_weights
Author: eojons | Project: gpaw-scme | Lines: 28 | Source: contour.py


Example 4: score

    def score(self, X, y):
        """Returns the score obtained for each estimators/data slice couple.

        Parameters
        ----------
        X : array, shape (n_samples, n_features, n_estimators)
            The input samples. For each data slice, the corresponding estimator
            scores the prediction: e.g. [estimators[ii].score(X[..., ii], y)
                                        for ii in range(n_estimators)]
        y : array, shape (n_samples,) | (n_samples, n_targets)
            The target values.

        Returns
        -------
        score : array, shape (n_samples, n_estimators)
            Score for each estimator / data slice couple.
        """
        self._check_Xy(X)
        if X.shape[-1] != len(self.estimators_):
            raise ValueError('The number of estimators does not match '
                             'X.shape[2]')
        # For predictions/transforms the parallelization is across the data and
        # not across the estimators to avoid memory load.
        parallel, p_func, n_jobs = parallel_func(_sl_score, self.n_jobs)
        X_splits = np.array_split(X, n_jobs, axis=-1)
        est_splits = np.array_split(self.estimators_, n_jobs)
        score = parallel(p_func(est, x, y)
                         for (est, x) in zip(est_splits, X_splits))

        if n_jobs > 1:
            score = np.concatenate(score, axis=0)
        else:
            score = score[0]
        return score
Author: chrismullins | Project: mne-python | Lines: 34 | Source: search_light.py
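
Note that array_split accepts any array_like, including a plain Python list such as self.estimators_ here; the list is converted to an array first and split into near-equal groups, one per job. A hedged sketch with stand-in names:

import numpy as np

# stand-ins for fitted estimator objects (illustrative only)
estimators = ['est_a', 'est_b', 'est_c', 'est_d', 'est_e']
n_jobs = 2

est_splits = np.array_split(estimators, n_jobs)
print([s.tolist() for s in est_splits])
# [['est_a', 'est_b', 'est_c'], ['est_d', 'est_e']]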


Example 5: process

    def process(self, data, output, processes, process):
        """
        """
        print "in the process function"
        if data.center_of_rotation is None:
            centre_of_rotation = np.ones(data.get_number_of_sinograms())
            centre_of_rotation = centre_of_rotation * self.parameters["center_of_rotation"]
        else:
            centre_of_rotation = data.center_of_rotation[:]

        if centre_of_rotation is None:
            centre_of_rotation = np.ones(data.get_number_of_sinograms())
            centre_of_rotation = centre_of_rotation * self.parameters["center_of_rotation"]

        sinogram_frames = np.arange(data.get_number_of_sinograms())

        frames = np.array_split(sinogram_frames, len(processes))[process]
        centre_of_rotations = np.array_split(centre_of_rotation, len(processes))[process]

        angles = data.rotation_angle.data[:]

        for i in range(len(frames)):
            frame_centre_of_rotation = centre_of_rotations[i]
            sinogram = data.data[:, frames[i], :]
            reconstruction = self.reconstruct(
                sinogram,
                frame_centre_of_rotation,
                angles,
                (output.data.shape[0], output.data.shape[2]),
                (output.data.shape[0] // 2, output.data.shape[2] // 2),
            )
            output.data[:, frames[i], :] = reconstruction
            self.count += 1
            print(self.count)
Author: yskashyap | Project: Savu | Lines: 34 | Source: base_recon.py


Example 6: filter_params

 def filter_params(self, p_sets, p_fmins, nkeep=5, method='best'):
     # rank inits by costfx error low-to-high
     fmin_series = pd.Series(p_fmins)
     rankorder = fmin_series.sort_values()
     # eliminate extremely bad parameter sets
     rankorder = rankorder[rankorder<=5.0]
     if method=='random':
         # return nkeep from randomly sampled inits
         inits = p_sets[:nkeep]
         inits_err = p_fmins[:nkeep]
     elif method=='best':
         # return nkeep from inits with lowest err
         inits = [p_sets[i] for i in rankorder.index[:nkeep]]
         inits_err = rankorder.values[:nkeep]
     elif method=='lmh':
         # split index for low, med, and high err inits
         # if nkeep is odd, will sample more low than high
         if nkeep<3: nkeep=3
         ix = rankorder.index.values
         nl, nm, nh = [arr.size for arr in np.array_split(np.arange(nkeep), 3)]
         # extract indices for roughly equal numbers of parameter sets with low, med, hi err
         keep_ix = np.hstack([ix[:nl], np.array_split(ix,2)[0][-nm:], ix[-nh:]])
         inits = [p_sets[i] for i in keep_ix]
         inits_err = [fmin_series[i] for i in keep_ix]
     return inits, np.min(inits_err)
Author: dunovank | Project: radd_kd | Lines: 25 | Source: theta.py


Example 7: transform

    def transform(self, pts, verbose=None):
        """Apply the warp.

        Parameters
        ----------
        pts : shape (n_transform, 3)
            Source points to warp to the destination.

        Returns
        -------
        dest : shape (n_transform, 3)
            The transformed points.
        """
        logger.info('Transforming %s points' % (len(pts),))
        from scipy.spatial.distance import cdist
        assert pts.shape[1] == 3
        # for memory reasons, we should do this in ~100 MB chunks
        out = np.zeros_like(pts)
        n_splits = max(int((pts.shape[0] * self._destination.shape[0]) /
                           (100e6 / 8.)), 1)
        for this_out, this_pts in zip(np.array_split(out, n_splits),
                                      np.array_split(pts, n_splits)):
            dists = _tps(cdist(this_pts, self._destination, 'sqeuclidean'))
            L = np.hstack((dists, np.ones((dists.shape[0], 1)), this_pts))
            this_out[:] = np.dot(L, self._weights)
        assert not (out == 0).any()
        return out
Author: jhouck | Project: mne-python | Lines: 27 | Source: transforms.py
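
The reason this loop can fill out chunk by chunk is that array_split, when given a section count, returns views of the parent array, so this_out[:] = ... writes directly into out. A minimal demonstration:

import numpy as np

out = np.zeros(10)
for chunk in np.array_split(out, 3):
    chunk[:] = 7.0   # in-place write through the view

print(out)           # all sevens: the chunks alias the parent array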


Example 8: split_data

from math import ceil
from numpy import array_split

def split_data(ras, decs):
	"""
	Split the RAs and DECs into smaller chunks, which is better for
	cache coherence.
	"""
	size = ceil(len(ras) / 256.0)
	return zip(array_split(ras, size), array_split(decs, size))
Author: DarwinSenior | Project: COSMO | Lines: 7 | Source: binSortCountMapReduce.py


Example 9: parallelMorton

def parallelMorton(iMortonRanges, xMortonRanges, childMethod, numProcessesQuery):
    if iMortonRanges is not None:
        numMRanges = max((len(iMortonRanges), len(xMortonRanges)))
        if numMRanges > numProcessesQuery:
            numChunks = numProcessesQuery
        else:
            numChunks = numMRanges
        ichunks = numpy.array_split(iMortonRanges, numChunks)
        xchunks = numpy.array_split(xMortonRanges, numChunks)
    else:
        numMRanges = len(xMortonRanges)
        if numMRanges > numProcessesQuery:
            numChunks = numProcessesQuery
        else:
            numChunks = numMRanges
        ichunks = numpy.array_split([], numChunks)
        xchunks = numpy.array_split(xMortonRanges, numChunks)
    children = []
    for i in range(numChunks):
        children.append(multiprocessing.Process(target=childMethod, 
            args=(ichunks[i],xchunks[i])))
        children[-1].start()  
    # wait for all children to finish their execution
    for i in range(numChunks):
        children[i].join()
Author: ZheLI0319 | Project: pointcloud-benchmark | Lines: 25 | Source: dbops.py
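
A detail this function leans on: array_split never raises when asked for more sections than there are elements, and it happily splits an empty list; surplus chunks simply come back empty, so every child process still receives a (possibly empty) work list. For example:

import numpy as np

print([c.tolist() for c in np.array_split(np.arange(2), 4)])
# [[0], [1], [], []]

print([c.tolist() for c in np.array_split([], 3)])
# [[], [], []]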


Example 10: gp2

def gp2(data, block_size = 100, nugget = 0.005):

	c = data[0]
	s = data[1]
	s_2 = np.array_split(s, len(s) // block_size + 1)
	c_2 = np.array_split(c, len(s) // block_size + 1)
	
	sapflux_pred = []
	
	nug = nugget
	for a in range(0,len(s_2)):
	
		t0 = time.time()
		X = np.atleast_2d(c_2[a]).T
		y = np.atleast_2d(s_2[a]).T
	
		gproc = gaussian_process.GaussianProcess(theta0=0.01, thetaL=1e-4, thetaU=1e-1,nugget=nug)
	
		
		gproc.fit(X, y)
		y_pred, sigma2_pred = gproc.predict(X, eval_MSE=True)
		sapflux_pred.extend(y_pred.ravel())
		t1 = time.time()
		print(t1 - t0)
	
	return np.array([c, s, np.array(sapflux_pred)])
Author: dmuley | Project: lc-simulation | Lines: 26 | Source: pipeline.py


Example 11: ensemble_maker_inner

def ensemble_maker_inner(train_mat,labels,model_gen_function, info_dict,num=10):
    ## contains core functions to make ensemble models
    ## from training data and labels
    ## model_gen_function is a function that takes NO arguments and returns a keras model
    ## info_dict is a dictionary of training info 
    train_mat, labels = shuffle(train_mat, labels)
    train_mat = np.array_split(train_mat, num, axis=0)
    labels = np.array_split(labels, num, axis=0)
    earlystop = EarlyStopping(monitor=info_dict['monitor'], min_delta=info_dict['min_delta'],
                              patience=info_dict['patience'],
                              verbose=0,
                              mode='auto')
    callbacks_list = [earlystop]
    model_list = []
    for ii in range(num):
        train_feature = array_stack(train_mat, ii)
        train_labels = array_stack(labels, ii)
        loaded_model = model_gen_function() # note the call to gen new model
        current_model = reset_weights(loaded_model)
        history = current_model.fit(train_feature, train_labels,
                                    epochs=info_dict['epochs'], verbose=0,
                                    batch_size=info_dict['batch_size'],
                                    callbacks=callbacks_list)
        model_list.append(current_model)
    return(model_list)
Author: hjkgrp | Project: molSimplify | Lines: 25 | Source: ensemble_test.py


Example 12: generateTrainAndTest

    def generateTrainAndTest(self):
        """
        Generate train and test data and then yield
        :return:
        """
        partitions = np.array_split(self.dataset, self.numOfFolds)
        labels_partitions = np.array_split(self.labels, self.numOfFolds)
        for fold in range(self.numOfFolds):
            self.test = partitions[fold]
            self.labels_test = labels_partitions[fold]

            fold_left = partitions[:fold]
            fold_right = partitions[fold + 1:]

            labels_fold_left = labels_partitions[:fold]
            labels_fold_right = labels_partitions[fold + 1:]

            if len(fold_left) == 0:
                self.train = np.concatenate(fold_right)
                self.labels_train = np.concatenate(labels_fold_right)
            elif len(fold_right) == 0:
                self.train = np.concatenate(fold_left)
                self.labels_train = np.concatenate(labels_fold_left)
            else:
                self.train = np.concatenate((np.concatenate(fold_left), np.concatenate(fold_right)))
                self.labels_train = np.concatenate(
                        (np.concatenate(labels_fold_left), np.concatenate(labels_fold_right)))
            yield
Author: mehtadeepen | Project: Image-Classification-in-ML | Lines: 28 | Source: CrossValidation.py
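
The same fold bookkeeping can be written compactly by splitting an index array instead of the data itself; a pure-numpy sketch (not from the project):

import numpy as np

X = np.arange(20).reshape(10, 2)
y = np.arange(10)
num_folds = 3

folds = np.array_split(np.arange(len(X)), num_folds)
for k, test_idx in enumerate(folds):
    train_idx = np.concatenate([f for i, f in enumerate(folds) if i != k])
    X_train, X_test = X[train_idx], X[test_idx]
    y_train, y_test = y[train_idx], y[test_idx]
    print(k, len(train_idx), len(test_idx))   # 0 6 4, then 1 7 3, 2 7 3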


Example 13: get_gradient

def get_gradient(theta):
    global fractional_counts, event_index, feature_index, event_grad, rc, N
    assert len(theta) == len(feature_index)
    event_grad = {}
    cpu_count = multiprocessing.cpu_count()
    pool = Pool(processes=cpu_count)  # uses all available CPUs
    batches_fractional_counts = np.array_split(range(len(event_index)), cpu_count)
    events_to_split = list(events_to_features.keys())  # list() so numpy can split it under Python 3
    batches_events_to_features = np.array_split(events_to_split, cpu_count)
    # for batch_of_fc in batches_fractional_counts:
    for batch_of_fc in batches_events_to_features:
        pool.apply_async(batch_gradient, args=(theta, batch_of_fc), callback=batch_accumilate_gradient)
    pool.close()
    pool.join()
    # grad = np.zeros_like(theta)
    grad = -2 * rc * theta  # l2 regularization with lambda 0.5
    for e in event_grad:
        feats = events_to_features.get(e, [])
        for f in feats:
            grad[feature_index[f]] += event_grad[e]

    # for s in seen_index:
    # grad[s] += -theta[s]  # l2 regularization with lambda 0.5
    assert len(grad) == len(feature_index)
    return -grad
Author: arendu | Project: Featurized-Word-Alignment | Lines: 25 | Source: hybrid_model1_mp.py


Example 14: make_batches

def make_batches(x, y, batch_size=128, shuffle=True, nest=True):
    for i in range(len(x)):
        x[i] = atleast_4d(x[i])
    y = atleast_4d(y)
    num_batches = (y.shape[0] // batch_size)
    if y.shape[0] % batch_size != 0:
        num_batches += 1
    if shuffle:
        shuffled_arrays = sk.utils.shuffle(*x, y)
        x = shuffled_arrays[:len(x)]
        y = shuffled_arrays[-1]
    x_batches_list = []
    for i in range(len(x)):
        x_batches_list.append(np.array_split(x[i], num_batches))
    if nest:
        x_batches = []
        for i in range(num_batches):
            x_batch = []
            for x_input in x_batches_list:
                x_batch.append(x_input[i])
            x_batches.append(x_batch)
    else:
        x_batches = x_batches_list
    y_batches = np.array_split(y, num_batches)
    return x_batches, y_batches, num_batches
Author: wanqizhu | Project: Neural-Network-Dev | Lines: 25 | Source: Core.py
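
Computing a ceil-style batch count and handing it to array_split guarantees every batch is within one element of the others, with no manual padding; note that array_split balances the chunks rather than filling each batch to exactly batch_size. A stripped-down sketch of the batching step:

import numpy as np

y = np.arange(10)
batch_size = 4
num_batches = y.shape[0] // batch_size + (y.shape[0] % batch_size != 0)

batches = np.array_split(y, num_batches)
print([b.tolist() for b in batches])   # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]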


Example 15: ModelSelectionTest01

def ModelSelectionTest01():
	from sklearn import datasets, svm
	import numpy as np
	digits = datasets.load_digits()
	X_digits = digits.data
	Y_digits = digits.target
	svc = svm.SVC(C = 1, kernel = 'linear')
	score = svc.fit(X_digits[:-100], Y_digits[:-100]).score(X_digits[-100:], Y_digits[-100:])

	#print score

	X_folds = np.array_split(X_digits, 3)
	Y_folds = np.array_split(Y_digits, 3)

	#print len(X_folds[0])

	scores = list()

	for k in range(3):
		X_train = list(X_folds) # X_folds is a list with 3 elements
		X_test = X_train.pop(k) # the test set is the k-th element of the folds
		X_train = np.concatenate(X_train) # concatenate the remaining folds into the training set
		#print len(X_train)
		Y_train = list(Y_folds)
		Y_test = Y_train.pop(k)
		Y_train = np.concatenate(Y_train)

		scores.append(svc.fit(X_train, Y_train).score(X_test, Y_test))

	#print scores


	from sklearn import cross_validation
	k_fold = cross_validation.KFold(n = 6, n_folds = 3)
	for train_indices, test_indices in k_fold:
		print(train_indices, test_indices)

	k_fold = cross_validation.KFold(len(X_digits), n_folds = 3)
	scores = [svc.fit(X_digits[train], Y_digits[train]).score(X_digits[test], Y_digits[test]) for train , test in k_fold]

	#print scores

	scores = cross_validation.cross_val_score(svc, X_digits, Y_digits, cv = k_fold, n_jobs = 1)
	#print scores

	from sklearn.grid_search import GridSearchCV
	gammas = np.logspace(-6, -1, 10)
	clf = GridSearchCV(estimator = svc, param_grid = dict(gamma = gammas), n_jobs = 1)
	clf.fit(X_digits[:1000], Y_digits[:1000])
	print(clf.best_score_)
	print(clf.best_estimator_.gamma)

	from sklearn import linear_model, datasets
	lasso = linear_model.LassoCV()    # how does LassoCV differ from Lasso here?
	diabetes = datasets.load_diabetes()
	X_diabetes = diabetes.data
	Y_diabetes = diabetes.target
	lasso.fit(X_diabetes, Y_diabetes)

	print(lasso.alpha_)
Author: hyliu0302 | Project: scikit-learn-notes | Lines: 60 | Source: myScikitLearnFcns.py


Example 16: schedule_generator_maintenance_loop

def schedule_generator_maintenance_loop(load, pmaxs, annual_maintenance_rates, dispatch_periods, scheduling_order):
    # if nothing else, better to schedule the large generators first
    scheduling_order = np.argsort(-pmaxs) if scheduling_order is None else scheduling_order

    # annual maintenance rates must be between zero and one
    annual_maintenance_rates = np.clip(annual_maintenance_rates, 0, 1)

    # gives the index for the change between dispatch_periods
    group_cuts = list(np.where(np.diff(dispatch_periods) != 0)[0] + 1) if dispatch_periods is not None else None
    group_lengths = np.array([group_cuts[0]] + list(np.diff(group_cuts)) + [len(load) - group_cuts[-1]])
    num_groups = len(group_cuts) + 1

    # necessary to scale load in some cases for the optimization to work. Basically, load shouldn't be > gen
    load_scaled = scale_load_to_system(load, pmaxs)
    load_scaled = np.concatenate([[np.max(ls)]*gl for gl, ls in zip(group_lengths, np.array_split(load_scaled, np.array(group_cuts)))])

    pmaxs_clipped = copy.deepcopy(pmaxs)
    pmaxs_clipped = np.clip(pmaxs_clipped, 1e-1, None)
    maintenance_energy = annual_maintenance_rates*pmaxs_clipped*len(load)
    scheduled_maintenance = np.zeros((num_groups, len(pmaxs)))

    # loop through and schedule maintenance for each generator one at a time. Update the net load after each one.
    for i in scheduling_order:
        energy_allocation = dispatch_budget.dispatch_to_energy_budget(load_scaled, -maintenance_energy[i], pmins=0, pmaxs=pmaxs_clipped[i])
        scheduled_maintenance[:, i] = np.clip(np.array([np.mean(ls) for ls in np.array_split(energy_allocation, np.array(group_cuts))])/pmaxs_clipped[i], 0, 1)
        load_scaled += np.concatenate([[sm * pmaxs[i]]*gl for gl, sm in zip(group_lengths, scheduled_maintenance[:, i])])

    if not all(np.isclose(annual_maintenance_rates, (scheduled_maintenance.T * group_lengths).sum(axis=1)/len(load))):
        logging.warning("scheduled maintance rates don't all match the annual maintenance rates")
    return scheduled_maintenance
Author: anamileva | Project: energyPATHWAYS | Lines: 30 | Source: dispatch_maintenance.py
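
Note the second calling convention used here: passing an array of indices (group_cuts) instead of a section count makes array_split cut at those positions, yielding one chunk per dispatch period. A small demonstration of that mode (the numbers are illustrative):

import numpy as np

load = np.array([5., 6., 7., 9., 9., 4., 3.])
group_cuts = [2, 5]                  # boundaries between dispatch periods

groups = np.array_split(load, np.array(group_cuts))
print([g.tolist() for g in groups])  # [[5.0, 6.0], [7.0, 9.0, 9.0], [4.0, 3.0]]
print([float(g.mean()) for g in groups])  # one mean per dispatch period, as in the loop above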


Example 17: lorenz_example

def lorenz_example():

    sigma = 10
    rho = 28
    beta = 8.0/3
    theta = 3 * np.pi / 4

    def lorenz(xyz, t):
        x, y, z = xyz
        x_dot = sigma * (y - x)
        y_dot = x * rho - x * z - y
        z_dot = x * y - beta* z
        return [x_dot, y_dot, z_dot]

    initial = (-10, -7, 35)
    t = np.arange(0, 100, 0.006)

    solution = odeint(lorenz, initial, t)

    x = solution[:, 0]
    y = solution[:, 1]
    z = solution[:, 2]
    xprime = np.cos(theta) * x - np.sin(theta) * y

    colors = ["#C6DBEF", "#9ECAE1", "#6BAED6", "#4292C6", "#2171B5", "#08519C", "#08306B",]

    output_file("lorenz.html", title="lorenz.py example")

    multi_line(np.array_split(xprime, 7), np.array_split(z, 7),
               line_color=colors, line_alpha=0.8, line_width=1.5,
               tools="pan,zoom,resize", title="lorenz example", name="lorenz_example")

    return curplot()
Author: luisgustavoneves | Project: bokeh | Lines: 33 | Source: lorenz.py


Example 18: make_batches

def make_batches(X, y, batch_size=128, shuffle=True, nest=True):
    for i in range(len(X)):
        X[i] = atleast_4d(X[i])
    y = atleast_4d(y)
    num_batches = (y.shape[0] // batch_size)
    if y.shape[0] % batch_size != 0:
        num_batches += 1
    if shuffle:
        shuffled_arrays = sk.utils.shuffle(*X, y)
        X = shuffled_arrays[:len(X)]
        y = shuffled_arrays[-1]
    X_batches_list = []
    for i in range(len(X)):
        X_batches_list.append(np.array_split(X[i], num_batches))
    if nest:
        X_batches = []
        for i in range(num_batches):
            X_batch = []
            for X_input in X_batches_list:
                X_batch.append(X_input[i])
            X_batches.append(X_batch)
    else:
        X_batches = X_batches_list
    y_batches = np.array_split(y, num_batches)
    return X_batches, y_batches, num_batches
Author: agajews | Project: Neural-Network-Dev | Lines: 25 | Source: Core.py


Example 19: _setup_grids_

def _setup_grids_(mf, dm):
    mol = mf.mol
    grids = mf.grids

    if rank == 0:
        grids.build(with_non0tab=False)
        grids.coords = numpy.array_split(grids.coords, mpi.pool.size)
        grids.weights = numpy.array_split(grids.weights, mpi.pool.size)
    grids.coords = mpi.scatter(grids.coords)
    grids.weights = mpi.scatter(grids.weights)

    ground_state = (isinstance(dm, numpy.ndarray) and dm.ndim == 2)
    if mf.small_rho_cutoff > 1e-20 and ground_state:
        rho = mf._numint.get_rho(mol, dm, grids, mf.max_memory)
        n = comm.allreduce(numpy.dot(rho, grids.weights))
        if abs(n-mol.nelectron) < rks.NELEC_ERROR_TOL*n:
            rw = mpi.gather(rho * grids.weights)
            idx = abs(rw) > mf.small_rho_cutoff / grids.weights.size
            logger.alldebug1(mf, 'Drop grids %d',
                             grids.weights.size - numpy.count_nonzero(idx))
            grids.coords  = numpy.asarray(grids.coords [idx], order='C')
            grids.weights = numpy.asarray(grids.weights[idx], order='C')

    grids.non0tab = grids.make_mask(mol, grids.coords)

    return grids
Author: sunqm | Project: mpi4pyscf | Lines: 26 | Source: rks.py


Example 20: RSM

 def RSM(self,avgl,rossete = 4,loopdist='gaussian'):
     x = np.arange(1,self.N)
     pickpdist = pdist()
     if loopdist == 'gaussian':
         cdf = np.cumsum(pickpdist.gaussian(avgl)(x))
         
     while True:
         self.looplst = x[np.searchsorted(cdf,np.random.random(self.M))]
         if np.sum(self.looplst) < self.N - 1:
             break
     
     rest = self.N-1-np.sum(self.looplst)
     temp = np.array_split(np.arange(rest),3*rossete)
     anchor = []
     for i in range(rossete):
         temp3 = []
         temp3.append(np.random.choice(temp[i*3+1],1)[0])
         for j in np.arange([len(np.array_split(np.arange(self.M),rossete)[k]) for k in range(rossete)][i]-1):
             temp3.append(temp3[-1]+np.random.randint(1,5))
         anchor.append(temp3)
     
     anchor = np.array(anchor)
     anchor = anchor.flatten()
     temp1 = anchor + np.cumsum(self.looplst)
     temp2 = temp1 - self.looplst
     self.pair = np.array(list(zip(temp2, temp1)))  # list() needed under Python 3, where zip is lazy
     
     return self.looplst,self.pair
Author: anyuzx | Project: polymer_crowder | Lines: 28 | Source: loopmodel.py



Note: The numpy.array_split examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For redistribution and use, please refer to the corresponding project's license. Do not reproduce without permission.

