Python joblib.delayed Function Code Examples


This article collects typical usage examples of the sklearn.externals.joblib.delayed function in Python. If you are wondering how the delayed function is used in practice, how to call it, or what real-world examples look like, the hand-picked code examples below may help.



The sections below present 20 code examples of the delayed function, sorted by popularity by default.
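Before the collected examples, here is a minimal, self-contained sketch of the pattern they all share: delayed(func)(args) records a call as a (function, args, kwargs) triple without executing it, and Parallel consumes an iterable of such triples and dispatches them to worker processes or threads. The square helper and the parameter values are illustrative only, not taken from any of the examples; the examples below import the same pair of names from the older sklearn.externals.joblib path, while modern code imports directly from joblib.

from joblib import Parallel, delayed  # the examples below use sklearn.externals.joblib instead


def square(x):
    """Toy task used only for illustration."""
    return x * x


if __name__ == "__main__":
    # delayed(square)(i) builds a (function, args, kwargs) triple without calling square;
    # Parallel evaluates the triples across n_jobs workers and preserves input order.
    results = Parallel(n_jobs=2)(delayed(square)(i) for i in range(10))
    print(results)  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]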

Example 1: batch_update

def batch_update(parallel, data, row, col):
    U_ = U[row, :]
    V_ = V[col, :]
    bu_ = bu[row]
    bm_ = bm[col]


    du = parallel(delayed(gred_u)(data[i], U_[i,:], V_[i,:], bu_[i], bm_[i], avg, C) for i in xrange(len(data)))
    dv = parallel(delayed(gred_v)(data[i], U_[i,:], V_[i,:], bu_[i], bm_[i], avg, C) for i in xrange(len(data)))
    dbu = parallel(delayed(gred_bu)(data[i], U_[i,:], V_[i,:], bu_[i], bm_[i], avg, C) for i in xrange(len(data)))
    dbm = parallel(delayed(gred_bm)(data[i], U_[i,:], V_[i,:], bu_[i], bm_[i], avg, C) for i in xrange(len(data)))
    if method=='sgd':
        for i in xrange(len(data)):
            U_[i,:] -= eta*du[i]
            V_[i,:] -= eta*dv[i]
            bu_[i] -= eta*dbu[i]
            bm_[i] -= eta*dbm[i]
        for c, i in enumerate(row):
            U[i,:]=U_[c,:]
            bu[i]=bu_[c]
        for c, j in enumerate(col):
            V[j,:]=V_[c,:]
            bm[j]=bm_[c]
    elif method=='adagrad':
        for c, i in enumerate(row):
            gdu[i] += np.dot(du[c], du[c])
            gdbu[i] += np.dot(dbu[c], dbu[c])
            U[i,:]-=eta*du[c]/sqrt(gdu[i]+epislon)
            bu[i]-=eta*dbu[c]/sqrt(gdbu[i]+epislon)
        for c, j in enumerate(col):
            gdv[j] += np.dot(dv[c], dv[c])
            gdbm[j] += np.dot(dbm[c], dbm[c])
            V[j,:]-=eta*dv[c]/sqrt(gdv[j]+epislon)
            bm[j]-=eta*dbm[c]/sqrt(gdbm[j]+epislon)
Developer: wattlebird, Project: predictor, Lines: 34, Source: gradientdescent.py


Example 2: orig_main

def orig_main():
    if len(sys.argv) == 4:
        path, adjective, n_jobs = sys.argv[1:]
        n_jobs = int(n_jobs)
        print "Training the adjective %s for the phase %s" % (
                adjective)

        loaded_features = load_adjective_phase(path)
        p = Parallel(n_jobs=n_jobs,verbose=10)
        p([delayed(orig_train_adjective_phase_classifier)(path, adjective, loaded_features)])

    elif len(sys.argv) == 3:
        path, n_jobs = sys.argv[1:]
        n_jobs = int(n_jobs)
        print "Training the all adjectives"
        loaded_features = load_adjective_phase(path)
 
        p = Parallel(n_jobs=n_jobs,verbose=10)
        p(delayed(orig_train_adjective_phase_classifier)(path, adjective, loaded_features) 
            for adjective in adjectives)
                                                      
    else:
        print "Usage:"
        print "%s path adjective n_jobs" % sys.argv[0]
        print "%s path n_jobs" % sys.argv[0]
        print "Path to the base directory"
Developer: IanTheEngineer, Project: Penn-haptics-bolt, Lines: 26, Source: train_adjectives_features.py


Example 3: executeWithStart

    def executeWithStart(self, desc, function, data, *args, **kwargs):
        #Splitting task
        tSplitter = TaskSplitter()
        nbJobs, splittedData, starts = tSplitter.partition(self._nbParal, data)

        #Logging
        self.setTask(1, ("Starting parallelization : "+desc))

        #Parallelization
        parallelizer = Parallel(n_jobs=nbJobs, temp_folder=self._tmpFolder,
                                verbose=self.verbosity,)

        if len(args) == 0:
            if len(kwargs) == 0:
                allData = parallelizer(delayed(function)(
                    splittedData[i], startIndex=starts[i])
                    for i in xrange(nbJobs))
            else:
                allData = parallelizer(delayed(function)(
                    splittedData[i], startIndex=starts[i], **kwargs)
                    for i in xrange(nbJobs))

        elif len(kwargs) == 0:
            allData = parallelizer(delayed(function)(
                splittedData[i], startIndex=starts[i], *args)
                for i in xrange(nbJobs))

        else:
            allData = parallelizer(delayed(function)(
                splittedData[i], startIndex=starts[i], *args, **kwargs)
                for i in xrange(nbJobs))

        self.endTask()

        return allData
Developer: jm-begon, Project: masterthesis, Lines: 35, Source: TaskManager.py


Example 4: warmstart_all_parallel

def warmstart_all_parallel(x, y, x_test, y_test, fname_in='results_softmax_regression_mnist', fname_out='results_softmax_regression_warmstart_mnist', model_type='softmax_regression', w_diff_term_crit=0.0001, learning_rate=0.0001, regularizations = [100., 10., 1., 0.1, 0.01, 0.001, 0.]):
    pretrained_models = pickle.load(open(fname_in, 'rb'))
    if model_type == 'softmax_regression':
        #previous_loss_train=None, previous_regularization_penalty_train=None
        results = joblib.Parallel(n_jobs=47)(delayed(tf_softmax_regression.train_softmax)
                                             (
                                             x, y, x_test, y_test, learning_rate=learning_rate, max_iterations=1000000,
                                             w_diff_term_crit=w_diff_term_crit, verbose=True,
                                             regularization=regularizations[target_i],
                                             model=pretrained_models[init_i]['model'],
                                             regularization_initialization=pretrained_models[init_i]['regularization'],
                                             previous_loss_train=pretrained_models[init_i]['loss_train'],
                                             previous_regularization_penalty_train=pretrained_models[init_i]['regularization_penalty_train']
                                         ) for target_i in xrange(0, len(regularizations))
                                           for init_i in xrange(0, len(pretrained_models))
                                         )
    elif model_type == 'linear_regression':
        results = joblib.Parallel(n_jobs=47)(delayed(tf_linear_regression.train)
                                                 (
                                                 x, y, x_test, y_test, learning_rate=learning_rate, max_iterations=1000000,
                                                 w_diff_term_crit=w_diff_term_crit, verbose=True,
                                                 regularization=regularizations[target_i],
                                                 model=pretrained_models[init_i]['model'],
                                                 regularization_initialization=pretrained_models[init_i][
                                                     'regularization']
                                             ) for target_i in xrange(0, len(regularizations))
                                             for init_i in xrange(0, len(pretrained_models))
                                             )
    pickle.dump(results, open(fname_out, 'wb'))
Developer: nikste, Project: hyperparameter_fault_tolerance, Lines: 29, Source: experiments.py


Example 5: predict_

    def predict_(self, X, probability=False):
        """Predict class for X.

        The predicted class of an input sample is a vote by the individual searchlights.

        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes.
        """

        # votes = []
        # for v in range(self.n_best):
        #     votes += [self.estimators_[v].predict(np.array([x.get_data()[self.best_spheres[v]] for x in X]))]

        if not isinstance(X, dict):
            raise ValueError("X has to be a dict")

        if self.base_estimator._estimator_type == "searchlight_ensemble":
            self.votes = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")(
                delayed(_vote)(e, X[roi_id][0], probability) for roi_id, e in self.estimators_.items()
            )
        else:
            self.votes = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")(
                delayed(_vote)(e, X[roi_id], probability) for roi_id, e in self.estimators_.items()
            )

        self.votes_pooled = np.array(self.votes).swapaxes(0, 1).dot(self.vote_weighting) / sum(self.vote_weighting)
Developer: m-guggenmos, Project: decog, Lines: 35, Source: roi_ensemble.py


Example 6: get_split_scores

def get_split_scores(factory,thresholds,formula,
                     metric = None,#p.e. usability entropy
                     use_joblib = False,
                     joblib_backend = 'threading',
                     n_jobs = -1,
                     min_events_fraction_leaf = 0.,verbose = False):

    if metric == None:
        metric = penalized_usability_entropy
    if min_events_fraction_leaf <=1:
        min_events_fraction_leaf = int(min_events_fraction_leaf*sum(factory.weights))
    if verbose:
        print min_events_fraction_leaf, sum(factory.weights)

    if not use_joblib:
        scores = np.repeat(float("inf"),len(thresholds))
        for i,(feature,cut,_) in enumerate(thresholds):
            predicate =  (factory.events[:,feature] > cut)

            #skip the edge cases... (inf penalty)
            if np.all(predicate) or (not np.any(predicate)):
                #if this split does not split, fuggedaboutit
                continue 
            if min_events_fraction_leaf>0:
                #get rid of too uneven a cuts
                sum_weight = np.sum(factory.weights)
                true_weight = np.sum(factory.weights[predicate])
                false_weight = sum_weight - true_weight
                if true_weight < min_events_fraction_leaf or false_weight < min_events_fraction_leaf:
                    if verbose: print "t:",true_weight,"f:",false_weight, "discarded"
                    continue
                if verbose: print "t:",true_weight,"f:",false_weight, "passed"
            #compute score
            subFactories = factory.split_by(predicate)
            scores[i] = metric(formula,*subFactories)
    else:
        if n_jobs < 0:
            n_jobs = joblib.cpu_count() +1 - n_jobs
       
        indices = [0]+[len(thresholds)*(i+1)/n_jobs for i in range(n_jobs)]
        thresholdSections = [thresholds[indices[i]:indices[i+1]] for i in range(n_jobs)]
        
        if joblib_backend == 'threading':
            factory = [deepcopy(factory) for i in range(n_jobs)]
            formula = [deepcopy(formula) for i in range(n_jobs)]
            metric = [deepcopy(metric) for i in range(n_jobs)] #in case it has some internal data
            
            jobs = (joblib.delayed(get_split_scores)(factory[i],thresholdSection, formula[i],
                                                 metric=metric[i],use_joblib = False,
                                                 min_events_fraction_leaf = min_events_fraction_leaf,
                                                 verbose = verbose)
                                    for i,thresholdSection in enumerate(thresholdSections))
        else:
            jobs = (joblib.delayed(get_split_scores)(factory,thresholdSection, formula,
                                                 metric=metric,use_joblib = False,
                                                 min_events_fraction_leaf = min_events_fraction_leaf,
                                                 verbose = verbose)
                                    for thresholdSection in thresholdSections)
        scores = np.hstack(joblib.Parallel(n_jobs = n_jobs, backend = joblib_backend)(jobs))
    return scores
Developer: justheuristic, Project: pruner, Lines: 60, Source: alt_hierarchy.py


Example 7: fit

    def fit(self, imgs, y=None, confounds=None):
        """Compute the mask and the ICA maps across subjects

        Parameters
        ----------
        imgs: list of Niimg-like objects
            See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.
            Data on which PCA must be calculated. If this is a list,
            the affine is considered the same for all.

        confounds: CSV file path or 2D matrix
            This parameter is passed to nilearn.signal.clean. Please see the
            related documentation for details
        """
        MultiPCA.fit(self, imgs, y=y, confounds=confounds)
        random_state = check_random_state(self.random_state)

        seeds = random_state.randint(np.iinfo(np.int32).max, size=self.n_init)
        if (LooseVersion(sklearn.__version__).version > [0, 12]):
            # random_state in fastica was added in 0.13
            results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
                delayed(fastica)(self.components_.T,
                    whiten=True, fun='cube', random_state=seed)
                for seed in seeds)
        else:
            results = Parallel(n_jobs=1, verbose=self.verbose)(
                delayed(fastica)(self.components_.T, whiten=True, fun='cube')
                for seed in seeds)

        ica_maps_gen_ = (result[2].T for result in results)
        ica_maps_and_sparsities = ((ica_map,
                                    np.sum(np.abs(ica_map), axis=1).max())
                                   for ica_map in ica_maps_gen_)
        ica_maps, _ = min(ica_maps_and_sparsities, key=itemgetter(-1))

        # Thresholding
        ratio = None
        if isinstance(self.threshold, float):
            ratio = self.threshold
        elif self.threshold == 'auto':
            ratio = 1.
        elif self.threshold is not None:
            raise ValueError("Threshold must be None, "
                             "'auto' or float. You provided %s." %
                             str(self.threshold))
        if ratio is not None:
            abs_ica_maps = np.abs(ica_maps)
            threshold = scoreatpercentile(
                abs_ica_maps,
                100. - (100. / len(ica_maps)) * ratio)
            ica_maps[abs_ica_maps < threshold] = 0.
        self.components_ = ica_maps

        # flip signs in each component so that peak is +ve
        for component in self.components_:
            if component.max() < -component.min():
                component *= -1

        return self
Developer: DavidDJChen, Project: nilearn, Lines: 59, Source: canica.py


Example 8: plot_learning_curves_across_topics

def plot_learning_curves_across_topics(n_runs, start_idx, stop_idx, estimators_dict, comment=None):
  """
  TODO Most probably buggy
  """
  for topic_id, data in texts_vote_lists_truths_by_topic_id.iteritems():
    print 'Loading topic %s' % topic_id
    texts, vote_lists, truths = data
    n_documents = len(texts)

    vectorizer = TfidfVectorizer()
    tfidf = vectorizer.fit_transform(texts)
    text_similarity = cosine_similarity(tfidf)

    x = np.arange(start_idx, stop_idx)

    y_by_estimator = dict( (estimator, []) for estimator in estimators_dict.keys() )

    for estimator_name, estimator_and_args in estimators_dict.iteritems():
      print 'Calculating for %s' % estimator_name
      estimator, args, active_pars = estimator_and_args
      if active_pars is None:
        sequences = Parallel(n_jobs=4)( delayed(get_accuracy_sequence)(estimator, stop_idx, texts, 
          vote_lists, truths, text_similarity, idx, False, *args) for idx in xrange(n_runs) )
      else:
        sequences = Parallel(n_jobs=4)( delayed(get_accuracy_sequence_active)(estimator, stop_idx, texts, 
          vote_lists, truths, text_similarity, active_pars, idx, False, *args) for idx in xrange(n_runs) )      

      good_slices = [ s[start_idx:] for s in sequences if s is not None ]
      if good_slices:
        results = np.vstack(good_slices)

        begin_accuracies = results[:, 0]
        end_accuracies = results[:, -1]
        
        begin_accuracies.dump("pickles/%s-%s-begin-accuracies--.pkl" % (topic_id, estimator_name) )
        end_accuracies.dump("pickles/%s-%s-end-accuracies--.pkl" % (topic_id, estimator_name))

        # We will then need to vstack and avg though all the topic accuracies for each estimator
        y_by_estimator[estimator_name].append( np.mean(results, axis=0) )
      else:
        print 'Topic %s is not represented with estimator %s' % (topic_id, estimator_name)

    result_by_estimator = {}

    for estimator_name, mean_accuracy_sequences in y_by_estimator.iteritems():
      if mean_accuracy_sequences:
        to_avg = np.vstack(mean_accuracy_sequences)
        result_by_estimator[estimator_name] = np.mean(to_avg, axis=0)
      else:
        print "Nope"
  if comment:
    title = 'Across topics, %s runs, %s' % (n_runs, comment)
  else:
    title = 'Across topics, %s runs' % topic_id
  plot_learning_curve(title, x, result_by_estimator, 'Votes sampled', 'Accuracy')
Developer: piyushbansal, Project: ir-crowd-thesis, Lines: 55, Source: experiments.py


Example 9: train_all_parallel

def train_all_parallel(x, y, x_test, y_test, fname='results_softmax_regression_mnist', model_type='softmax_regression', w_diff_term_crit=0.0001, learning_rate=0.0001, regularizations = [100., 10., 1., 0.1, 0.01, 0.001, 0.]):
    if model_type == 'softmax_regression':
        results = joblib.Parallel(n_jobs=47)(delayed( tf_softmax_regression.train_softmax)(
            x, y, x_test, y_test, learning_rate=learning_rate, max_iterations=1000000,
            regularization=regularizations[reg_i], w_diff_term_crit=w_diff_term_crit, verbose=True) for i_par in range(48) for reg_i in xrange(0, len(regularizations)))

    elif model_type == 'linear_regression':
        results = joblib.Parallel(n_jobs=47)(delayed(tf_linear_regression.train)(
            x, y, x_test, y_test, learning_rate=learning_rate, max_iterations=1000000,
            regularization=regularizations[reg_i], w_diff_term_crit=w_diff_term_crit, verbose=True) for i_par in range(48) for
                                             reg_i in xrange(0, len(regularizations)))

    pickle.dump(results, open(fname, 'wb'))
Developer: nikste, Project: hyperparameter_fault_tolerance, Lines: 13, Source: experiments.py


Example 10: main

def main():
    """
    if len(sys.argv) == 6:
        database, path, adjective, phase, sensor = sys.argv[1:]
        train_single_dataset(database, path, adjective, phase, sensor)
    """
    if len(sys.argv) == 6:
        database, path, adjective, phase, n_jobs = sys.argv[1:]
        n_jobs = int(n_jobs)
        print "Training the adjectives %s and for phase %s" %(
            adjective, phase)
        p = Parallel(n_jobs=n_jobs,verbose=10)
        p([delayed(create_single_dataset)(database, path, adjective, phase)])

    if len(sys.argv) == 5:
        database, path, adjective, n_jobs = sys.argv[1:]
        n_jobs = int(n_jobs)
        print "Training all the phases for adjective %s" %(
                    adjective)
        p = Parallel(n_jobs=n_jobs,verbose=10)
        p(delayed(create_single_dataset)(database, path, adjective, phase)
            for phase in itertools.product(phases))
            #    create_single_dataset(database, path, adjective, phase))

    elif len(sys.argv) == 3:
        database, path = sys.argv[1:]
        #n_jobs = int(n_jobs)
        print "Training all combinations of adjectives and phases"
        #p = Parallel(n_jobs=n_jobs,verbose=10)
        #p(delayed(create_single_dataset)(database, path, adjective, phase)
        #for adjective, phase in itertools.product(adjectives,
        #                                          phases))
        base_directory = path
        untrained_directory = os.path.join(base_directory, "untrained_adjectives")
        hmm_feature_directory = os.path.join(base_directory, "adjective_phase_set")
        check_dir(hmm_feature_directory)
        for adj_f in os.listdir(untrained_directory):
            full_adj_path = os.path.join(untrained_directory, adj_f)
            adj_obj = cPickle.load(open(full_adj_path))
            assert isinstance(adj_obj, AdjectiveClassifier)
            create_single_dataset(database, hmm_feature_directory, adj_obj)
        #    create_single_dataset(database, path, adjective, "some_phase")
    else:
        print "Usage:"
        print "%s database path adjective phase n_jobs" % sys.argv[0]
        print "%s database path adjective n_jobs" % sys.argv[0]
        print "%s database path" % sys.argv[0]
        print "Files will be saved in path/adjective_phase_set"
Developer: IanTheEngineer, Project: Penn-haptics-bolt, Lines: 48, Source: create_hmm_features_v2.py


Example 11: fit

 def fit(self, X, y=None):
     transformer_idx_list = map(lambda trans, idx:(trans[0], trans[1], idx), self.transformer_list, self.idx_list)
     transformers = Parallel(n_jobs=self.n_jobs)(
         delayed(_fit_one_transformer)(trans, X[:,idx], y)
         for name, trans, idx in transformer_idx_list)
     self._update_transformer_list(transformers)
     return self
Developer: jasonfreak, Project: almsc, Lines: 7, Source: feature_union_ext.py


Example 12: transform

    def transform(self, traj_list):
        """Transform traj_list separately by each transformer, concatenate results.

        Parameters
        ----------
        trajectories : list (of mdtraj.Trajectory objects)
            Trajectories to featurize

        Returns
        -------
        Y : list (of np.ndarray)
            Y[i] is the featurized version of X[i]
            Y[i] will have shape (n_samples_i, n_features), where
            n_samples_i is the length of trajectory i and n_features
            is the total (concatenated) number of features in the
            concatenated list of featurizers.

        """
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(sklearn.pipeline._transform_one)(trans, name, traj_list, self.transformer_weights)
            for name, trans in self.transformer_list)

        X_i_stacked = [np.hstack([Xs[feature_ind][trj_ind] for feature_ind in range(len(Xs))]) for trj_ind in range(len(Xs[0]))]

        return X_i_stacked
Developer: jchodera, Project: mixtape, Lines: 25, Source: featurizer.py


Example 13: decode_stash_parallel

def decode_stash_parallel(stash, penalty, label_map, num_cpus=NUM_CPUS,
                          **viterbi_args):
    """Apply Viterbi decoding over a stash in parallel.

    Parameters
    ----------
    stash : biggie.Stash
        Stash of fretboard posteriors.
    penalty : scalar
        Self-transition penalty.
    label_map : callable object
        Map from frets to string labels.
    num_cpus : int
        Number of CPUs to use in parallel.
    **viterbi_args, other args to pass to util.viterbi

    Returns
    -------
    annotset : dict of pyjams.RangeAnnotations
        Range annotations under the same keys as the input stash.
    """
    assert not __interactive__
    keys = stash.keys()
    pool = Parallel(n_jobs=num_cpus)
    decode = delayed(decode_fretboard)
    results = pool(decode(stash.get(k), penalty, label_map) for k in keys)
    return {k: r for k, r in zip(keys, results)}
Developer: agangzz, Project: dl4mir, Lines: 27, Source: decode.py


Example 14: cross_val_predict_proba

def cross_val_predict_proba(
        estimator, X, y, scoring='roc_auc', cv=8, n_jobs=1,
        verbose=0, fit_params=None,
        pre_dispatch='2*n_jobs'):
    """ Predict probabilities using cross-validation.
    """
    if isinstance(cv, int):
        cv1 = cross_validation.StratifiedKFold(y, cv)
    else:
        cv1 = cv

    fit_params = fit_params if fit_params is not None else {}
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    results = parallel(
        delayed(_cross_val_predict)(clone(estimator), X, y, train, test,
                                    verbose, fit_params, proba=True)
        for train, test in cv1)
    y_pred = np.zeros(len(y))
    scores = []
    for (mask, y_p) in results:
        y_pred[mask] = y_p
        if scoring == 'roc_auc':
            y_test = y[mask]
            if len(np.unique(y_test)) > 1:
                scores.append(compute_auc(y_test, y_p))
                # scores.append(roc_auc_score(y_test, y_p))
    return np.asarray(y_pred), np.asarray(scores)
Developer: orazaro, Project: kgml, Lines: 28, Source: model_selection.py


Example 15: cross_val_predict

def cross_val_predict(
        estimator, X, y, loss=None, cv=8, n_jobs=1,
        verbose=0, fit_params=None, proba=False,
        pre_dispatch='2*n_jobs'):
    """
    """
    if isinstance(cv, int):
        cv1 = cross_validation.StratifiedKFold(y, cv)
    else:
        cv1 = cv
    fit_params = fit_params if fit_params is not None else {}
    parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
                        pre_dispatch=pre_dispatch)
    results = parallel(
        delayed(_cross_val_predict)(clone(estimator), X, y, train, test,
                                    verbose, fit_params, proba)
        for train, test in cv1)
    y_pred = np.zeros(len(y))
    scores = []
    for (mask, y_p) in results:
        y_pred[mask] = y_p
        if loss:
            y_test = y[mask]
            scores.append(-loss(y_test, y_p))
    if loss:
        scores = np.asarray(scores)

    return np.asarray(y_pred), scores
Developer: orazaro, Project: kgml, Lines: 28, Source: model_selection.py


Example 16: optimal_allocation_with_skopt

def optimal_allocation_with_skopt(t, X, Y, n=10, n_parallel=4, const_income=True):
    # [0, 1]
    nn = 2
    opt_fun = _fun
    if const_income:
        nn = 1
        opt_fun = _fun_constant_income
    dimensions = [Real(0, 1)] * nn * (t - 1)
    optimizer = skopt.Optimizer(
            dimensions, base_estimator='gp', random_state=1
            # n_random_starts=None, n_initial_points=10, acq_func='gp_hedge', acq_optimizer='auto', acq_func_kwargs=None, acq_optimizer_kwargs=None
            )
    # fun = functools.partial(_fun, t, X, Y)
    fun = opt_fun(t, X, Y)
    if n_parallel <= 1:
        print('not parallel')
        for i in range(n):
                suggested = optimizer.ask()
                y = fun(suggested)
                optimizer.tell(suggested, y)
                print('iteration: {}, {}, {}'.format(i, suggested, y))
    else:
        # something not working here
        print('parallel')
        n_left = n
        for i in range(0, n, max(n_parallel, 1)):
            suggested = optimizer.ask(n_points=min(n_left, n_parallel))
            n_left -= n_parallel
            print(n_left)
            y = Parallel()(delayed(fun)(x) for x in suggested)
            optimizer.tell(suggested, y)
            print('iteration: {}, {}, {}, {}'.format(i, suggested, y, action_to_zeroone(np.array(suggested))))
    print('min is', min(optimizer.yi))
    return optimizer
Developer: cottrell, Project: notebooks, Lines: 34, Source: example3.py


Example 17: fit

    def fit(self, data, Y=None):
        if hasattr(data, 'copy'):
            # It's an array
            data = data.copy()
        else:
            # Probably a list
            data = copy.deepcopy(data)

        memory = self.memory
        if isinstance(memory, basestring):
            memory = Memory(cachedir=memory)

        pcas = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_subject_pca)(subject_data,
                                 n_components=self.n_components, mem=memory)
            for subject_data in data)
        pcas = np.concatenate(pcas, axis=1)

        ica_maps = self._find_high_kurtosis(pcas, memory)

        del pcas
        self.maps_ = ica_maps
        if not self.maps_only:
            # Relearn the time series
            self.learn_from_maps(data)

        return self
Developer: jmargeta, Project: tutorial, Lines: 27, Source: canica.py


Example 18: prepare_merge_jobs

 def prepare_merge_jobs(self, results):
     result_groups = grouper(results, self.split_bins)
     merge_jobs = []
     for result_group in result_groups:
         result_group = list(result_group)
         merge_jobs.append(joblib.delayed(self.load_and_merge_results_job)(result_group))
     return merge_jobs
Developer: chatto-hub-test2, Project: Spaceboy5, Lines: 7, Source: parallel_transform.py


Example 19: _intra_cluster_distances_block

def _intra_cluster_distances_block(X, labels, metric, n_jobs=1, **kwds):
    """Calculate the mean intra-cluster distance for sample i.
 
    Parameters
    ----------
    X : array [n_samples_a, n_features]
        Feature array.
 
    labels : array, shape = [n_samples]
        label values for each sample
 
    metric : string, or callable
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string, it must be one of the options
        allowed by metrics.pairwise.pairwise_distances. If X is the distance
        array itself, use "precomputed" as the metric.
 
    `**kwds` : optional keyword parameters
        Any further parameters are passed directly to the distance function.
        If using a scipy.spatial.distance metric, the parameters are still
        metric dependent. See the scipy docs for usage examples.
 
    Returns
    -------
    a : array [n_samples_a]
        Mean intra-cluster distance
    """
    intra_dist = np.zeros(labels.size, dtype=float)
    values = Parallel(n_jobs=n_jobs)(
            delayed(_intra_cluster_distances_block_)
                (X[np.where(labels == label)[0]], metric, **kwds)
                for label in np.unique(labels))
    for label, values_ in zip(np.unique(labels), values):
        intra_dist[np.where(labels == label)[0]] = values_
    return intra_dist
Developer: baothien, Project: tiensy, Lines: 35, Source: silhouette_score_modified_parallel.py


Example 20: _parallel_learning

    def _parallel_learning(self, X, Y, w):
        n_samples = len(X)
        objective, positive_slacks = 0, 0
        verbose = max(0, self.verbose - 3)
        if self.batch_size is not None:
            raise ValueError("If n_jobs != 1, batch_size needs to" "be None")
        # generate batches of size n_jobs
        # to speed up inference
        if self.n_jobs == -1:
            n_jobs = cpu_count()
        else:
            n_jobs = self.n_jobs

        n_batches = int(np.ceil(float(len(X)) / n_jobs))
        slices = gen_even_slices(n_samples, n_batches)
        for batch in slices:
            X_b = X[batch]
            Y_b = Y[batch]
            candidate_constraints = Parallel(n_jobs=self.n_jobs, verbose=verbose)(
                delayed(find_constraint)(self.model, x, y, w) for x, y in zip(X_b, Y_b)
            )
            dpsi = np.zeros(self.model.size_psi)
            for x, y, constraint in zip(X_b, Y_b, candidate_constraints):
                y_hat, delta_psi, slack, loss = constraint
                if slack > 0:
                    objective += slack
                    dpsi += delta_psi
                    positive_slacks += 1
            w = self._solve_subgradient(dpsi, n_samples, w)
        return objective, positive_slacks, w
Developer: huyng, Project: pystruct, Lines: 30, Source: subgradient_ssvm.py



Note: The sklearn.externals.joblib.delayed examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.

