Python numpy.full Function Code Examples


This article collects and summarizes typical usage examples of the numpy.full function in Python. If you have been wondering what exactly numpy.full does, how to call it, or what real-world uses look like, the curated examples below should help.



Below are 20 code examples of the full function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
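
Before working through the project examples, it helps to recall what numpy.full itself does: np.full(shape, fill_value, dtype=None) allocates a new array of the given shape with every element set to fill_value. Here is a minimal sketch (standard NumPy API; the variable names are illustrative only):

import numpy as np

a = np.full(3, 0.0)                      # 1-D array: array([0., 0., 0.])
b = np.full((2, 3), np.nan)              # 2-D array pre-filled with NaN, a common sentinel value
c = np.full((2, 2), 7, dtype=np.uint8)   # explicit dtype for an integer fill

print(a)
print(b)
print(c)

As the examples below illustrate, most real-world uses follow a few patterns: pre-allocating result buffers filled with NaN, building constant-valued parameter arrays, and padding gaps in data.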

Example 1: load_model

def load_model(nbins_sfh=7,sigma=0.3,df=2.,agelims=None,objname=None, **extras):

    # we'll need this to access specific model parameters
    n = [p['name'] for p in model_params]

    # replace nbins_sfh (floor division keeps the bin count an integer on
    # both Python 2 and Python 3)
    nbins_sfh = 4 + (int(objname)-1) // 9

    # create SFH bins
    zred = model_params[n.index('zred')]['init']
    tuniv = WMAP9.age(zred).value

    # now construct the nonparametric SFH
    # current scheme: six bins, four spaced equally in logarithmic time;
    # the last bin spans 15% of the age of the Universe, and the first two
    # are 0-30 and 30-100 Myr
    tbinmax = (tuniv*0.85)*1e9
    agelims = agelims[:2] + np.linspace(agelims[2],np.log10(tbinmax),nbins_sfh-2).tolist() + [np.log10(tuniv*1e9)]
    agebins = np.array([agelims[:-1], agelims[1:]])

    # load nvariables and agebins
    model_params[n.index('agebins')]['N'] = nbins_sfh
    model_params[n.index('agebins')]['init'] = agebins.T
    model_params[n.index('mass')]['N'] = nbins_sfh
    model_params[n.index('logsfr_ratios')]['N'] = nbins_sfh-1
    model_params[n.index('logsfr_ratios')]['init'] = np.full(nbins_sfh-1,0.0) # constant SFH
    model_params[n.index('logsfr_ratios')]['prior'] = priors.StudentT(mean=np.full(nbins_sfh-1,0.0),
                                                                      scale=np.full(nbins_sfh-1,sigma),
                                                                      df=np.full(nbins_sfh-1,df))

    return sedmodel.SedModel(model_params)
Author: jrleja | Project: threedhst_bsfh | Lines: 30 | Source: mock_timebin_params.py


Example 2: testMakeTableExceptions

  def testMakeTableExceptions(self):
    # Verify that contents is being type-checked and shape-checked.
    with self.assertRaises(ValueError):
      text_plugin.make_table([])

    with self.assertRaises(ValueError):
      text_plugin.make_table('foo')

    with self.assertRaises(ValueError):
      invalid_shape = np.full((3, 3, 3), 'nope', dtype=np.dtype('S3'))
      text_plugin.make_table(invalid_shape)

    # Test headers exceptions in 2d array case.
    test_array = np.full((3, 3), 'foo', dtype=np.dtype('S3'))
    with self.assertRaises(ValueError):
      # Headers is wrong type.
      text_plugin.make_table(test_array, headers='foo')
    with self.assertRaises(ValueError):
      # Too many headers.
      text_plugin.make_table(test_array, headers=['foo', 'bar', 'zod', 'zoink'])
    with self.assertRaises(ValueError):
      # headers is 2d
      text_plugin.make_table(test_array, headers=test_array)

    # Also make sure the column counting logic works in the 1d array case.
    test_array = np.array(['foo', 'bar', 'zod'])
    with self.assertRaises(ValueError):
      # Too many headers.
      text_plugin.make_table(test_array, headers=test_array)
Author: jtagscherer | Project: tensorboard | Lines: 29 | Source: text_plugin_test.py


Example 3: crossval_predict

def crossval_predict(predictor, X, y, prefix, n_cv=5):
    if not np.array_equal(predictor.classes_, [0, 1]):
        raise Exception("classes labels NOT match")

    can_pred_proba = common.can_predict_probability(predictor)

    n_samples = X.shape[0]
    print "totally {} samples, divided into {} folds".format(n_samples, n_cv)

    if can_pred_proba:
        datas = np.full((n_samples, 2), np.NaN)
        headers = ["{}_{}".format(prefix, t) for t in ["proba", "log_proba"]]
        yvalidates = pd.DataFrame(datas, columns=headers, index=y.index)
    else:
        datas = np.full((n_samples, 1), np.NaN)
        header = "{}_label".format(prefix)
        yvalidates = pd.DataFrame(datas, columns=[header], index=y.index)

    folds = StratifiedKFold(y, n_folds=n_cv, shuffle=True, random_state=seed)
    for index, (train_index, test_index) in enumerate(folds):
        Xtrain, Xtest = X[train_index], X[test_index]
        ytrain, ytest = y[train_index], y[test_index]

        predictor.fit(Xtrain, ytrain)
        if can_pred_proba:
            ytest_probas = predictor.predict_proba(Xtest)
            pos_proba = ytest_probas[:, 1]  # probability for label=1 (Positive)
            yvalidates.iloc[test_index, 0] = pos_proba
            yvalidates.iloc[test_index, 1] = np.log(pos_proba)
        else:
            yvalidates.iloc[test_index, 0] = predictor.predict(Xtest)

        print "====== cross-validated on {}-fold ======".format(index + 1)

    return yvalidates
Author: stasi009 | Project: MyKaggle | Lines: 35 | Source: sentiment_base_sklearn.py


Example 4: run

def run(oiter):
    # ----- Variable for this run -----
    log_alpha_0 = all_log_alpha_0[oiter]

    print "Running job {0} on {1}".format(oiter + 1, socket.gethostname())
    train_images, train_labels, _, _, _ = load_data()
    train_images = train_images[:N_data, :]
    train_labels = train_labels[:N_data, :]
    batch_idxs = BatchList(N_data, batch_size)
    iter_per_epoch = len(batch_idxs)
    N_weights, _, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg)
    def indexed_loss_fun(w, idxs):
        return loss_fun(w, X=train_images[idxs], T=train_labels[idxs])

    V0 = npr.randn(N_weights) * velocity_scale
    losses = []
    d_losses = []
    alpha_0 = np.exp(log_alpha_0)
    for N_iters in all_N_iters:
        alphas = np.full(N_iters, alpha_0)
        betas = np.full(N_iters, beta_0)
        npr.seed(1)
        W0 = npr.randn(N_weights) * np.exp(log_param_scale)
        results = sgd(indexed_loss_fun, batch_idxs, N_iters, W0, V0, alphas, betas)
        losses.append(results['loss_final'])
        d_losses.append(d_log_loss(alpha_0, results['d_alphas']))

    return losses, d_losses
Author: ChinJY | Project: hypergrad | Lines: 28 | Source: experiment.py


Example 5: test_window_safe

    def test_window_safe(self, factor_len):
        # all true data set of (days, securities)
        data = full(self.default_shape, True, dtype=bool)

        class InputFilter(Filter):
            inputs = ()
            window_length = 0

        class TestFactor(CustomFactor):
            dtype = float64_dtype
            inputs = (InputFilter(), )
            window_length = factor_len

            def compute(self, today, assets, out, filter_):
                # sum for each column
                out[:] = np_sum(filter_, axis=0)

        results = self.run_graph(
            TermGraph({'windowsafe': TestFactor()}),
            initial_workspace={InputFilter(): data},
        )

        # number of days in default_shape
        n = self.default_shape[0]

        # shape of output array
        output_shape = ((n - factor_len + 1), self.default_shape[1])
        check_arrays(
            results['windowsafe'],
            full(output_shape, factor_len, dtype=float64)
        )
Author: Weylew | Project: zipline | Lines: 31 | Source: test_filter.py


Example 6: _init

    def _init(self, X, lengths, params):
        init = 1. / self.n_components
        if 's' in params or not hasattr(self, "startprob_"):
            self.startprob_ = np.full(self.n_components, init)
        if 't' in params or not hasattr(self, "transmat_"):
            self.transmat_ = np.full((self.n_components, self.n_components),
                                     init)
Author: haderazzini | Project: hmmlearn | Lines: 7 | Source: base.py


Example 7: complete_obs_table

def complete_obs_table(obs_table, used_columns, filter_list, tolerance,
                       lim_flag, default_error=0.1, systematic_deviation=0.1):
    """Complete the observation table

    For each filter:
    * If the corresponding error is not present in the used column list or in
      the table columns, add (or replace) an error column with the default
      error.
    * Adjust the error value.

    Parameters
    ----------
    obs_table: astropy.table.Table
        The observation table.
    used_columns: list of strings
        The list of columns to use in the observation table.
    filter_list: list of strings
        The list of filters used in the analysis.
    tolerance: float
        Tolerance threshold below which a flux error is considered to be 0.
    lim_flag: boolean
        Do we process upper limits (True) or treat them as no-data (False)?
    default_error: float
        Default error factor used when the provided error is under the
        tolerance threshold.
    systematic_deviation: float
        Systematic deviation added to the error.

    Returns
    -------
    obs_table: astropy.table.Table
        The completed observation table.

    Raises
    ------
    Exception: When a filter is not present in the observation table.

    """
    # TODO Print or log a warning when an error column is in the used column
    # list but is not present in the observation table.
    for name in filter_list:
        if name not in obs_table.columns:
            raise Exception("The filter <{}> (at least) is not present in "
                            "the observation table.".format(name))

        name_err = name + "_err"
        if name_err not in obs_table.columns:
            obs_table.add_column(Column(name=name_err,
                                        data=np.full(len(obs_table), np.nan)),
                                 index=obs_table.colnames.index(name)+1)
        elif name_err not in used_columns:
            obs_table[name_err] = np.full(len(obs_table), np.nan)

        obs_table[name], obs_table[name_err] = adjust_data(obs_table[name],
                                                           obs_table[name_err],
                                                           tolerance,
                                                           lim_flag,
                                                           default_error,
                                                           systematic_deviation)
    return obs_table
Author: JohannesBuchner | Project: cigale | Lines: 60 | Source: __init__.py


Example 8: test_many_inputs

    def test_many_inputs(self):
        """
        Test adding NumericalExpressions with >10 inputs.
        """
        # Create an initial NumericalExpression by adding two factors together.
        f = self.f
        expr = f + f

        self.fake_raw_data = {f: full((5, 5), 0, float)}
        expected = 0

        # Alternate between adding and subtracting factors. Because subtraction
        # is not commutative, this ensures that we are combining factors in the
        # correct order.
        ops = (add, sub)

        for i, name in enumerate(ascii_uppercase):
            op = ops[i % 2]
            NewFactor = type(
                name,
                (Factor,),
                dict(dtype=float64_dtype, inputs=(), window_length=0),
            )
            new_factor = NewFactor()

            # Again we need a NumericalExpression, so add two factors together.
            new_expr = new_factor + new_factor
            self.fake_raw_data[new_factor] = full((5, 5), i + 1, float)
            expr = op(expr, new_expr)

            # Double the expected output since each factor is counted twice.
            expected = op(expected, (i + 1) * 2)

        self.check_output(expr, full((5, 5), expected, float))
Author: 280185386 | Project: zipline | Lines: 34 | Source: test_numerical_expression.py


Example 9: make_default_configuration

    def make_default_configuration(self):
        self.global_register = ccpdv4['CCPD_GLOBAL'].copy()
        self.pixel_register = {
            "threshold": np.full((48, 12), 7, dtype=np.uint8),  # 16 columns (triple col) x 6 rows (double row)
#             "monitor": value = np.full((48,12), 0, dtype=np.uint8),
            "injection": np.full((6, ), 0, dtype=np.uint8)
        }
Author: mathieubenoit | Project: ccpdv4 | Lines: 7 | Source: register.py


Example 10: _clean_timeseries

    def _clean_timeseries(self, timeseries, starttime, endtime):
        """Realigns timeseries data so the start and endtimes are the same
            as what was originally asked for, even if the data was during
            a gap.

        Parameters
        ----------
        timeseries: obspy.core.stream
            The timeseries stream as returned by the call to getWaveform
        starttime: obspy.core.UTCDateTime
            the starttime of the requested data
        endtime: obspy.core.UTCDateTime
            the endtime of the requested data

        Notes: the original timeseries object is changed.
        """
        for trace in timeseries:
            trace_starttime = UTCDateTime(trace.stats.starttime)
            trace_endtime = UTCDateTime(trace.stats.endtime)

            if trace.stats.starttime > starttime:
                cnt = int((trace_starttime - starttime) / trace.stats.delta)
                trace.data = numpy.concatenate([
                        numpy.full(cnt, numpy.nan, dtype=numpy.float64),
                        trace.data])
                trace.stats.starttime = starttime
            if trace_endtime < endtime:
                cnt = int((endtime - trace_endtime) / trace.stats.delta)
                trace.data = numpy.concatenate([
                        trace.data,
                        numpy.full(cnt, numpy.nan, dtype=numpy.float64)])
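                # NOTE: "endttime" below is likely a typo for "endtime" in the original code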
                trace.stats.endttime = endtime
Author: dcstewart-usgs | Project: geomag-algorithms | Lines: 32 | Source: EdgeFactory.py


Example 11: _recognize3

  def _recognize3(self, scores, transitions):
    lengthT = scores.shape[0]
    lengthS = transitions.shape[1]

    cost = np.full((lengthT, lengthT), np.inf, 'float32')
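    # NOTE: filling an int32 array with np.inf relies on an implicit float-to-int
    # cast; the result is implementation-defined and recent NumPy versions warn on it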
    back = np.full((lengthT, lengthT), np.inf, 'int32')

    cost[0] = np.min(scores[0])
    back[0] = -1

    transcript = []
    attention = []

    for s in range(1, lengthT):
      for t in range(min(s * lengthS, lengthT)):
        #if s % self.nstates == 0: # end state

        cost[s, t] = np.min(scores[s])
        q = transitions[t].copy()
        q[:min(t,lengthS)] += cost[s - 1, t - min(t,lengthS) : t]
        back[s, t] = q.argmin() + 1
        cost[s, t] += q.min()

    t = lengthT - 1
    s = 1
    while t >= 0 and s < lengthT:
      if s % self.nstates == 0:
        attention.append(t)
        transcript.append(scores[t].argmin() // self.nstates)  # floor division: state index -> label index
      t -= back[-s, t]
      s += 1
    return transcript[::-1], attention[::-1]
Author: rwth-i6 | Project: returnn | Lines: 32 | Source: OpInvAlign.py


Example 12: evaluate

def evaluate(gtdir, preddir, eval_pose=True, eval_track=True,
             eval_upper_bound=False):
    gtFramesAll, prFramesAll = load_data_dir(['', gtdir, preddir])

    print('# gt frames  :', len(gtFramesAll))
    print('# pred frames:', len(prFramesAll))

    apAll = np.full((Joint().count + 1, 1), np.nan)
    preAll = np.full((Joint().count + 1, 1), np.nan)
    recAll = np.full((Joint().count + 1, 1), np.nan)
    if eval_pose:
        apAll, preAll, recAll = evaluateAP(gtFramesAll, prFramesAll)
        print('Average Precision (AP) metric:')
        printTable(apAll)

    metrics = np.full((Joint().count + 4, 1), np.nan)
    if eval_track:
        metricsAll = evaluateTracking(
            gtFramesAll, prFramesAll, eval_upper_bound)

        for i in range(Joint().count + 1):
            metrics[i, 0] = metricsAll['mota'][0, i]
        metrics[Joint().count + 1, 0] = metricsAll['motp'][0, Joint().count]
        metrics[Joint().count + 2, 0] = metricsAll['pre'][0, Joint().count]
        metrics[Joint().count + 3, 0] = metricsAll['rec'][0, Joint().count]
        print('Multiple Object Tracking (MOT) metrics:')
        printTable(metrics, motHeader=True)
    return (apAll, preAll, recAll), metrics
Author: TPNguyen | Project: DetectAndTrack | Lines: 28 | Source: evaluate_simple.py


Example 13: test_rolling_and_nonrolling

    def test_rolling_and_nonrolling(self):
        open_ = USEquityPricing.open
        close = USEquityPricing.close
        volume = USEquityPricing.volume

        # Test for thirty days up to the last day that we think all
        # the assets existed.
        dates_to_test = self.dates[-30:]

        constants = {open_: 1, close: 2, volume: 3}
        loader = PrecomputedLoader(constants=constants, dates=self.dates, sids=self.asset_ids)
        engine = SimplePipelineEngine(lambda column: loader, self.dates, self.asset_finder)

        sumdiff = RollingSumDifference()

        result = engine.run_pipeline(
            Pipeline(
                columns={"sumdiff": sumdiff, "open": open_.latest, "close": close.latest, "volume": volume.latest}
            ),
            dates_to_test[0],
            dates_to_test[-1],
        )
        self.assertIsNotNone(result)
        self.assertEqual({"sumdiff", "open", "close", "volume"}, set(result.columns))

        result_index = self.asset_ids * len(dates_to_test)
        result_shape = (len(result_index),)
        check_arrays(result["sumdiff"], Series(index=result_index, data=full(result_shape, -3, dtype=float)))

        for name, const in [("open", 1), ("close", 2), ("volume", 3)]:
            check_arrays(result[name], Series(index=result_index, data=full(result_shape, const, dtype=float)))
Author: RoyHsiao | Project: zipline | Lines: 31 | Source: test_engine.py


Example 14: plot_risk

def plot_risk(risk_local, risk_central, risk_dist, iters, name_file, label_x, label_y):
    size = iters.shape[0]

    # Create the data
    risk_local   = np.full(size, risk_local)
    risk_central = np.full(size, risk_central)
    iters        = np.array(iters)

    # Plot graphs
    sns.set_style("ticks")
    plt.figure()
    plt.rc('text', usetex = True)
    plt.rc('text.latex', unicode = True)  # NOTE: the 'text.latex.unicode' rcParam has been removed in recent Matplotlib versions
    tests = list(risk_dist.keys())
    with sns.color_palette("tab10", len(tests) + 2):
        for test in tests:
            label = "SVM distribuído com " + test
            plt.plot(iters, risk_dist[test], linewidth = 2, label = label)
        plt.plot(iters, risk_local,   linewidth = 2.2, linestyle = '-.', label = 'SVM Local')
        plt.plot(iters, risk_central, linewidth = 2.2, linestyle = '-.', label = 'SVM Central')
        plt.legend(loc = 'upper right')
        sns.despine()
        plt.xlabel(label_x)
        plt.ylabel(label_y)
        file = str(plots_path) + "/" + name_file + ".pdf"
        plt.savefig(file, transparent = True)
Author: caiodadauto | Project: Distributed-SVM | Lines: 26 | Source: plotrisk.py


Example 15: initBuffers

    def initBuffers(self,puzzle):
        #define lengths buffer and copy to the GPU
        #as we will not read from this buffer later, mapping is not required
        self.lengths = np.full(self.simulations,np.iinfo(np.int16).max,dtype=np.int16)
        self.lengthsBuffer = cl.Buffer(self.context, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, hostbuf=self.lengths)
         
        #define buffer for aggregated lengths for each workgroup
        self.groupLengths = np.full(self.workGroups,np.iinfo(np.int16).max,dtype=np.int16)
        self.groupLengthsBuffer = cl.Buffer(self.context, cl.mem_flags.READ_WRITE | cl.mem_flags.USE_HOST_PTR, hostbuf=self.groupLengths)
        
        #map group lengths buffer
        cl.enqueue_map_buffer(self.queue,self.groupLengthsBuffer,cl.map_flags.READ,0,self.groupLengths.shape,self.groupLengths.dtype)
        
        #get the input puzzle ready for the kernel; convert to 8 bit int (char)
        p = np.array(puzzle['puzzle']).astype(np.int8)
        #subtract 1 so that -1 denotes a gap and 0 denotes a square to be filled
        p = p - np.ones_like(p,dtype=p.dtype)
        
        #copy the puzzle, one for each simulation
        self.puzzles = np.zeros((self.simulations,self.height,self.width),dtype=p.dtype)
        self.puzzles[:,0:self.height,0:self.width] = p
    
        #define puzzles buffer and copy data (we do not need to worry about getting data out of this buffer, so mapping isn't required)
        #this buffer contains the input puzzles, one for each invocation (the puzzle is too large to hold in local or shared memory)
        self.puzzlesFlattened = self.puzzles.ravel()
        self.puzzlesBuffer = cl.Buffer(self.context, cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR, hostbuf=self.puzzlesFlattened)
        
        #define output buffer for best solutions aggregated across workgroups
        self.solutions = self.puzzles[0:self.workGroups]
        self.solutionsFlattened = self.solutions.ravel()
        self.solutionsBuffer = cl.Buffer(self.context, cl.mem_flags.READ_WRITE | cl.mem_flags.USE_HOST_PTR, hostbuf=self.solutionsFlattened)

        #map solutions buffer
        cl.enqueue_map_buffer(self.queue,self.solutionsBuffer,cl.map_flags.READ,0,self.solutionsFlattened.shape,self.solutions.dtype)
Author: ohlord | Project: cimpress | Lines: 34 | Source: CLSolve.py


Example 16: generateFileList

def generateFileList(rootPath):
	iterNum = 0
	X_train_i = np.array([])
	X_test_i = np.array([])
	y_train_i = np.array([])
	y_test_i = np.array([])

	if reducedInput:
		print "WARN: not using all available input"
		X_train_path = ["004_M3", "012_F2"]
		y_train_data = [1, 0]
	else:
		global X_train_path, y_train_data

	for p in X_train_path:
		a = root + "/" + p + "/C" + p + "_INDE"
		b = root + "/" + p + "/C" + p + "_SENT"
		filesThisFolder = np.append([a + "/" + f for f in listdir(a) if isfile(join(a, f)) and f.endswith(".wav")],
									[b + "/" + f for f in listdir(b) if isfile(join(b, f)) and f.endswith(".wav")])
		y_train_i = np.append(y_train_i, np.full((filesThisFolder.shape[0]), y_train_data[iterNum], dtype='int8'))
		X_train_i = np.append(X_train_i, filesThisFolder)
		iterNum += 1
	iterNum = 0
	for p in X_test_path:
		a = root + "/" + p + "/C" + p + "_INDE"
		b = root + "/" + p + "/C" + p + "_SENT"
		filesThisFolder = np.append([a + "/" + f for f in listdir(a) if isfile(join(a, f)) and f.endswith(".wav")],
									[b + "/" + f for f in listdir(b) if isfile(join(b, f)) and f.endswith(".wav")])
		y_test_i = np.append(y_test_i, np.full((filesThisFolder.shape[0]), y_test_data[iterNum], dtype='int8'))
		X_test_i = np.append(X_test_i, filesThisFolder)
		iterNum += 1
	return ((X_train_i, y_train_i), (X_test_i, y_test_i))
Author: dare0021 | Project: KerasBasedSpeechClassifier | Lines: 32 | Source: preprocessor.py


Example 17: loop_over_zone

    def loop_over_zone(self, array):
        """Generate a masked array and retrieve average values.

        :param array: ndarray, representing zone.

        :returns: ndarray, average values for zone.
        """
        mesh_1 = self.model_data.model_mesh3D[1]
        rows = mesh_1.shape[0]
        arr_zoned = [np.full(mesh_1.shape[1:3], np.nan)] * int(np.max(mesh_1))

        for zone in range(int(np.max(mesh_1))):
            temp = np.array([np.full(mesh_1.shape[1:3], np.nan)] * rows)

            zone_1 = float(zone + 1)
            for layer in range(rows):
                mesh_1_layer = mesh_1[layer]
                temp[layer][mesh_1_layer == zone_1] = array[layer][mesh_1_layer == zone_1]
            # End for

            masked_temp = np.ma.masked_array(temp, np.isnan(temp))
            arr_zoned[zone] = np.mean(masked_temp, axis=0)
        # End for

        return arr_zoned
Author: daniel-partington | Project: HydroModelBuilder | Lines: 25 | Source: flopyInterface.py


Example 18: run

def run():
    train_images, train_labels, _, _, _ = load_data()
    train_images = train_images[:N_data, :]
    train_labels = train_labels[:N_data, :]
    batch_idxs = BatchList(N_data, batch_size)
    iter_per_epoch = len(batch_idxs)
    N_weights, _, loss_fun, frac_err = make_nn_funs(layer_sizes, L2_reg)
    def indexed_loss_fun(w, idxs):
        return loss_fun(w, X=train_images[idxs], T=train_labels[idxs])

    log_alphas = np.full(N_iters, log_alpha_0)
    betas      = np.full(N_iters, beta_0)
    npr.seed(1)
    V0 = npr.randn(N_weights) * velocity_scale
    W0 = npr.randn(N_weights) * np.exp(log_param_scale)
    output = []
    for i in range(N_meta_iter):
        print "Meta iteration {0}".format(i)
        results = sgd(indexed_loss_fun, batch_idxs, N_iters,
                      W0, V0, np.exp(log_alphas), betas, record_learning_curve=True)
        learning_curve = results['learning_curve']
        d_log_alphas = np.exp(log_alphas) * results['d_alphas']
        output.append((learning_curve, log_alphas, d_log_alphas))
        log_alphas = log_alphas - meta_alpha * d_log_alphas

    return output
Author: ChinJY | Project: hypergrad | Lines: 26 | Source: experiment.py


Example 19: test_uninterpolated_nan_regions

def test_uninterpolated_nan_regions(boundary, normalize_kernel):
    # Regression test for issue #8086
    # Test NaN interpolation of contiguous NaN regions with kernels of size
    # identical and greater than that of the region of NaN values.

    # Test case: kernel.shape == NaN_region.shape
    kernel = Gaussian2DKernel(1, 5, 5)
    nan_centroid = np.full(kernel.shape, np.nan)
    image = np.pad(nan_centroid, pad_width=kernel.shape[0]*2, mode='constant',
                   constant_values=1)
    with pytest.warns(AstropyUserWarning,
                      match="nan_treatment='interpolate', however, NaN values detected "
                      "post convolution. A contiguous region of NaN values, larger "
                      "than the kernel size, are present in the input array. "
                      "Increase the kernel size to avoid this."):
        result = convolve(image, kernel, boundary=boundary, nan_treatment='interpolate',
                          normalize_kernel=normalize_kernel)
        assert(np.any(np.isnan(result)))

    # Test case: kernel.shape > NaN_region.shape
    nan_centroid = np.full((kernel.shape[0]-1, kernel.shape[1]-1), np.nan) # 1 smaller than kernel
    image = np.pad(nan_centroid, pad_width=kernel.shape[0]*2, mode='constant',
                   constant_values=1)
    result = convolve(image, kernel, boundary=boundary, nan_treatment='interpolate',
                      normalize_kernel=normalize_kernel)
    assert(~np.any(np.isnan(result))) # Note: negation
Author: Cadair | Project: astropy | Lines: 26 | Source: test_convolve.py


Example 20: test_multiple_rolling_factors

    def test_multiple_rolling_factors(self):

        loader = self.loader
        engine = SimpleFFCEngine(loader, self.dates, self.asset_finder)
        shape = num_dates, num_assets = (5, len(self.assets))
        dates = self.dates[10:10 + num_dates]

        short_factor = RollingSumDifference(window_length=3)
        long_factor = RollingSumDifference(window_length=5)
        high_factor = RollingSumDifference(
            window_length=3,
            inputs=[USEquityPricing.open, USEquityPricing.high],
        )

        results = engine.factor_matrix(
            {'short': short_factor, 'long': long_factor, 'high': high_factor},
            dates[0],
            dates[-1],
        )
        self.assertEqual(set(results.columns), {'short', 'high', 'long'})

        # row-wise sum over an array whose values are all (1 - 2)
        assert_array_equal(
            results['short'].unstack().values,
            full(shape, -short_factor.window_length),
        )
        assert_array_equal(
            results['long'].unstack().values,
            full(shape, -long_factor.window_length),
        )
        # row-wise sum over an array whose values are all (1 - 3)
        assert_array_equal(
            results['high'].unstack().values,
            full(shape, -2 * high_factor.window_length),
        )
Author: robertotang | Project: zipline | Lines: 35 | Source: test_engine.py



Note: The numpy.full examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many programmers; copyright remains with the original authors. For redistribution and use, please refer to the corresponding project's license. Do not reproduce without permission.

