Python numpy.norm Function Code Examples


This article collects typical usage examples of the norm function from nengo.utils.numpy in Python. If you are wondering exactly how to call norm, or what it looks like in real code, the curated examples below should help.



The following presents 20 code examples of the norm function, ordered roughly by popularity.
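Before the examples, here is a minimal usage sketch based only on the calling patterns that appear below (the module is imported as npext; the signature is assumed to mirror np.linalg.norm with axis and keepdims arguments):

import numpy as np
import nengo.utils.numpy as npext

# A batch of 5 three-dimensional vectors
x = np.random.randn(5, 3)

# Row-wise Euclidean norms; keepdims=True keeps shape (5, 1) for broadcasting
lengths = npext.norm(x, axis=1, keepdims=True)

# Normalize each row to unit length, as many of the examples below do
x_unit = x / lengths
print(npext.norm(x_unit, axis=1))  # approximately [1. 1. 1. 1. 1.]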

Example 1: similarity

def similarity(v1, v2):
    # v1 and v2 are vectors
    eps = np.nextafter(0, 1)  # smallest float above zero
    dot = np.dot(v1, v2)
    dot /= max(npext.norm(v1), eps)
    dot /= max(npext.norm(v2), eps)
    return dot
Author: tbekolay | Project: phd | Lines: 7 | Source: idmp.py


Example 2: cd_encoders_biases

def cd_encoders_biases(n_encoders, trainX, trainY, rng=np.random, mask=None,
                       norm_min=0.05, norm_tries=10):
    """Constrained difference (CD) method for encoders from data [1]_.

    Parameters
    ==========
    n_encoders : int
        Number of encoders to generate.
    trainX : (n_samples, n_dimensions) array-like
        Training features.
    trainY : (n_samples,) array-like
        Training labels.

    Returns
    =======
    encoders : (n_encoders, n_dimensions) array
        Generated encoders.
    biases : (n_encoders,) array
        Generated biases. These are biases assuming `f = G[E * X + b]`,
        and are therefore more like Nengo's `intercepts`.

    References
    ==========
    .. [1] McDonnell, M. D., Tissera, M. D., Vladusich, T., Van Schaik, A.,
       Tapson, J., & Schwenker, F. (2015). Fast, simple and accurate
       handwritten digit classification by training shallow neural network
       classifiers with the "Extreme learning machine" algorithm. PLoS ONE,
       10(8), 1-20. doi:10.1371/journal.pone.0134254
    """
    assert trainX.shape[0] == trainY.size
    trainX = trainX.reshape(trainX.shape[0], -1)
    trainY = trainY.ravel()
    d = trainX.shape[1]
    classes = np.unique(trainY)
    assert mask is None or mask.shape == (n_encoders, d)

    inds = [(trainY == label).nonzero()[0] for label in classes]
    train_norm = npext.norm(trainX, axis=1).mean()

    encoders = np.zeros((n_encoders, d))
    biases = np.zeros(n_encoders)
    for k in range(n_encoders):
        for _ in range(norm_tries):
            i, j = rng.choice(len(classes), size=2, replace=False)
            a, b = trainX[rng.choice(inds[i])], trainX[rng.choice(inds[j])]
            dab = a - b
            if mask is not None:
                dab *= mask[k]
            ndab = npext.norm(dab)**2
            if ndab >= norm_min * train_norm:
                break
        else:
            raise ValueError("Cannot find valid encoder")

        encoders[k] = (2. / ndab) * dab
        biases[k] = np.dot(a + b, dab) / ndab

    return encoders, biases
Author: fmirus | Project: nengo_extras | Lines: 58 | Source: vision.py


Example 3: test_sqrt_beta

def test_sqrt_beta(n, m, rng):
    num_samples = 250
    num_bins = 5

    vectors = rng.randn(num_samples, n + m)
    vectors /= npext.norm(vectors, axis=1, keepdims=True)
    expectation, _ = np.histogram(
        npext.norm(vectors[:, :m], axis=1), bins=num_bins)

    dist = dists.SqrtBeta(n, m)
    samples = dist.sample(num_samples, 1, rng=rng)
    hist, _ = np.histogram(samples, bins=num_bins)

    assert np.all(np.abs(np.asfarray(hist - expectation) / num_samples) < 0.16)
Author: falconlulu | Project: nengo | Lines: 14 | Source: test_dists.py


Example 4: test_state_norm

def test_state_norm(plt):
    # Choose a filter, timestep, and number of simulation timesteps
    sys = Alpha(0.1)
    dt = 0.000001
    length = 2000000

    # Modify the state-space to read out the state vector
    A, B, C, D = sys2ss(sys)
    old_C = C
    C = np.eye(len(A))
    D = np.zeros((len(A), B.shape[1]))

    response = np.empty((length, len(C)))
    for i in range(len(C)):
        # Simulate the state vector
        response[:, i] = impulse((A, B, C[i, :], D[i, :]), dt, length)

    # Check that the power of each state equals the H2-norm of each state
    # The analog case is the same after scaling since dt is approx 0.
    actual = norm(response, axis=0) * dt
    assert np.allclose(actual, state_norm(cont2discrete(sys, dt)))
    assert np.allclose(actual, state_norm(sys) * np.sqrt(dt))

    plt.figure()
    plt.plot(response[:, 0], label="$x_0$")
    plt.plot(response[:, 1], label="$x_1$")
    plt.plot(np.dot(response, old_C.T), label="$y$")
    plt.legend()
Author: arvoelke | Project: nengolib | Lines: 28 | Source: test_lyapunov.py


Example 5: test_hypersphere_surface

def test_hypersphere_surface(dimensions, rng):
    n = 150 * dimensions
    dist = dists.UniformHypersphere(surface=True)
    samples = dist.sample(n, dimensions, rng=rng)
    assert samples.shape == (n, dimensions)
    assert np.allclose(npext.norm(samples, axis=1), 1)
    assert np.allclose(np.mean(samples, axis=0), 0, atol=0.25 / dimensions)
Author: CamZHU | Project: nengo | Lines: 7 | Source: test_dists.py


Example 6: test_sphere

def test_sphere(d, rng):
    n = 200
    x = sphere.sample(n, d, rng)
    assert x.shape == (n, d)
    assert np.allclose(norm(x, axis=1), 1)

    f = _furthest(x)
    assert (f > 1.5).all()
Author: arvoelke | Project: nengolib | Lines: 8 | Source: test_ntmdists.py


Example 7: test_hypersphere_surface

def test_hypersphere_surface(dimensions):
    n = 100 * dimensions
    dist = dists.UniformHypersphere(dimensions, surface=True)
    samples = dist.sample(n, np.random.RandomState(1))
    assert samples.shape == (n, dimensions)
    assert np.allclose(npext.norm(samples, axis=1), 1)
    assert np.allclose(
        np.mean(samples, axis=0), np.zeros(dimensions), atol=0.1)
Author: Dartonw | Project: nengo | Lines: 8 | Source: test_distributions.py


Example 8: test_ball

def test_ball(d, rng):
    n = 200
    x = ball.sample(n, d, rng)
    assert x.shape == (n, d)

    dist = norm(x, axis=1)
    assert (dist <= 1).all()

    f = _furthest(x)
    assert (f > dist + 0.5).all()
Author: arvoelke | Project: nengolib | Lines: 10 | Source: test_ntmdists.py


Example 9: ciw_encoders

def ciw_encoders(n_encoders, trainX, trainY, rng=np.random,
                 normalize_data=True, normalize_encoders=True):
    """Computed Input Weights (CIW) method for encoders from data [1]_.

    Parameters
    ==========
    n_encoders : int
        Number of encoders to generate.
    trainX : (n_samples, n_dimensions) array-like
        Training features.
    trainY : (n_samples,) array-like
        Training labels.

    Returns
    =======
    encoders : (n_encoders, n_dimensions) array
        Generated encoders.

    References
    ==========
    .. [1] McDonnell, M. D., Tissera, M. D., Vladusich, T., Van Schaik, A.,
       Tapson, J., & Schwenker, F. (2015). Fast, simple and accurate
       handwritten digit classification by training shallow neural network
       classifiers with the "Extreme learning machine" algorithm. PLoS ONE,
       10(8), 1-20. doi:10.1371/journal.pone.0134254
    """
    assert trainX.shape[0] == trainY.size
    trainX = trainX.reshape(trainX.shape[0], -1)
    trainY = trainY.ravel()
    classes = np.unique(trainY)

    assert n_encoders % len(classes) == 0
    n_enc_per_class = n_encoders // len(classes)  # integer division; used as an array size below

    # normalize
    if normalize_data:
        trainX = (trainX - trainX.mean()) / trainX.std()
        # trainX = (trainX - trainX.mean(axis=0)) / trainX.std()
        # trainX = (trainX - trainX.mean(axis=0)) / (trainX.std(axis=0) + 1e-8)

    # generate
    encoders = []
    for label in classes:
        X = trainX[trainY == label]
        plusminus = rng.choice([-1, 1], size=(X.shape[0], n_enc_per_class))
        samples = np.dot(plusminus.T, X)
        encoders.append(samples)

    encoders = np.vstack(encoders)
    if normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    return encoders
Author: fmirus | Project: nengo_extras | Lines: 53 | Source: vision.py


Example 10: similarity

def similarity(data, vocab, normalize=False):
    """Return the similarity between some data and the vocabulary.

    Computes the dot products between all data vectors and each
    vocabulary vector. If `normalize=True`, normalizes all vectors
    to compute the cosine similarity.

    Parameters
    ----------
    data: array_like
        The data used for comparison.
    vocab: spa.Vocabulary, array_like
        Vocabulary (or list of vectors) to use to calculate
        the similarity values
    normalize : boolean (optional)
        Whether to normalize all vectors, to compute the cosine similarity.
    """
    from nengo.spa.vocab import Vocabulary

    if isinstance(vocab, Vocabulary):
        vectors = vocab.vectors
    elif is_iterable(vocab):
        vectors = np.array(vocab, copy=False, ndmin=2)
    else:
        raise ValidationError("%r object is not a valid vocabulary"
                              % (vocab.__class__.__name__), attr='vocab')

    data = np.array(data, copy=False, ndmin=2)
    dots = np.dot(data, vectors.T)

    if normalize:
        # Zero-norm vectors should return zero, so avoid divide-by-zero error
        eps = np.nextafter(0, 1)  # smallest float above zero
        dnorm = np.maximum(npext.norm(data, axis=1, keepdims=True), eps)
        vnorm = np.maximum(npext.norm(vectors, axis=1, keepdims=True), eps)

        dots /= dnorm
        dots /= vnorm.T

    return dots
Author: JolyZhang | Project: nengo | Lines: 40 | Source: utils.py


Example 11: test_eval_points_scaling

def test_eval_points_scaling(Simulator, sample, radius, seed, rng):
    eval_points = UniformHypersphere()
    if sample:
        eval_points = eval_points.sample(500, 3, rng=rng)

    model = nengo.Network(seed=seed)
    with model:
        a = nengo.Ensemble(1, 3, eval_points=eval_points, radius=radius)

    with Simulator(model) as sim:
        dists = npext.norm(sim.data[a].eval_points, axis=1)
        assert np.all(dists <= radius)
        assert np.any(dists >= 0.9 * radius)
Author: amshenoy | Project: nengo | Lines: 13 | Source: test_ensemble.py


Example 12: test_encoders

def test_encoders(n_dimensions, n_neurons=10, encoders=None):
    if encoders is None:
        encoders = np.random.normal(size=(n_neurons, n_dimensions))
        encoders /= norm(encoders, axis=-1, keepdims=True)

    model = nengo.Network(label="_test_encoders")
    with model:
        ens = nengo.Ensemble(neurons=nengo.LIF(n_neurons),
                             dimensions=n_dimensions,
                             encoders=encoders,
                             label="A")
    sim = nengo.Simulator(model)

    assert np.allclose(encoders, sim.data[ens].encoders)
Author: lisannehuurdeman | Project: nengo | Lines: 14 | Source: test_ensemble.py


Example 13: build_lif

def build_lif(model, ens):
    # Create a random number generator
    rng = np.random.RandomState(model.seeds[ens])

    # Get the eval points
    eval_points = ensemble.gen_eval_points(ens, ens.eval_points, rng=rng)

    # Get the encoders
    if isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=np.float64)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Get maximum rates and intercepts
    max_rates = ensemble.sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = ensemble.sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is None and ens.bias is None:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)
    elif ens.gain is not None and ens.bias is not None:
        gain = ensemble.sample(ens.gain, ens.n_neurons, rng=rng)
        bias = ensemble.sample(ens.bias, ens.n_neurons, rng=rng)
    else:
        raise NotImplementedError(
            "gain or bias set for {!s}, but not both. Solving for one given "
            "the other is not yet implemented.".format(ens)
        )

    # Scale the encoders
    scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    # Store all the parameters
    model.params[ens] = BuiltEnsemble(
        eval_points=eval_points,
        encoders=encoders,
        scaled_encoders=scaled_encoders,
        max_rates=max_rates,
        intercepts=intercepts,
        gain=gain,
        bias=bias
    )

    # Create the object which will handle simulation of the LIF ensemble.  This
    # object will be responsible for adding items to the netlist and providing
    # functions to prepare the ensemble for simulation.  The object may be
    # modified by later methods.
    model.object_operators[ens] = operators.EnsembleLIF(ens)
Author: hunse | Project: nengo_spinnaker | Lines: 50 | Source: ensemble.py


Example 14: test_encoders

def test_encoders(n_dimensions, n_neurons=10, encoders=None):
    if encoders is None:
        encoders = np.random.normal(size=(n_neurons, n_dimensions))
        encoders /= norm(encoders, axis=-1, keepdims=True)

    args = {'label': 'A',
            'neurons': nengo.LIF(n_neurons),
            'dimensions': n_dimensions}

    model = nengo.Model('_test_encoders')
    ens = nengo.Ensemble(encoders=encoders, **args)
    sim = nengo.Simulator(model)

    assert np.allclose(encoders, sim.data[ens].encoders)
Author: cnvandev | Project: nengo | Lines: 14 | Source: test_ensemble.py


Example 15: test_encoders

def test_encoders(RefSimulator, dimensions, seed, n_neurons=10, encoders=None):
    if encoders is None:
        encoders = np.random.normal(size=(n_neurons, dimensions))
        encoders = npext.array(encoders, min_dims=2, dtype=np.float64)
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    model = nengo.Network(label="_test_encoders", seed=seed)
    with model:
        ens = nengo.Ensemble(n_neurons=n_neurons,
                             dimensions=dimensions,
                             encoders=encoders,
                             label="A")

    with RefSimulator(model) as sim:
        assert np.allclose(encoders, sim.data[ens].encoders)
Author: amshenoy | Project: nengo | Lines: 15 | Source: test_ensemble.py


Example 16: sample

    def sample(self, n, d, rng=np.random):
        if d is None or d < 1:  # check this, since other dists allow d = None
            raise ValueError("Dimensions must be a positive integer")

        samples = rng.randn(n, d)
        samples /= npext.norm(samples, axis=1, keepdims=True)

        if self.surface:
            return samples

        # Generate magnitudes for vectors from uniform distribution.
        # The (1 / d) exponent ensures that samples are uniformly distributed
        # in n-space and not all bunched up at the centre of the sphere.
        samples *= rng.rand(n, 1) ** (1.0 / d)

        return samples
Author: thingimon | Project: nengo | Lines: 16 | Source: dists.py


Example 17: test_eval_points_scaling

def test_eval_points_scaling(Simulator, sample, radius, seed, rng, scale):
    eval_points = UniformHypersphere()
    if sample:
        eval_points = eval_points.sample(500, 3, rng=rng)

    model = nengo.Network(seed=seed)
    with model:
        a = nengo.Ensemble(1, 3, radius=radius)
        b = nengo.Ensemble(1, 3)
        con = nengo.Connection(a, b, eval_points=eval_points,
                               scale_eval_points=scale)

    sim = Simulator(model)
    dists = npext.norm(sim.data[con].eval_points, axis=1)
    limit = radius if scale else 1.0
    assert np.all(dists <= limit)
    assert np.any(dists >= 0.9 * limit)
Author: LittileBee | Project: nengo | Lines: 17 | Source: test_connection.py


Example 18: make_pool

    def make_pool(self, ens):
        if isinstance(ens.encoders, Distribution):
            encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions,
                                           rng=self.rng)
        else:
            encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
            encoders /= npext.norm(encoders, axis=1, keepdims=True)


        if self.config[ens].compact:
            p = pool.CompactPool(ens.n_neurons)
        elif self.config[ens].fixed:
            p = pool.FixedPool(ens.n_neurons,
                               bits_soma=self.config[ens].fixed_bits_soma,
                               bits_syn=self.config[ens].fixed_bits_syn)
        else:
            p = pool.StdPool(ens.n_neurons)
        intercepts = nengo.builder.sample(ens.intercepts, ens.n_neurons,
                                          rng=self.rng)
        max_rates = nengo.builder.sample(ens.max_rates, ens.n_neurons,
                                          rng=self.rng)
        gain, bias = self.find_gain_bias(p.soma, intercepts, max_rates)
        p.set_bias(bias)
        print('bias', p.get_bias())

        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

        self.pools[ens] = p

        self.model.params[ens] = BuiltEnsemble(intercepts=intercepts,
                                               max_rates=max_rates,
                                               gain=gain,
                                               bias=bias,
                                               encoders=encoders,
                                               scaled_encoders=scaled_encoders,
                                               eval_points=None,
                                               )
Author: tcstewar | Project: neuron_explore | Lines: 37 | Source: simulator.py


Example 19: build_ensemble

    def build_ensemble(self, ens):
        # Create random number generator
        seed = self.next_seed() if ens.seed is None else ens.seed
        rng = np.random.RandomState(seed)

        # Generate eval points
        if ens.eval_points is None or is_integer(ens.eval_points):
            eval_points = self.generate_eval_points(
                ens=ens, n_points=ens.eval_points, rng=rng)
        else:
            eval_points = npext.array(
                ens.eval_points, dtype=np.float64, min_dims=2)

        # Set up signal
        self.model.sig_in[ens] = Signal(np.zeros(ens.dimensions),
                                        name="%s.signal" % ens.label)
        self.model.operators.append(Reset(self.model.sig_in[ens]))

        # Set up encoders
        if ens.encoders is None:
            if isinstance(ens.neurons, nengo.Direct):
                encoders = np.identity(ens.dimensions)
            else:
                sphere = dists.UniformHypersphere(ens.dimensions, surface=True)
                encoders = sphere.sample(ens.neurons.n_neurons, rng=rng)
        else:
            encoders = np.array(ens.encoders, dtype=np.float64)
            enc_shape = (ens.neurons.n_neurons, ens.dimensions)
            if encoders.shape != enc_shape:
                raise ShapeMismatch(
                    "Encoder shape is %s. Should be (n_neurons, dimensions); "
                    "in this case %s." % (encoders.shape, enc_shape))
            encoders /= npext.norm(encoders, axis=1, keepdims=True)

        # Determine max_rates and intercepts
        if isinstance(ens.max_rates, dists.Distribution):
            max_rates = ens.max_rates.sample(
                ens.neurons.n_neurons, rng=rng)
        else:
            max_rates = np.array(ens.max_rates)
        if isinstance(ens.intercepts, dists.Distribution):
            intercepts = ens.intercepts.sample(
                ens.neurons.n_neurons, rng=rng)
        else:
            intercepts = np.array(ens.intercepts)

        # Build the neurons
        if isinstance(ens.neurons, nengo.Direct):
            bn = self.build(ens.neurons, ens.dimensions)
        else:
            bn = self.build(ens.neurons, max_rates, intercepts)

        # Scale the encoders
        if isinstance(ens.neurons, nengo.Direct):
            scaled_encoders = encoders
        else:
            scaled_encoders = encoders * (bn.gain / ens.radius)[:, np.newaxis]

        # Create output signal, using built Neurons
        self.model.operators.append(DotInc(
            Signal(scaled_encoders, name="%s.scaled_encoders" % ens.label),
            self.model.sig_in[ens],
            self.model.sig_in[ens.neurons],
            tag="%s encoding" % ens.label))

        # Output is neural output
        self.model.sig_out[ens] = self.model.sig_out[ens.neurons]

        for probe in ens.probes["decoded_output"]:
            self.build(probe, dimensions=ens.dimensions)
        for probe in ens.probes["spikes"] + ens.probes["voltages"]:
            self.build(probe, dimensions=ens.neurons.n_neurons)

        return BuiltEnsemble(eval_points=eval_points,
                             encoders=encoders,
                             intercepts=intercepts,
                             max_rates=max_rates,
                             scaled_encoders=scaled_encoders)
Author: Dartonw | Project: nengo | Lines: 78 | Source: builder.py


Example 20: build_ensemble

def build_ensemble(model, ens):
    """Builds an `.Ensemble` object into a model.

    A brief summary of what happens in the ensemble build process, in order:

    1. Generate evaluation points and encoders.
    2. Normalize encoders to unit length.
    3. Determine bias and gain.
    4. Create neuron input signal
    5. Add operator for injecting bias.
    6. Call build function for neuron type.
    7. Scale encoders by gain and radius.
    8. Add operators for multiplying decoded input signal by encoders and
       incrementing the result in the neuron input signal.
    9. Call build function for injected noise.

    Some of these steps may be altered or omitted depending on the parameters
    of the ensemble, in particular the neuron type. For example, most steps are
    omitted for the `.Direct` neuron type.

    Parameters
    ----------
    model : Model
        The model to build into.
    ens : Ensemble
        The ensemble to build.

    Notes
    -----
    Sets ``model.params[ens]`` to a `.BuiltEnsemble` instance.
    """

    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up signal
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = get_samples(
            ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    if ens.normalize_encoders:
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Build the neurons
    gain, bias, max_rates, intercepts = get_gain_bias(ens, rng)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.dimensions), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_out" % ens)
        model.sig[ens.neurons]['bias'] = Signal(
            bias, name="%s.bias" % ens, readonly=True)
        model.add_op(Copy(model.sig[ens.neurons]['bias'],
                          model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens, readonly=True)

    # Inject noise if specified
    if ens.noise is not None:
        model.build(ens.noise, sig_out=model.sig[ens.neurons]['in'], inc=True)

    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
#......... remainder of this function omitted .........
Author: nengo | Project: nengo | Lines: 101 | Source: ensemble.py



Note: The nengo.utils.numpy.norm examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Refer to each project's License before distributing or reusing the code, and do not reproduce this article without permission.

