Python numpy.array Function Code Examples


This article collects typical usage examples of the nengo.utils.numpy.array function in Python. If you are unsure how to call array, what it does, or simply want concrete usage patterns, the hand-picked code examples below should help.



Twenty code examples of the array function are shown below, ordered by popularity.
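
Before the examples, here is a minimal sketch of what this array helper adds on top of numpy.array. It uses only the keyword arguments that appear in the examples below (min_dims, dtype, copy, readonly); the exact behaviour shown in the comments is an assumption, not taken from the nengo documentation.

import numpy as np
import nengo.utils.numpy as npext

# pad a 1-D sequence up to at least two dimensions
points = npext.array([0.1, 0.5, 0.9], dtype=np.float64, min_dims=2)
print(points.ndim)           # expected: 2

# build a read-only scalar, as done for the common zero/one signals in Example 15
zero = npext.array(0.0, readonly=True)
print(zero.flags.writeable)  # expected: False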

Example 1: build_decoder

        def build_decoder(function, evals, solver):
            """Internal function for building a single decoder."""
            if evals is None:
                evals = npext.array(eval_points, min_dims=2)
            else:
                evals = npext.array(evals, min_dims=2)

            assert solver is None or not solver.weights

            x = np.dot(evals, encoders.T / ens.radius)
            activities = ens.neuron_type.rates(x, gain, bias)

            if function is None:
                targets = evals
            else:
                (value, _) = checked_call(function, evals[0])
                function_size = np.asarray(value).size
                targets = np.zeros((len(evals), function_size))

                for i, ep in enumerate(evals):
                    targets[i] = function(ep)

            if solver is None:
                solver = nengo.solvers.LstsqL2()

            return solver(activities, targets, rng=rng)[0]
Developer: ctn-archive, Project: nengo_spinnaker_2014, Lines: 26, Source: ensemble.py
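
The call solver(activities, targets, rng=rng) at the end returns a (decoders, info) tuple. Below is a small self-contained sketch of that pattern, with purely illustrative random data standing in for real tuning curves.

import numpy as np
import nengo

rng = np.random.RandomState(0)
activities = rng.uniform(0, 200, size=(750, 50))  # (n_eval_points, n_neurons)
targets = rng.uniform(-1, 1, size=(750, 1))       # (n_eval_points, size_out)

# regularized least-squares solve for the decoding weights
decoders, info = nengo.solvers.LstsqL2()(activities, targets, rng=rng)
print(decoders.shape)  # expected: (50, 1), one column of weights per output dimension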


Example 2: build_linear_system

def build_linear_system(model, conn):
    encoders = model.params[conn.pre_obj].encoders
    gain = model.params[conn.pre_obj].gain
    bias = model.params[conn.pre_obj].bias

    eval_points = conn.eval_points
    if eval_points is None:
        eval_points = npext.array(
            model.params[conn.pre_obj].eval_points, min_dims=2)
    else:
        eval_points = npext.array(eval_points, min_dims=2)

    x = np.dot(eval_points, encoders.T / conn.pre_obj.radius)
    activities = conn.pre_obj.neuron_type.rates(x, gain, bias)
    if np.count_nonzero(activities) == 0:
        raise RuntimeError(
            "Building %s: 'activities' matrix is all zero for %s. "
            "This is because no evaluation points fall in the firing "
            "ranges of any neurons." % (conn, conn.pre_obj))

    if conn.function is None:
        targets = eval_points[:, conn.pre_slice]
    else:
        targets = np.zeros((len(eval_points), conn.size_mid))
        for i, ep in enumerate(eval_points[:, conn.pre_slice]):
            targets[i] = conn.function(ep)

    return eval_points, activities, targets
Developer: Ocode, Project: nengo, Lines: 28, Source: connection.py


Example 3: target_function

def target_function(eval_points, targets):
    """Get a function that maps evaluation points to target points.

    Use this when making a nengo connection using a sequence
    of evaluation points and targets, instead of passing a
    callable as the connection function.

    Parameters
    ----------
    eval_points: iterable
        A sequence of evaluation points.
    targets: iterable
        A sequence of targets with the same length as ``eval_points``

    Returns
    -------
    dict:
       A dictionary with two keys: ``function`` and ``eval_points``.
       ``function`` is the mapping between the evaluation points and
       the targets. ``eval_points`` are the evaluation points that
       will be passed to the connection.

    Examples
    --------
    ens1 = nengo.Ensemble(n_neurons=100, dimensions=1)
    ens2 = nengo.Ensemble(n_neurons=100, dimensions=1)
    eval_points = numpy.arange(-1, 1, 0.01)
    targets = numpy.sin(eval_points)
    # the transformation on this connection approximates a sine function
    nengo.Connection(ens1, ens2,
                     **target_function(eval_points, targets))
    """

    eval_points = npext.array(eval_points, dtype=np.float64, min_dims=2)
    targets = npext.array(targets, dtype=np.float64, min_dims=2)

    if len(eval_points) != len(targets):
        raise ValueError("Number of evaluation points %s "
                         "is not equal to number of targets "
                         "%s" % (len(eval_points), len(targets)))

    func_dict = {}
    for eval_point, target in zip(eval_points, targets):
        func_dict[tuple(eval_point)] = target

    def function(x):
        x = tuple(x)
        return func_dict[x]

    return {'function': function, 'eval_points': eval_points}
Developer: Ocode, Project: nengo, Lines: 50, Source: connection.py
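
The docstring example above can be completed into a runnable model. The import location of target_function is an assumption here (it has lived in nengo.utils.connection in the nengo versions this example was taken from).

import numpy as np
import nengo
from nengo.utils.connection import target_function  # assumed import path

with nengo.Network() as model:
    ens1 = nengo.Ensemble(n_neurons=100, dimensions=1)
    ens2 = nengo.Ensemble(n_neurons=100, dimensions=1)
    eval_points = np.arange(-1, 1, 0.01)
    targets = np.sin(eval_points)
    # the decoders on this connection approximate the sine function
    nengo.Connection(ens1, ens2, **target_function(eval_points, targets))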


Example 4: __set__

    def __set__(self, node, output):
        super(OutputParam, self).validate(node, output)

        size_in_set = node.size_in is not None
        node.size_in = node.size_in if size_in_set else 0

        # --- Validate and set the new size_out
        if output is None:
            if node.size_out is not None:
                warnings.warn("'Node.size_out' is being overwritten with "
                              "'Node.size_in' since 'Node.output=None'")
            node.size_out = node.size_in
        elif isinstance(output, Process):
            if not size_in_set:
                node.size_in = output.default_size_in
            if node.size_out is None:
                node.size_out = output.default_size_out
        elif callable(output):
            # We trust user's size_out if set, because calling output
            # may have unintended consequences (e.g., network communication)
            if node.size_out is None:
                result = self.validate_callable(node, output)
                node.size_out = 0 if result is None else result.size
        elif is_array_like(output):
            # Make into correctly shaped numpy array before validation
            output = npext.array(output, min_dims=1, copy=False, dtype=np.float64)
            self.validate_ndarray(node, output)
            node.size_out = output.size
        else:
            raise ValidationError("Invalid node output type %r" % type(output).__name__, attr=self.name, obj=node)

        # --- Set output
        self.data[node] = output
Developer: nengo, Project: nengo, Lines: 32, Source: node.py


Example 5: get_eval_points

def get_eval_points(model, conn, rng):
    if conn.eval_points is None:
        return npext.array(
            model.params[conn.pre_obj].eval_points, min_dims=2)
    else:
        return gen_eval_points(
            conn.pre_obj, conn.eval_points, rng, conn.scale_eval_points)
Developer: CamZHU, Project: nengo, Lines: 7, Source: connection.py


Example 6: __set__

    def __set__(self, node, output):
        super(OutputParam, self).validate(node, output)

        # --- Validate and set the new size_out
        if output is None:
            if node.size_out is not None:
                warnings.warn("'Node.size_out' is being overwritten with "
                              "'Node.size_in' since 'Node.output=None'")
            node.size_out = node.size_in
        elif callable(output) and node.size_out is not None:
            # We trust user's size_out if set, because calling output
            # may have unintended consequences (e.g., network communication)
            pass
        elif callable(output):
            result = self.validate_callable(node, output)
            node.size_out = 0 if result is None else result.size
        else:
            # Make into correctly shaped numpy array before validation
            output = npext.array(
                output, min_dims=1, copy=False, dtype=np.float64)
            self.validate_ndarray(node, output)
            node.size_out = output.size

        # --- Set output
        self.data[node] = output
Developer: goaaron, Project: blouw-etal-2015, Lines: 25, Source: node.py


Example 7: spikes2events

def spikes2events(t, spikes):
    """Return an event-based representation of spikes (i.e. spike times)"""
    spikes = npext.array(spikes, copy=False, min_dims=2)
    if spikes.ndim > 2:
        raise ValueError("Cannot handle %d-dimensional arrays" % spikes.ndim)
    if spikes.shape[-1] != len(t):
        raise ValueError("Last dimension of `spikes` must equal `len(t)`")

    # find nonzero elements (spikes) in each row, and translate to times
    return [t[spike != 0] for spike in spikes]
Developer: Dartonw, Project: nengo, Lines: 10, Source: neurons.py
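
On toy data the return value is simply one array of spike times per neuron. The snippet below reuses the same indexing expression as the function, so it only assumes the (neurons, timesteps) layout enforced by the shape check above.

import numpy as np

t = np.arange(5) * 0.001                      # five timesteps at dt = 1 ms
spikes = np.array([[0, 1, 0, 1, 0],           # neuron 0 spikes at t[1] and t[3]
                   [1, 0, 0, 0, 1]])          # neuron 1 spikes at t[0] and t[4]
events = [t[spike != 0] for spike in spikes]  # same expression the function returns
print(events)  # [array([0.001, 0.003]), array([0., 0.004])]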


Example 8: build_lif

def build_lif(model, ens):
    # Create a random number generator
    rng = np.random.RandomState(model.seeds[ens])

    # Get the eval points
    eval_points = ensemble.gen_eval_points(ens, ens.eval_points, rng=rng)

    # Get the encoders
    if isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
        encoders = np.asarray(encoders, dtype=np.float64)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Get maximum rates and intercepts
    max_rates = ensemble.sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = ensemble.sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is None and ens.bias is None:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)
    elif ens.gain is not None and ens.bias is not None:
        gain = ensemble.sample(ens.gain, ens.n_neurons, rng=rng)
        bias = ensemble.sample(ens.bias, ens.n_neurons, rng=rng)
    else:
        raise NotImplementedError(
            "gain or bias set for {!s}, but not both. Solving for one given "
            "the other is not yet implemented.".format(ens)
        )

    # Scale the encoders
    scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    # Store all the parameters
    model.params[ens] = BuiltEnsemble(
        eval_points=eval_points,
        encoders=encoders,
        scaled_encoders=scaled_encoders,
        max_rates=max_rates,
        intercepts=intercepts,
        gain=gain,
        bias=bias
    )

    # Create the object which will handle simulation of the LIF ensemble.  This
    # object will be responsible for adding items to the netlist and providing
    # functions to prepare the ensemble for simulation.  The object may be
    # modified by later methods.
    model.object_operators[ens] = operators.EnsembleLIF(ens)
Developer: hunse, Project: nengo_spinnaker, Lines: 50, Source: ensemble.py


Example 9: test_encoders

def test_encoders(n_dimensions, n_neurons=10, encoders=None):
    if encoders is None:
        encoders = np.random.normal(size=(n_neurons, n_dimensions))
        encoders = npext.array(encoders, min_dims=2, dtype=np.float64)
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    model = nengo.Network(label="_test_encoders")
    with model:
        ens = nengo.Ensemble(n_neurons=n_neurons,
                             dimensions=n_dimensions,
                             encoders=encoders,
                             label="A")
    sim = nengo.Simulator(model)

    assert np.allclose(encoders, sim.data[ens].encoders)
Developer: goaaron, Project: blouw-etal-2015, Lines: 15, Source: test_ensemble.py
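
The normalization idiom in this test recurs in several of the build functions on this page. A short sketch showing that it leaves each row (one neuron's encoder) with unit length; npext.norm is assumed to behave like numpy.linalg.norm with axis and keepdims, as its usage here suggests.

import numpy as np
import nengo.utils.numpy as npext

encoders = np.random.RandomState(0).normal(size=(10, 2))
encoders = npext.array(encoders, min_dims=2, dtype=np.float64)
encoders /= npext.norm(encoders, axis=1, keepdims=True)   # normalize each row
print(np.linalg.norm(encoders, axis=1))                   # expected: all ones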


Example 10: __init__

    def __init__(self, output=None, size_in=0, size_out=None, label="Node"):
        if output is not None and not is_callable(output):
            output = npext.array(output, min_dims=1, copy=False)
        self.output = output
        self.label = label
        self.size_in = size_in

        if output is not None:
            if isinstance(output, np.ndarray):
                shape_out = output.shape
            elif size_out is None and is_callable(output):
                t, x = np.asarray(0.0), np.zeros(size_in)
                args = [t, x] if size_in > 0 else [t]
                try:
                    result = output(*args)
                except TypeError:
                    raise TypeError(
                        "The function '%s' provided to '%s' takes %d "
                        "argument(s), where a function for this type "
                        "of node is expected to take %d argument(s)" % (
                            output.__name__, self,
                            output.__code__.co_argcount, len(args)))

                shape_out = (0,) if result is None \
                    else np.asarray(result).shape
            else:
                shape_out = (size_out,)  # assume `size_out` is correct

            if len(shape_out) > 1:
                raise ValueError(
                    "Node output must be a vector (got array shape %s)" %
                    (shape_out,))

            size_out_new = shape_out[0] if len(shape_out) == 1 else 1

            if size_out is not None and size_out != size_out_new:
                raise ValueError(
                    "Size of Node output (%d) does not match `size_out` (%d)" %
                    (size_out_new, size_out))

            size_out = size_out_new
        else:  # output is None
            size_out = size_in

        self.size_out = size_out

        # Set up probes
        self.probes = {'output': []}
Developer: ZeitgeberH, Project: nengo, Lines: 48, Source: objects.py


Example 11: rates_kernel

def rates_kernel(t, spikes, kind='gauss', tau=0.04):
    """Estimate firing rates from spikes using a kernel.

    Parameters
    ----------
    t : (M,) array_like
        The times at which raw spike data (spikes) is defined.
    spikes : (M, N) array_like
        The raw spike data from N neurons.
    kind : str {'expon', 'gauss', 'expogauss', 'alpha'}, optional
        The type of kernel to use. 'expon' is an exponential kernel, 'gauss' is
        a Gaussian (normal) kernel, 'expogauss' is an exponential followed by
        a Gaussian, and 'alpha' is an alpha function kernel.
    tau : float
        The time constant for the kernel. The optimal value will depend on the
        firing rate of the neurons, with a longer tau preferred for lower
        firing rates. The default value of 0.04 works well across a wide range
        of firing rates.
    """
    spikes = spikes.T
    spikes = npext.array(spikes, copy=False, min_dims=2)
    if spikes.ndim > 2:
        raise ValidationError("Cannot handle %d-dimensional arrays"
                              % spikes.ndim, attr='spikes')
    if spikes.shape[-1] != len(t):
        raise ValidationError("Last dimension of 'spikes' must equal 'len(t)'",
                              attr='spikes')

    n, nt = spikes.shape
    dt = t[1] - t[0]

    tau_i = tau / dt
    kind = kind.lower()
    if kind == 'expogauss':
        rates = lowpass_filter(spikes, tau_i, kind='expon')
        rates = lowpass_filter(rates, tau_i / 4, kind='gauss')
    else:
        rates = lowpass_filter(spikes, tau_i, kind=kind)

    return rates.T
Developer: fmirus, Project: nengo_extras, Lines: 40, Source: neurons.py
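
A hedged usage sketch for rates_kernel: the import path follows the source attribution (nengo_extras/neurons.py), and the spike representation (impulses of height 1/dt, as nengo simulators typically produce) is an assumption.

import numpy as np
from nengo_extras.neurons import rates_kernel  # assumed import path

dt = 0.001
t = np.arange(1000) * dt
rng = np.random.RandomState(0)
# roughly 50 Hz random spike trains for 3 neurons, stored as impulses of height 1/dt
spikes = (rng.rand(1000, 3) < 50 * dt) / dt
rates = rates_kernel(t, spikes, kind='gauss', tau=0.04)
print(rates.shape)  # expected: (1000, 3), smoothed firing-rate estimates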


Example 12: coerce

    def coerce(self, node, output):
        output = super().coerce(node, output)

        size_in_set = node.size_in is not None
        node.size_in = node.size_in if size_in_set else 0

        # --- Validate and set the new size_out
        if output is None:
            if node.size_out is not None:
                warnings.warn("'Node.size_out' is being overwritten with "
                              "'Node.size_in' since 'Node.output=None'")
            node.size_out = node.size_in
        elif isinstance(output, Process):
            if not size_in_set:
                node.size_in = output.default_size_in
            if node.size_out is None:
                node.size_out = output.default_size_out
        elif callable(output):
            self.check_callable_args_list(node, output)
            # We trust user's size_out if set, because calling output
            # may have unintended consequences (e.g., network communication)
            if node.size_out is None:
                node.size_out = self.check_callable_output(node, output)
        elif is_array_like(output):
            # Make into correctly shaped numpy array before validation
            output = npext.array(
                output, min_dims=1, copy=False, dtype=np.float64)
            self.check_ndarray(node, output)
            if not np.all(np.isfinite(output)):
                raise ValidationError("Output value must be finite.",
                                      attr=self.name, obj=node)
            node.size_out = output.size
        else:
            raise ValidationError("Invalid node output type %r" %
                                  type(output).__name__,
                                  attr=self.name, obj=node)

        return output
Developer: nengo, Project: nengo, Lines: 38, Source: node.py


Example 13: make_pool

    def make_pool(self, ens):
        if isinstance(ens.encoders, Distribution):
            encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions,
                                           rng=self.rng)
        else:
            encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
            encoders /= npext.norm(encoders, axis=1, keepdims=True)


        if self.config[ens].compact:
            p = pool.CompactPool(ens.n_neurons)
        elif self.config[ens].fixed:
            p = pool.FixedPool(ens.n_neurons,
                               bits_soma=self.config[ens].fixed_bits_soma,
                               bits_syn=self.config[ens].fixed_bits_syn)
        else:
            p = pool.StdPool(ens.n_neurons)
        intercepts = nengo.builder.sample(ens.intercepts, ens.n_neurons,
                                          rng=self.rng)
        max_rates = nengo.builder.sample(ens.max_rates, ens.n_neurons,
                                          rng=self.rng)
        gain, bias = self.find_gain_bias(p.soma, intercepts, max_rates)
        p.set_bias(bias)
        print('bias', p.get_bias())

        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

        self.pools[ens] = p

        self.model.params[ens] = BuiltEnsemble(intercepts=intercepts,
                                               max_rates=max_rates,
                                               gain=gain,
                                               bias=bias,
                                               encoders=encoders,
                                               scaled_encoders=scaled_encoders,
                                               eval_points=None,
                                               )
Developer: tcstewar, Project: neuron_explore, Lines: 37, Source: simulator.py


Example 14: build_ensemble

def build_ensemble(model, ens):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    # Generate eval points
    if isinstance(ens.eval_points, Distribution):
        n_points = ens.n_eval_points
        if n_points is None:
            n_points = default_n_eval_points(ens.n_neurons, ens.dimensions)
        eval_points = ens.eval_points.sample(n_points, ens.dimensions, rng)
        # eval_points should be in the ensemble's representational range
        eval_points *= ens.radius
    else:
        if (ens.n_eval_points is not None
                and ens.eval_points.shape[0] != ens.n_eval_points):
            warnings.warn("Number of eval_points doesn't match "
                          "n_eval_points. Ignoring n_eval_points.")
        eval_points = np.array(ens.eval_points, dtype=np.float64)

    # Set up signal
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions, rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
        encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Determine max_rates and intercepts
    max_rates = sample(ens.max_rates, ens.n_neurons, rng=rng)
    intercepts = sample(ens.intercepts, ens.n_neurons, rng=rng)

    # Build the neurons
    if ens.gain is not None and ens.bias is not None:
        gain = sample(ens.gain, ens.n_neurons, rng=rng)
        bias = sample(ens.bias, ens.n_neurons, rng=rng)
    elif ens.gain is not None or ens.bias is not None:
        # TODO: handle this instead of error
        raise NotImplementedError("gain or bias set for %s, but not both. "
                                  "Solving for one given the other is not "
                                  "implemented yet." % ens)
    else:
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.dimensions), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_out" % ens)
        model.add_op(Copy(src=Signal(bias, name="%s.bias" % ens),
                          dst=model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens)

    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
Developer: Ocode, Project: nengo, Lines: 90, Source: ensemble.py


Example 15: build_network

def build_network(model, network):
    """Takes a Network object and returns a Model.

    This determines the signals and operators necessary to simulate that model.

    Builder does this by mapping each high-level object to its associated
    signals and operators one-by-one, in the following order:

    1) Ensembles, Nodes, Neurons
    2) Subnetworks (recursively)
    3) Connections
    4) Learning Rules
    5) Probes
    """
    def get_seed(obj, rng):
        # Generate a seed no matter what, so that setting a seed or not on
        # one object doesn't affect the seeds of other objects.
        seed = rng.randint(npext.maxint)
        return (seed if not hasattr(obj, 'seed') or obj.seed is None
                else obj.seed)

    if model.toplevel is None:
        model.toplevel = network
        model.sig['common'][0] = Signal(
            npext.array(0.0, readonly=True), name='Common: Zero')
        model.sig['common'][1] = Signal(
            npext.array(1.0, readonly=True), name='Common: One')
        model.seeds[network] = get_seed(network, np.random)

    # Set config
    old_config = model.config
    model.config = network.config

    # assign seeds to children
    rng = np.random.RandomState(model.seeds[network])
    sorted_types = sorted(network.objects, key=lambda t: t.__name__)
    for obj_type in sorted_types:
        for obj in network.objects[obj_type]:
            model.seeds[obj] = get_seed(obj, rng)

    logger.debug("Network step 1: Building ensembles and nodes")
    for obj in network.ensembles + network.nodes:
        model.build(obj)

    logger.debug("Network step 2: Building subnetworks")
    for subnetwork in network.networks:
        model.build(subnetwork)

    logger.debug("Network step 3: Building connections")
    for conn in network.connections:
        model.build(conn)

    logger.debug("Network step 4: Building learning rules")
    for conn in network.connections:
        rule = conn.learning_rule
        if is_iterable(rule):
            for r in (itervalues(rule) if isinstance(rule, dict) else rule):
                model.build(r)
        elif rule is not None:
            model.build(rule)

    logger.debug("Network step 5: Building probes")
    for probe in network.probes:
        model.build(probe)

    # Unset config
    model.config = old_config
    model.params[network] = None
Developer: LittileBee, Project: nengo, Lines: 68, Source: network.py


Example 16: before_simulation

    def before_simulation(self, netlist, simulator, n_steps):
        """Generate the values to output for the next set of simulation steps.
        """
        # Write out the system region to deal with the current run-time
        self.system_region.n_steps = n_steps

        # Evaluate the node for this period of time
        if self.period is not None:
            max_n = min(n_steps, int(np.ceil(self.period / simulator.dt)))
        else:
            max_n = n_steps

        ts = np.arange(simulator.steps, simulator.steps + max_n) * simulator.dt
        if callable(self.function):
            values = np.array([self.function(t) for t in ts])
        elif isinstance(self.function, Process):
            values = self.function.run_steps(max_n, d=self.size_out,
                                             dt=simulator.dt)
        else:
            values = np.array([self.function for t in ts])

        # Ensure that the values can be sliced, regardless of how they were
        # generated.
        values = npext.array(values, min_dims=2)

        # Compute the output for each connection
        outputs = []
        for conn, transform in self.conns_transforms:
            output = []

            # For each f(t) for the next set of simulations we calculate the
            # output at the end of the connection.  To do this we first apply
            # the pre-slice, then the function and then the post-slice.
            for v in values:
                # Apply the pre-slice
                v = v[conn.pre_slice]

                # Apply the function on the connection, if there is one.
                if conn.function is not None:
                    v = np.asarray(conn.function(v), dtype=float)

                output.append(np.dot(transform, v.T))
            outputs.append(np.array(output).reshape(max_n, -1))

        # Combine all of the output values to form a large matrix which we can
        # dump into memory.
        output_matrix = np.hstack(outputs)

        new_output_region = regions.MatrixRegion(
            np_to_fix(output_matrix),
            sliced_dimension=regions.MatrixPartitioning.columns
        )

        # Write the simulation values into memory
        for vertex in self.vertices:
            self.vertices_region_memory[vertex][self.system_region].seek(0)
            self.system_region.n_steps = max_n
            self.system_region.write_subregion_to_file(
                self.vertices_region_memory[vertex][self.system_region],
                vertex.slice
            )

            self.vertices_region_memory[vertex][self.output_region].seek(0)
            new_output_region.write_subregion_to_file(
                self.vertices_region_memory[vertex][self.output_region],
                vertex.slice
            )
Developer: pabogdan, Project: nengo_spinnaker, Lines: 67, Source: value_source.py


Example 17: build_network

def build_network(model, network):
    """Takes a Network object and returns a Model.

    This determines the signals and operators necessary to simulate that model.

    Builder does this by mapping each high-level object to its associated
    signals and operators one-by-one, in the following order:

    1) Ensembles, Nodes, Neurons
    2) Subnetworks (recursively)
    3) Connections
    4) Learning Rules
    5) Probes
    """
    def get_seed(obj, rng):
        # Generate a seed no matter what, so that setting a seed or not on
        # one object doesn't affect the seeds of other objects.
        seed = rng.randint(npext.maxint)
        return (seed if not hasattr(obj, 'seed') or obj.seed is None
                else obj.seed)

    if model.toplevel is None:
        model.toplevel = network
        model.sig['common'][0] = Signal(
            npext.array(0.0, readonly=True), name='Common: Zero')
        model.sig['common'][1] = Signal(
            npext.array(1.0, readonly=True), name='Common: One')
        model.seeds[network] = get_seed(network, np.random)

    # Set config
    old_config = model.config
    model.config = network.config

    # assign seeds to children
    rng = np.random.RandomState(model.seeds[network])
    sorted_types = sorted(network.objects, key=lambda t: t.__name__)
    for obj_type in sorted_types:
        for obj in network.objects[obj_type]:
            model.seeds[obj] = get_seed(obj, rng)

    logger.debug("Network step 1: Building ensembles and nodes")
    for obj in network.ensembles + network.nodes:
        model.build(obj)

    logger.debug("Network step 2: Building subnetworks")
    for subnetwork in network.networks:
        model.build(subnetwork)

    logger.debug("Network step 3: Building connections")
    for conn in network.connections:
        # NB: we do these in the order in which they're defined, and build the
        # learning rule in the connection builder. Because learning rules are
        # attached to connections, the connection that contains the learning
        # rule (and the learning rule) are always built *before* a connection
        # that attaches to that learning rule. Therefore, we don't have to
        # worry about connection ordering here.
        # TODO: Except perhaps if the connection being learned
        # is in a subnetwork?
        model.build(conn)

    logger.debug("Network step 4: Building probes")
    for probe in network.probes:
        model.build(probe)

    # Unset config
    model.config = old_config
    model.params[network] = None
Developer: CamZHU, Project: nengo, Lines: 67, Source: network.py


Example 18: build_ensemble

def build_ensemble(model, ens):
    # Create random number generator
    rng = np.random.RandomState(model.seeds[ens])

    eval_points = gen_eval_points(ens, ens.eval_points, rng=rng)

    # Set up signal
    model.sig[ens]['in'] = Signal(np.zeros(ens.dimensions),
                                  name="%s.signal" % ens)
    model.add_op(Reset(model.sig[ens]['in']))

    # Set up encoders
    if isinstance(ens.neuron_type, Direct):
        encoders = np.identity(ens.dimensions)
    elif isinstance(ens.encoders, Distribution):
        encoders = sample(ens.encoders, ens.n_neurons, ens.dimensions, rng=rng)
    else:
        encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
    encoders /= npext.norm(encoders, axis=1, keepdims=True)

    # Build the neurons
    if ens.gain is not None and ens.bias is not None:
        gain = sample(ens.gain, ens.n_neurons, rng=rng)
        bias = sample(ens.bias, ens.n_neurons, rng=rng)
        max_rates, intercepts = None, None  # TODO: determine from gain & bias
    elif ens.gain is not None or ens.bias is not None:
        # TODO: handle this instead of error
        raise NotImplementedError("gain or bias set for %s, but not both. "
                                  "Solving for one given the other is not "
                                  "implemented yet." % ens)
    else:
        max_rates = sample(ens.max_rates, ens.n_neurons, rng=rng)
        intercepts = sample(ens.intercepts, ens.n_neurons, rng=rng)
        gain, bias = ens.neuron_type.gain_bias(max_rates, intercepts)

    if isinstance(ens.neuron_type, Direct):
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.dimensions), name='%s.neuron_in' % ens)
        model.sig[ens.neurons]['out'] = model.sig[ens.neurons]['in']
        model.add_op(Reset(model.sig[ens.neurons]['in']))
    else:
        model.sig[ens.neurons]['in'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_in" % ens)
        model.sig[ens.neurons]['out'] = Signal(
            np.zeros(ens.n_neurons), name="%s.neuron_out" % ens)
        model.add_op(Copy(src=Signal(bias, name="%s.bias" % ens),
                          dst=model.sig[ens.neurons]['in']))
        # This adds the neuron's operator and sets other signals
        model.build(ens.neuron_type, ens.neurons)

    # Scale the encoders
    if isinstance(ens.neuron_type, Direct):
        scaled_encoders = encoders
    else:
        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

    model.sig[ens]['encoders'] = Signal(
        scaled_encoders, name="%s.scaled_encoders" % ens)

    # Inject noise if specified
    if ens.noise is not None:
        model.build(ens.noise, sig_out=model.sig[ens.neurons]['in'], inc=True)

    # Create output signal, using built Neurons
    model.add_op(DotInc(
        model.sig[ens]['encoders'],
        model.sig[ens]['in'],
        model.sig[ens.neurons]['in'],
        tag="%s encoding" % ens))

    # Output is neural output
    model.sig[ens]['out'] = model.sig[ens.neurons]['out']

    model.params[ens] = BuiltEnsemble(eval_points=eval_points,
                                      encoders=encoders,
                                      intercepts=intercepts,
                                      max_rates=max_rates,
                                      scaled_encoders=scaled_encoders,
                                      gain=gain,
                                      bias=bias)
Developer: CamZHU, Project: nengo, Lines: 80, Source: ensemble.py


Example 19: make_ensemble

    def make_ensemble(self, ens):
        """Build an Ensemble."""
        self.model.ensembles.append(ens)

        if isinstance(ens.neuron_type, nengo.Direct):
            self.make_direct_ensemble(ens)
            return

        p = self.make_pool(ens)

        if isinstance(ens.encoders, Distribution):
            encoders = ens.encoders.sample(ens.n_neurons, ens.dimensions,
                                           rng=self.rng)
        else:
            encoders = npext.array(ens.encoders, min_dims=2, dtype=np.float64)
            encoders /= npext.norm(encoders, axis=1, keepdims=True)

        intercepts = nengo.builder.ensemble.sample(ens.intercepts,
                                                   ens.n_neurons, rng=self.rng)
        max_rates = nengo.builder.ensemble.sample(ens.max_rates,
                                                  ens.n_neurons, rng=self.rng)

        if ens.gain is not None and ens.bias is not None:
            gain = nengo.builder.ensemble.sample(ens.gain, ens.n_neurons,
                                                 rng=self.rng)
            bias = nengo.builder.ensemble.sample(ens.bias, ens.n_neurons,
                                                 rng=self.rng)
        elif ens.gain is not None or ens.bias is not None:
            raise NotImplementedError("gain or bias set for %s, but not both. "
                                      "Solving for one given the other is not "
                                      "implemented yet." % ens)
        else:
            gain, bias = neuron_tuning.find_gain_bias(p, intercepts, max_rates)

        scaled_encoders = encoders * (gain / ens.radius)[:, np.newaxis]

        self.model.pools[ens] = p

        self.model.decoders[ens] = {}

        if isinstance(ens.eval_points, Distribution):
            n_points = ens.n_eval_points
            if n_points is None:
                n_points = nengo.utils.builder.default_n_eval_points(
                    ens.n_neurons, ens.dimensions)
            eval_points = ens.eval_points.sample(n_points, ens.dimensions,
                                                 self.rng)
            eval_points *= ens.radius
        else:
            if (ens.n_eval_points is not None
                    and ens.eval_points.shape[0] != ens.n_eval_points):
                warnings.warn("Number of eval_points doesn't match "
                              "n_eval_points. Ignoring n_eval_points.")
            eval_points = np.array(ens.eval_points, dtype=np.float64)

            eval_points *= ens.radius

        J = gain * np.dot(eval_points, encoders.T / ens.radius) + bias
        activity = self.compute_activity(ens, J)

        self.model.params[ens] = BuiltEnsemble(intercepts=intercepts,
                                               max_rates=max_rates,
                                               gain=gain,
                                               bias=bias,
                                               encoders=encoders,
                                               scaled_encoders=scaled_encoders,
                                               eval_points=eval_points,
                                               activity=activity,
                                               neurons=ens.neurons,
                                               radius=ens.radius
                                               )
        self.model.outputs[ens] = np.zeros(ens.n_neurons, dtype=float)
        self.model.input_filters[ens] = {}
        self.model.input_filters[ens.neurons] = {}
Developer: mahmoodalmansooei, Project: nengo_distilled, Lines: 74, Source: builder.py


Example 20: build_ensemble

    def build_ensemble(self, ens):
        # Create random number generator
        seed = self.next_seed() if ens.seed is None else ens.seed
        rng = np.random.RandomState(seed)

        # Generate eval points
        if ens.eval_points is None or is_integer(ens.eval_points):
            eval_points = self.generate_eval_points(
                ens=ens, n_points=ens.eval_points, rng=rng)
        else:
            eval_points = npext.array(
                ens.eval_points, dtype=np.float64, min_dims=2)

        # Set  
