This article collects typical usage examples of the Python function pylearn2.utils.safe_zip. If you are wondering how exactly safe_zip is used in practice, the curated code examples below should help.
A total of 20 safe_zip code examples are shown below, sorted by popularity by default.
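Before the examples, it helps to know what safe_zip itself does: it behaves like the built-in zip, except that it raises an error when its arguments have different lengths instead of silently truncating to the shortest one. The sketch below captures that contract; it is an approximation written for this article, not a verbatim copy of pylearn2's source.

def safe_zip(*args):
    """Like zip(), but raise if the sequences differ in length (sketch)."""
    base = len(args[0])
    for i, arg in enumerate(args[1:]):
        if len(arg) != base:
            raise ValueError("Argument 0 has length %d, but argument %d "
                             "has length %d" % (base, i + 1, len(arg)))
    return zip(*args)

pairs = safe_zip([1, 2], ['a', 'b'])   # [(1, 'a'), (2, 'b')] on Python 2
# safe_zip([1, 2, 3], ['a', 'b'])      # raises ValueError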
Example 1: expr
def expr(self, model, data, ** kwargs):
"""
Returns the sum of the costs the SumOfCosts instance was given at
initialization.
Parameters
----------
model : pylearn2.models.model.Model
the model for which we want to calculate the sum of costs
data : flat tuple of tensor_like variables.
data has to follow the format defined by self.get_data_specs(),
but this format will always be a flat tuple.
"""
self.get_data_specs(model)[0].validate(data)
composite_specs, mapping = self.get_composite_specs_and_mapping(model)
nested_data = mapping.nest(data)
costs = []
for cost, cost_data in safe_zip(self.costs, nested_data):
costs.append(cost.expr(model, cost_data, **kwargs))
assert len(costs) > 0
if any([cost is None for cost in costs]):
sum_of_costs = None
else:
costs = [coeff * cost
for coeff, cost in safe_zip(self.coeffs, costs)]
assert len(costs) > 0
sum_of_costs = reduce(lambda x, y: x + y, costs)
return sum_of_costs
Author: nitbix | Project: pylearn2 | Lines: 30 | Source: cost.py
Example 2: _read_hdf5
def _read_hdf5(self, sources, aliases, load_all=False, use_h5py=True):
"""
Loads elements from an HDF5 dataset using either h5py or tables. It can
load either the whole object in memory or a reference to the object on
disk, depending on the load_all parameter. Returns a list of objects.
Parameters
----------
sources : list of str
    List of HDF5 keys corresponding to the data to be loaded.
aliases : list of str
    Aliases under which the loaded elements are also indexed in the
    returned dictionary.
load_all : bool, optional (default False)
    If true, load the dataset into memory.
use_h5py : bool, optional (default True)
    If true, use h5py; otherwise use tables.
"""
data = alias_dict()
if use_h5py:
for s, a in safe_zip(sources, aliases):
if load_all:
data[s, a] = self._fhandler[s][:]
else:
data[s, a] = self._fhandler[s]
# hdf5 handle has no ndim
data[s].ndim = len(data[s].shape)
else:
for s, a in safe_zip(sources, aliases):
if load_all:
data[s, a] = self._fhandler.getNode("/", s)[:]
else:
data[s, a] = self._fhandler.getNode("/", s)
return data
Author: CandyPythonFlow | Project: pylearn2 | Lines: 31 | Source: hdf5.py
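The load_all flag above boils down to the difference between holding an h5py handle and slicing it. A minimal illustration of the two access modes (the file name and key are hypothetical):

import h5py

f = h5py.File('data.h5', 'r')    # hypothetical file
handle = f['features']           # lazy: a reference to the data on disk
in_memory = f['features'][:]     # eager: slicing copies the array into memory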
Example 3: get_gradients
def get_gradients(self, model, data, **kwargs):
self.get_data_specs(model)[0].validate(data)
obj, scratch = self.base_cost(model, data, return_locals=True, **kwargs)
if self.supervised:
assert isinstance(data, (list, tuple))
assert len(data) == 2
(X, Y) = data
else:
X, = data
interm_grads = OrderedDict()
H_hat = scratch['H_hat']
terms = scratch['terms']
hidden_layers = scratch['hidden_layers']
grads = OrderedDict()
assert len(H_hat) == len(terms)
assert len(terms) == len(hidden_layers)
num_layers = len(hidden_layers)
for i in xrange(num_layers):
state = H_hat[i]
layer = model.hidden_layers[i]
term = terms[i]
if term == 0.:
continue
else:
print 'term is ', term
if i == 0:
state_below = X
layer_below = model.visible_layer
else:
layer_below = model.hidden_layers[i-1]
state_below = H_hat[i-1]
state_below = layer_below.upward_state(state_below)
components = flatten(state)
real_grads = T.grad(term, components)
fake_state = layer.linear_feed_forward_approximation(state_below)
fake_components = flatten(fake_state)
real_grads = OrderedDict(safe_zip(fake_components, real_grads))
params = list(layer.get_params())
fake_grads = T.grad(cost=None, consider_constant=flatten(state_below),
                    wrt=params, known_grads=real_grads)
for param, grad in safe_zip(params, fake_grads):
if param in grads:
grads[param] = grads[param] + grad
else:
grads[param] = grad
return grads, OrderedDict()
Author: tempbottle | Project: pylearn2 | Lines: 59 | Source: dbm.py
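Example 3 above (and Example 4 below) leans on Theano's known_grads mechanism: calling T.grad with cost=None and a dictionary mapping intermediate expressions to precomputed gradients makes Theano continue backpropagation from those expressions; here, that lets the gradient flow through a linear feed-forward approximation instead of the real forward pass. A self-contained sketch of the pattern, assuming a working Theano installation (the variables are illustrative, not taken from pylearn2):

import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict

x = T.vector('x')
y = T.sqr(x)                   # intermediate expression
g_y = T.ones_like(y)           # stand-in for dCost/dy, computed elsewhere
# continue backprop from y down to x using the supplied upstream gradient
g_x, = T.grad(cost=None, wrt=[x], known_grads=OrderedDict([(y, g_y)]))
f = theano.function([x], g_x)
xv = np.asarray([1.0, 2.0], dtype=theano.config.floatX)
print(f(xv))                   # dCost/dx = 1 * dy/dx = 2x -> [ 2.  4.]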
Example 4: get_gradients
def get_gradients(self, model, X, Y = None, **kwargs):
"""
.. todo::
WRITEME
"""
if Y is None:
data = X
else:
data = (X, Y)
scratch = self.expr(model, data, include_toronto=False,
                    return_locals=True, **kwargs)
total_cost = scratch['total_cost']
params = list(model.get_params())
grads = dict(safe_zip(params, T.grad(total_cost, params,
disconnected_inputs='ignore')))
if self.toronto_act_targets is not None:
H_hat = scratch['history'][-1]['H_hat']
for i, packed in enumerate(safe_zip(H_hat,
self.toronto_act_coeffs, self.toronto_act_targets)):
s, c, t = packed
if c == 0.:
continue
s, _ = s
m = s.mean(axis=0)
m_cost = c * T.sqr(m-t).mean()
real_grads = T.grad(m_cost, s)
if i == 0:
below = X
else:
below = H_hat[i-1][0]
W, = model.hidden_layers[i].transformer.get_params()
assert W in grads
b = model.hidden_layers[i].b
ancestor = T.scalar()
hack_W = W + ancestor
hack_b = b + ancestor
fake_s = T.dot(below, hack_W) + hack_b
if fake_s.ndim != real_grads.ndim:
print fake_s.ndim
print real_grads.ndim
assert False
sources = [ (fake_s, real_grads) ]
fake_grads = T.grad(cost=None, known_grads=dict(sources),
wrt=[below, ancestor, hack_W, hack_b])
grads[W] = grads[W] + fake_grads[2]
grads[b] = grads[b] + fake_grads[3]
return grads, OrderedDict()
Author: jbornschein | Project: pylearn2 | Lines: 59 | Source: dbm.py
Example 5: get_monitoring_channels
def get_monitoring_channels(self, data):
"""
.. todo::
WRITEME
"""
space, source = self.get_monitoring_data_specs()
space.validate(data)
X = data
history = self.mf(X, return_history=True)
q = history[-1]
rval = OrderedDict()
ch = self.visible_layer.get_monitoring_channels()
for key in ch:
rval['vis_' + key] = ch[key]
for state, layer in safe_zip(q, self.hidden_layers):
ch = layer.get_monitoring_channels()
for key in ch:
rval[layer.layer_name + '_' + key] = ch[key]
ch = layer.get_monitoring_channels_from_state(state)
for key in ch:
rval['mf_' + layer.layer_name + '_' + key] = ch[key]
if len(history) > 1:
prev_q = history[-2]
flat_q = flatten(q)
flat_prev_q = flatten(prev_q)
mx = None
for new, old in safe_zip(flat_q, flat_prev_q):
cur_mx = abs(new - old).max()
if new is old:
logger.error('{0} is {1}'.format(new, old))
assert False
if mx is None:
mx = cur_mx
else:
mx = T.maximum(mx, cur_mx)
rval['max_var_param_diff'] = mx
for layer, new, old in safe_zip(self.hidden_layers,
q, prev_q):
sum_diff = 0.
for sub_new, sub_old in safe_zip(flatten(new), flatten(old)):
sum_diff += abs(sub_new - sub_old).sum()
denom = self.batch_size * \
layer.get_total_state_space().get_total_dimension()
denom = np.cast[config.floatX](denom)
rval['mean_'+layer.layer_name+'_var_param_diff'] = \
sum_diff / denom
return rval
Author: HALLAB-Halifax | Project: pylearn2 | Lines: 57 | Source: dbm.py
Example 6: model
def model(self, large=None, last_layer=None, seed=None):
"""
Creates the MLP model based on internal attributes.
Parameters
----------
large : bool, optional
The variant - large or small; by default, the value stored in
the instance is used.
last_layer : optional
Last layer in the network
seed : optional
Seed for random number generator
Returns
-------
model : pylearn2.models.mlp.MLP
The model
"""
laylist = self.layers()
model = MLP(layers=laylist,
input_space=Conv2DSpace(
shape=self.shape,
num_channels=3,
axes=['b', 0, 1, 'c']),
seed=seed)
last_layer_std = None
index = 0
for lay in laylist[:last_layer_std]:
if not isinstance(lay, (ZeroPad, Softmax)):
# we simulate a get_weights method here, as
# the class does not provide one.
# It does provide get_weights_topo(), but that is not
# usable here because it reorders the shape; example:
#   get_weights => (96, 3, 7, 7)
#   get_weights_topo => (96, 7, 7, 3)
crt_w = lay.transformer.get_params()[0].get_value()
#crt_w = lay.get_weights_topo()
crt_b = lay.get_biases()
assert all([crt == new for crt, new in safe_zip(
crt_w.shape, self.weights[index].shape)])
assert all([crt == new for crt, new in safe_zip(
crt_b.shape, self.biases[index].shape)])
lay.set_weights(self.weights[index])
lay.set_biases(self.biases[index])
index = index + 1
return model
Author: TNick | Project: pyl2extra | Lines: 50 | Source: overfeat.py
Example 7: get_expected_warning
def get_expected_warning(from_space, from_batch, to_space):
# composite -> composite
if isinstance(from_space, CompositeSpace) and \
isinstance(to_space, CompositeSpace):
for fs, fb, ts in safe_zip(from_space.components,
from_batch,
to_space.components):
warning, message = get_expected_warning(fs, fb, ts)
if warning is not None:
return warning, message
return None, None
# composite -> simple
if isinstance(from_space, CompositeSpace):
for fs, fb in safe_zip(from_space.components, from_batch):
warning, message = get_expected_warning(fs, fb, to_space)
if warning is not None:
return warning, message
return None, None
# simple -> composite
if isinstance(to_space, CompositeSpace):
if isinstance(from_space, VectorSpace) and \
isinstance(from_batch, theano.sparse.SparseVariable):
assert from_space.sparse
return (UserWarning,
'Formatting from a sparse VectorSpace to a '
'CompositeSpace is currently (2 Jan 2014) a '
'non-differentiable action. This is because it '
'calls slicing operations on a sparse batch '
'(e.g. "my_matrix[r:R, c:C]", which Theano does '
'not yet have a gradient operator for. If '
'autodifferentiation is reporting an error, '
'this may be why.')
for ts in to_space.components:
warning, message = get_expected_warning(from_space,
from_batch,
ts)
if warning is not None:
return warning, message
return None, None
# simple -> simple
return None, None
Author: AlexArgus | Project: pylearn2 | Lines: 49 | Source: test_space.py
Example 8: make_layer_to_state
def make_layer_to_state(self, num_examples, rng=None):
"""
Makes and returns a dictionary mapping layers to states.
By states, we mean here a real assignment, not a mean field
state. For example, for a layer containing binary random
variables, the state will be a shared variable containing
values in {0,1}, not [0,1]. The visible layer will be included.
Uses a dictionary so it is easy to unambiguously index a layer
without needing to remember rules like vis layer = 0, hiddens
start at 1, etc.
Parameters
----------
num_examples : int
Number of examples to make up the state
rng : MRG_RandomStreams
Random number generator, if None then use model's rng
"""
# Make a list of all layers
layers = [self.visible_layer] + self.hidden_layers
if rng is None:
rng = self.rng
states = [layer.make_state(num_examples, rng) for layer in layers]
def recurse_check(layer, state):
if isinstance(state, (list, tuple)):
for elem in state:
recurse_check(layer, elem)
else:
val = state.get_value()
m = val.shape[0]
if m != num_examples:
raise ValueError(
    layer.layer_name + " gave state with " + str(m) +
    " examples in some component. "
    "We requested " + str(num_examples))
for layer, state in safe_zip(layers, states):
recurse_check(layer, state)
rval = OrderedDict(safe_zip(layers, states))
return rval
Author: JesseLivezey | Project: pylearn2 | Lines: 48 | Source: dbm.py
Example 9: get_gradients
def get_gradients(self, model, data, **kwargs):
space, sources = self.get_data_specs(model)
space.validate(data)
assert isinstance(model, CompressAdversaryPair)
g = model.compressor
d = model.discriminator
# get raw gradients for d and g objectives...
d_obj, g_obj = self.get_objectives(model, data)
g_params = g.get_params()
d_params = d.get_params()
for param in g_params:
assert param not in d_params
for param in d_params:
assert param not in g_params
d_grads = T.grad(d_obj, d_params)
g_grads = T.grad(g_obj, g_params)
# if self.scale_grads:
# S_grad = T.grad(g_obj, S)
# scale = T.maximum(1., self.target_scale / T.sqrt(T.sqr(S_grad).sum()))
# g_grads = [g_grad * scale for g_grad in g_grads]
# adjust raw gradients with control signals
rval = OrderedDict()
zeros = itertools.repeat(theano.tensor.constant(0., dtype='float32'))
if self.ever_train_discriminator:
rval.update(OrderedDict(safe_zip(d_params, [self.now_train_discriminator * dg for dg in d_grads])))
else:
rval.update(OrderedDict(zip(d_params, zeros)))
if self.ever_train_compressor:
rval.update(OrderedDict(safe_zip(g_params, [self.now_train_compressor * gg for gg in g_grads])))
else:
rval.update(OrderedDict(zip(g_params, zeros)))
# update control signals using the updates-return functionality
updates = OrderedDict()
# first, the clock
self.future_train_clock = T.switch(
    T.ge(self.train_clock,
         self.discriminator_steps + self.joint_steps + self.compressor_steps),
    1., self.train_clock + 1.)
updates[self.train_clock] = self.future_train_clock
# then the control signals
updates[self.now_train_discriminator] = T.switch(
    T.le(self.future_train_clock,
         self.discriminator_steps + self.joint_steps),
    1., 0.)
updates[self.now_train_compressor] = T.switch(
    T.gt(self.future_train_clock, self.discriminator_steps),
    1., 0.)
return rval, updates
Author: vinmisra | Project: adversary-compress | Lines: 48 | Source: CAN.py
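The three T.switch expressions at the end of the example implement a repeating schedule: the clock counts up, the discriminator trains for the first discriminator_steps + joint_steps ticks of each cycle, and the compressor trains from tick discriminator_steps + 1 onward. A plain-Python rendering of that schedule, with made-up step counts:

# pure-Python sketch of the training clock above (step counts are made up)
d_steps, j_steps, c_steps = 3, 1, 3
clock = 0.
for update in range(2 * (d_steps + j_steps + c_steps)):
    clock = 1. if clock >= d_steps + j_steps + c_steps else clock + 1.
    train_discriminator = clock <= d_steps + j_steps
    train_compressor = clock > d_steps
    print(update, int(clock), train_discriminator, train_compressor)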
Example 10: __call__
def __call__(self, * batches):
"""
.. todo::
WRITEME
"""
for batch in batches:
if not isinstance(batch, list):
raise TypeError("Expected each argument to be a list,"
" but one argument is " +
str(batch) + " of type "+str(type(batch)))
total_examples = np.cast[config.floatX](
sum([batch[0].shape[0] for batch in batches]))
if self.has_updates:
self._clear()
augmented = self._true_inputs(batches[0]) + [total_examples]
self._set_shared(batches[0])
rval = self._func(*augmented)
for batch in batches[1:]:
augmented = self._true_inputs(batch) + [total_examples]
self._set_shared(batch)
# This works if there is no output,
# because the output is an empty list
cur_out = self._func(*augmented)
rval = [x + y for x, y in safe_zip(rval, cur_out)]
if len(rval) == 1:
return rval[0]
return rval
Author: 123fengye741 | Project: pylearn2 | Lines: 28 | Source: batch_gradient_descent.py
Example 11: _fill_mapping
def _fill_mapping(self, space, source):
"""Builds a nested tuple of integers representing the mapping"""
if isinstance(space, NullSpace):
# This Space does not contain any data, and should not
# be mapped to anything
assert source == ''
return None
elif not isinstance(space, CompositeSpace):
# Space is a simple Space, source should be a simple source
if isinstance(source, tuple):
source, = source
# If (space, source) has not already been seen, insert it.
# We need both the space and the source to match.
if (space, source) in self.specs_to_index:
spec_index = self.specs_to_index[(space, source)]
else:
spec_index = self.n_unique_specs
self.specs_to_index[(space, source)] = spec_index
self.n_unique_specs += 1
return spec_index
else:
# Recursively fill the mapping, and return it
spec_mapping = tuple(
self._fill_mapping(sub_space, sub_source)
for sub_space, sub_source in safe_zip(
space.components, source))
return spec_mapping
Author: Alienfeel | Project: pylearn2 | Lines: 32 | Source: data_specs.py
Example 12: _fill_flat
def _fill_flat(self, nested, mapping, rval):
"""Auxiliary recursive function used by self.flatten"""
if isinstance(nested, CompositeSpace):
nested = tuple(nested.components)
if mapping is None:
# The corresponding Space was a NullSpace, which does
# not correspond to actual data, so nested should evaluate
# to False, and should not be included in the flattened version
if not isinstance(nested, NullSpace):
assert not nested, ("The following element is mapped to "
"NullSpace, so it should evaluate to False (for instance, "
"None, an empty string or an empty tuple), but is %s"
% nested)
return
if isinstance(mapping, int):
# "nested" should actually be a single element
idx = mapping
if isinstance(nested, tuple):
nested, = nested
if rval[idx] is None:
rval[idx] = nested
else:
assert rval[idx] == nested, ("This mapping was built "
"with the same element occurring more than once "
"in the nested representation, but current nested "
"sequence has different values (%s and %s) at "
"these positions." % (rval[idx], nested))
else:
for sub_nested, sub_mapping in safe_zip(nested, mapping):
self._fill_flat(sub_nested, sub_mapping, rval)
Author: Alienfeel | Project: pylearn2 | Lines: 33 | Source: data_specs.py
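Examples 11 and 12 are the internal recursive halves of DataSpecsMapping's flatten/nest round trip, which Examples 13 and 16 below use directly. A sketch of the public API, following those examples (the specific spaces are illustrative):

from pylearn2.space import CompositeSpace, VectorSpace
from pylearn2.utils.data_specs import DataSpecsMapping

space = CompositeSpace([VectorSpace(dim=784), VectorSpace(dim=10)])
source = ('features', 'targets')
mapping = DataSpecsMapping((space, source))
# nested specs -> flat, de-duplicated tuples
flat_space = mapping.flatten(space, return_tuple=True)
flat_source = mapping.flatten(source, return_tuple=True)
# after building one batch per flat element, restore the original nesting:
# nested_batches = mapping.nest(flat_batches)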
Example 13: test_variational_cd
def test_variational_cd():
# Verifies that VariationalCD works well with make_layer_to_symbolic_state
visible_layer = BinaryVector(nvis=100)
hidden_layer = BinaryVectorMaxPool(detector_layer_dim=500,
pool_size=1,
layer_name='h',
irange=0.05,
init_bias=-2.0)
model = DBM(visible_layer=visible_layer,
hidden_layers=[hidden_layer],
batch_size=100,
niter=1)
cost = VariationalCD(num_chains=100, num_gibbs_steps=2)
data_specs = cost.get_data_specs(model)
mapping = DataSpecsMapping(data_specs)
space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
theano_args = []
for space, source in safe_zip(space_tuple, source_tuple):
name = '%s' % (source)
arg = space.make_theano_batch(name=name)
theano_args.append(arg)
theano_args = tuple(theano_args)
nested_args = mapping.nest(theano_args)
grads, updates = cost.get_gradients(model, nested_args)
Author: BloodNg | Project: pylearn2 | Lines: 30 | Source: test_dbm.py
Example 14: inv_prop
def inv_prop(self, state_above):
if not isinstance(state_above, tuple):
expected_space = VectorSpace(self.output_space.get_total_dimension())
state_above = expected_space.format_as(state_above, self.output_space)
self.output_space.validate(state_above)
return tuple(layer.inv_prop(state)
             for layer, state in safe_zip(self.layers, state_above))
Author: mdenil | Project: parameter_prediction | Lines: 7 | Source: mlp.py
Example 15: iterator
def iterator(self, mode=None, batch_size=None, num_batches=None,
rng=None, data_specs=None, return_tuple=False):
"""
Method inherited from `pylearn2.datasets.dataset.Dataset`.
"""
self.mode = mode
self.batch_size = batch_size
self._return_tuple = return_tuple
# TODO: If there is a view_converter, we have to use it to convert
# the stored data for "features" into one that the iterator can return.
space, source = data_specs or (self.X_space, 'features')
assert isinstance(space, CompositeSpace),\
"Unexpected input space for the data."
sub_spaces = space.components
sub_sources = source
conv_fn = lambda x: x.todense().astype(theano.config.floatX)
convert = []
for sp, src in safe_zip(sub_spaces, sub_sources):
convert.append(conv_fn if src in ('features', 'targets') else None)
assert mode is not None,\
"Iteration mode not provided for %s" % str(self)
mode = resolve_iterator_class(mode)
subset_iterator = mode(self.X.shape[0], batch_size, num_batches, rng)
return FiniteDatasetIterator(self,
subset_iterator,
data_specs=data_specs,
return_tuple=return_tuple,
convert=convert)
Author: BrianMiner | Project: scikit-neuralnetwork | Lines: 32 | Source: dataset.py
Example 16: setup
def setup(self, model, dataset):
"""
Allows the training algorithm to do some preliminary configuration
*before* we actually start training the model. The dataset is provided
in case other derived training algorithms need to modify model based on
the dataset.
Parameters
----------
model : a Python object representing the model to train, loosely
    implementing the interface of models.model.Model.
dataset : a pylearn2.datasets.dataset.Dataset object used to draw
    training data.
"""
self.model = model
self.monitor = Monitor.get_monitor(model)
if self.monitoring_dataset is not None:
# Get the data specifications needed by the model
space, source = model.get_monitoring_data_specs()
# Create Theano variables for each of the individual components
# of that data. Usually, it will be X for inputs and Y for targets.
# First, we need to find these components, and put them in a tuple
mapping = DataSpecsMapping((space, source))
space_tuple = mapping.flatten(space, return_tuple=True)
source_tuple = mapping.flatten(source, return_tuple=True)
# Then, build a flat tuple of these Theano variables
ipt = tuple(sp.make_theano_batch(name='monitor_%s' % src)
for (sp, src) in safe_zip(space_tuple, source_tuple))
# Finally, organize them back into a structure expected by the
# monitoring channels of the model
nested_ipt = mapping.nest(ipt)
self.monitor.add_dataset(dataset=self.monitoring_dataset,
mode="sequential",
batch_size=self.batch_size,
num_batches=self.monitoring_batches)
channels = model.get_monitoring_channels(nested_ipt)
if not isinstance(channels, dict):
raise TypeError("model.get_monitoring_channels must return a "
"dictionary, but it returned " + str(channels))
for name in channels:
J = channels[name]
if isinstance(J, tuple):
assert len(J) == 2
J, prereqs = J
else:
prereqs = None
self.monitor.add_channel(name=name,
ipt=nested_ipt,
val=J,
prereqs=prereqs,
data_specs=(space, source))
self.first = True
self.bSetup = True
Author: Alienfeel | Project: pylearn2 | Lines: 60 | Source: default.py
Example 17: make_layer_to_symbolic_state
def make_layer_to_symbolic_state(self, num_examples, rng=None):
"""
.. todo::
Explain the difference with `make_layer_to_state`
Makes and returns a dictionary mapping layers to states. By states, we
mean here a real assignment, not a mean field state. For example, for a
layer containing binary random variables, the state will be a shared
variable containing values in {0,1}, not [0,1]. The visible layer will
be included.
Uses a dictionary so it is easy to unambiguously index a layer without
needing to remember rules like vis layer = 0, hiddens start at 1, etc.
Parameters
----------
num_examples : int
WRITEME
rng : WRITEME
"""
# Make a list of all layers
layers = [self.visible_layer] + self.hidden_layers
assert rng is not None
states = [layer.make_symbolic_state(num_examples, rng) for layer in layers]
zipped = safe_zip(layers, states)
rval = OrderedDict(zipped)
return rval
Author: JakeMick | Project: pylearn2 | Lines: 34 | Source: dbm.py
Example 18: get_gradients
def get_gradients(self, model, data, ** kwargs):
indiv_results = []
composite_specs, mapping = self.get_composite_specs_and_mapping(model)
nested_data = mapping.nest(data)
for cost, cost_data in safe_zip(self.costs, nested_data):
result = cost.get_gradients(model, cost_data, ** kwargs)
indiv_results.append(result)
grads = OrderedDict()
updates = OrderedDict()
params = model.get_params()
for coeff, packed in zip(self.coeffs, indiv_results):
g, u = packed
for param in g:
if param not in params:
raise ValueError("A shared variable (" +
str(param) +
") that is not a parameter appeared "
"a cost gradient dictionary.")
for param in g:
assert param.ndim == g[param].ndim
v = coeff * g[param]
if param not in grads:
grads[param] = v
else:
grads[param] = grads[param] + v
assert grads[param].ndim == param.ndim
assert not any([state in updates for state in u])
assert not any([state in params for state in u])
updates.update(u)
return grads, updates
Author: nitbix | Project: pylearn2 | Lines: 33 | Source: cost.py
Example 19: topo_view_to_design_mat
def topo_view_to_design_mat(self, topo_array):
"""
Returns a design matrix view/copy of topological matrix.
Parameters
----------
topo_array: numpy.ndarray
An N-D array with axis order given by self.axes. Non-batch axes'
dimension sizes must agree with corresponding sizes in self.shape.
returns: numpy.ndarray
    A design matrix with one example per row. Data is laid out in
    memory according to the default axis order ('b', 'c', 0, 1, 2).
    This will try to return a view into topo_array if possible;
    otherwise it will allocate a new ndarray.
"""
for shape_elem, axis in safe_zip(self.shape, (0, 1, 2, 'c')):
if topo_array.shape[self.axes.index(axis)] != shape_elem:
raise ValueError(
    "topo_array's %s axis has a different size "
    "(%d) from the corresponding size (%d) in "
    "self.shape.\n"
    "  self.shape: %s (uses standard axis order: 0, 1, "
    "2, 'c')\n"
    "  self.axes: %s\n"
    "  topo_array.shape: %s (should be in self.axes' order)"
    % (axis,
       topo_array.shape[self.axes.index(axis)],
       shape_elem,
       self.shape,
       self.axes,
       topo_array.shape))
topo_array_bc01 = topo_array.transpose([self.axes.index(ax)
for ax in ('b', 'c', 0, 1, 2)])
return topo_array_bc01.reshape((topo_array_bc01.shape[0],
np.prod(topo_array_bc01.shape[1:])))
Author: robintibor | Project: pylearn3dconv | Lines: 32 | Source: volumetric_dense_design_matrix.py
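The transpose-then-reshape at the end of the example is the core of the conversion: reorder the axes to ('b', 'c', 0, 1, 2), then flatten everything except the batch axis. The same round trip in plain numpy, with hypothetical shapes:

import numpy as np

topo = np.random.rand(5, 4, 4, 4, 3)        # axes ('b', 0, 1, 2, 'c')
bc012 = topo.transpose(0, 4, 1, 2, 3)       # reorder to ('b', 'c', 0, 1, 2)
design = bc012.reshape(bc012.shape[0], -1)  # one flattened example per row
assert design.shape == (5, 3 * 4 * 4 * 4)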
Example 20: _get_standard_neg
def _get_standard_neg(self, model, layer_to_chains):
params = list(model.get_params())
warnings.warn("""TODO: reduce variance of negative phase by
integrating out the even-numbered layers. The
Rao-Blackwellize method can do this for you when
expected gradient = gradient of expectation, but
doing this in general is trickier.""")
# layer_to_chains = model.rao_blackwellize(layer_to_chains)
expected_energy_p = model.energy(
layer_to_chains[model.visible_layer],
[layer_to_chains[layer] for layer in model.hidden_layers]
).mean()
samples = flatten(layer_to_chains.values())
for i, sample in enumerate(samples):
if sample.name is None:
sample.name = 'sample_'+str(i)
neg_phase_grads = OrderedDict(
safe_zip(params, T.grad(-expected_energy_p, params,
consider_constant=samples,
disconnected_inputs='ignore'))
)
return neg_phase_grads
Author: abhiggarwal | Project: pylearn2 | Lines: 25 | Source: dbm.py
Note: The pylearn2.utils.safe_zip examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many programmers; copyright in the source code remains with the original authors. For redistribution and use, please refer to the corresponding project's license. Do not reproduce without permission.