Python tensor.prod Function Code Examples


This article collects typical usage examples of the theano.tensor.prod function in Python. If you are wondering what prod does, how to call it, or where it is typically used, the curated code examples below may help.



The following shows 20 code examples of the prod function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
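Before the collected examples, here is a minimal standalone sketch of what T.prod computes (the variable names are illustrative and not taken from any project below):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
total = T.prod(x)              # product of every element
per_row = T.prod(x, axis=1)    # product along each row
f = theano.function([x], [total, per_row])
print(f(np.array([[1., 2.], [3., 4.]], dtype=theano.config.floatX)))
# product of all elements (24) and per-row products ([2, 12])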

Example 1: maxpool_3D

def maxpool_3D(input, ds, ignore_border=False):
   
    #input.dimshuffle (0, 2, 1, 3, 4)   # convert to make video in back. 
    # no need to reshuffle. 
    if input.ndim < 3:
        raise NotImplementedError('max_pool_3d requires a dimension >= 3')

    # extract nr dimensions
    vid_dim = input.ndim
    # max pool in two different steps, so we can use the 2d implementation of 
    # downsamplefactormax. First maxpool frames as usual. 
    # Then maxpool the time dimension. Shift the time dimension to the third 
    # position, so rows and cols are in the back


    # extract dimensions
    frame_shape = input.shape[-2:]
    
    # count the number of "leading" dimensions, store as dmatrix
    batch_size = T.prod(input.shape[:-2])
    batch_size = T.shape_padright(batch_size,1)
    
    # store as 4D tensor with shape: (batch_size,1,height,width)
    new_shape = T.cast(T.join(0, batch_size,
                                        T.as_tensor([1,]), 
                                        frame_shape), 'int32')
    input_4D = T.reshape(input, new_shape, ndim=4)

    # downsample mini-batch of videos in rows and cols
    op = DownsampleFactorMax((ds[1],ds[2]), ignore_border)          # so second and third dimensions of ds are for height and width
    output = op(input_4D)
    # restore to original shape                                     
    outshape = T.join(0, input.shape[:-2], output.shape[-2:])
    out = T.reshape(output, outshape, ndim=input.ndim)

    # now maxpool time
    # output (time, rows, cols), reshape so that time is in the back
    shufl = (list(range(vid_dim-3)) + [vid_dim-2]+[vid_dim-1]+[vid_dim-3])
    input_time = out.dimshuffle(shufl)
    # reset dimensions
    vid_shape = input_time.shape[-2:]
    
    # count the number of "leading" dimensions, store as dmatrix
    batch_size = T.prod(input_time.shape[:-2])
    batch_size = T.shape_padright(batch_size,1)
    
    # store as 4D tensor with shape: (batch_size,1,width,time)
    new_shape = T.cast(T.join(0, batch_size,
                                        T.as_tensor([1,]), 
                                        vid_shape), 'int32')
    input_4D_time = T.reshape(input_time, new_shape, ndim=4)
    # downsample mini-batch of videos in time
    op = DownsampleFactorMax((1,ds[0]), ignore_border)            # Here the time dimension is downsampled. 
    outtime = op(input_4D_time)
    # output 
    # restore to original shape (xxx, rows, cols, time)
    outshape = T.join(0, input_time.shape[:-2], outtime.shape[-2:])
    shufl = (list(range(vid_dim-3)) + [vid_dim-1]+[vid_dim-3]+[vid_dim-2])
    #rval = T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
    return T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
Developer: kli-nlpr, Project: Convolutional-Neural-Networks, Lines of code: 60, Source file: core.py
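A minimal usage sketch for the function above (hypothetical, not from the original repository). It assumes maxpool_3D and its module-level imports (theano.tensor as T and DownsampleFactorMax) are in scope, and an input laid out as (..., time, height, width), which is what the dimension shuffles imply:

import numpy as np
import theano
import theano.tensor as T

videos = T.TensorType(theano.config.floatX, (False,) * 5)('videos')   # (batch, channels, time, height, width)
pooled = maxpool_3D(videos, ds=(2, 2, 2))     # ds = (time, height, width) pooling factors
f = theano.function([videos], pooled)
out = f(np.random.rand(4, 3, 8, 32, 32).astype(theano.config.floatX))
print(out.shape)   # expected: (4, 3, 4, 16, 16) -- time, height and width halved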


Example 2: output_probabilistic

    def output_probabilistic(self, m_w_previous, v_w_previous):
        if (self.non_linear):
            m_in = self.m_w - m_w_previous
            v_in = self.v_w
            # We compute the mean and variance after the ReLU activation
            lam = self.lam
            v_1 = 1 + 2*lam*v_in
            v_1_inv = v_1**-1

            s_1 = T.prod(v_1,axis=1)**-0.5
            v_2 = 1 + 4*lam*v_in
            v_2_inv = v_2**-1
            s_2 = T.prod(v_2,axis=1)**-0.5
            v_inv = v_in**-1
            exponent1 = m_in**2*(1 - v_1_inv)*v_inv
            exponent1 = T.sum(exponent1,axis=1)
            exponent2 = m_in**2*(1 - v_2_inv)*v_inv
            exponent2 = T.sum(exponent2,axis=1)
            m_a = s_1*T.exp(-0.5*exponent1)
            v_a = s_2*T.exp(-0.5*exponent2) - m_a**2

            return (m_a, v_a)

        else:
            m_w_previous_with_bias = \
            T.concatenate([ m_w_previous, T.alloc(1, 1) ], 0)
            v_w_previous_with_bias = \
            T.concatenate([ v_w_previous, T.alloc(0, 1) ], 0)

            m_linear = T.dot(self.m_w, m_w_previous_with_bias) / T.sqrt(self.n_inputs)
            v_linear = (T.dot(self.v_w, v_w_previous_with_bias) + \
                T.dot(self.m_w**2, v_w_previous_with_bias) + \
                T.dot(self.v_w, m_w_previous_with_bias**2)) / self.n_inputs
            return (m_linear, v_linear)
Developer: jshe857, Project: thesis-rbfnn, Lines of code: 34, Source file: network_layer.py


Example 3: apply

        def apply(self, application, *args, **kwargs):
            # extra_ndim is a mandatory parameter, but in order not to
            # confuse with positional inputs, it has to be extracted from
            # **kwargs
            extra_ndim = kwargs.get("extra_ndim", 0)

            inputs = dict(zip(application.inputs, args))
            inputs.update(dict_subset(kwargs, application.inputs, must_have=False))
            reshaped_inputs = inputs
            # To prevent pollution of the computation graph with no-ops
            if extra_ndim > 0:
                for name, input_ in inputs.items():
                    shape, ndim = input_.shape, input_.ndim
                    # Remember extra_dims for reshaping the outputs correctly.
                    # Does not matter from which input, since we assume
                    # extra dimension match for all inputs.
                    extra_dims = shape[:extra_ndim]
                    new_first_dim = tensor.prod(shape[: extra_ndim + 1])
                    new_shape = tensor.join(0, new_first_dim[None], shape[extra_ndim + 1 :])
                    reshaped_inputs[name] = input_.reshape(new_shape, ndim=ndim - extra_ndim)
            outputs = wrapped.__get__(self, None)(**reshaped_inputs)
            if extra_ndim == 0:
                return outputs
            reshaped_outputs = []
            for output in pack(outputs):
                shape, ndim = output.shape, output.ndim
                new_shape = tensor.join(0, extra_dims, (shape[0] // tensor.prod(extra_dims))[None], shape[1:])
                reshaped_outputs.append(output.reshape(new_shape, ndim=ndim + extra_ndim))
            return reshaped_outputs
Developer: EloiZ, Project: DeepCube, Lines of code: 29, Source file: wrappers.py


Example 4: _theano_cpu_multi_batch_beam_grad

def _theano_cpu_multi_batch_beam_grad(array, start_idxs, batch_lens, beam_width, wrap_mode, pad_left=0, pad_right=0, idx_dim=0, batch_dim=1, output_grad=None):
  # Note: This is slow and hacky. This will create an index-array of the size of the original array.
  # This is calculated on the CPU. The subtensor then can be done on the GPU, but we should avoid the first part.
  D_beam = output_grad
  prod_array_shape = T.prod(array.shape)
  prod_pad_left_shape = T.prod(pad_left.shape)
  prod_pad_right_shape = T.prod(pad_right.shape)
  D_array_tmp_size = prod_array_shape
  if wrap_mode == "pad":
    D_array_tmp_size += prod_pad_left_shape + prod_pad_right_shape
  D_array_tmp_flat = T.zeros([D_array_tmp_size], dtype="float32")  # with pad values
  if wrap_mode == "pad":
    # Calculate the indices for D_pad_left/D_pad_right in D_array_tmp_flat.
    pad_left_idxs = T.arange(prod_pad_left_shape) + prod_array_shape
    pad_right_idxs = T.arange(prod_pad_right_shape) + prod_array_shape + prod_pad_left_shape
    pad_left_idxs = pad_left_idxs.reshape(pad_left.shape)
    pad_right_idxs = pad_right_idxs.reshape(pad_right.shape)
  else:
    pad_left_idxs = pad_right_idxs = 0
  all_idxs = T.arange(T.prod(array.shape)).reshape(array.shape)
  idxs = multi_batch_beam(array=all_idxs, start_idxs=start_idxs, batch_lens=batch_lens, beam_width=beam_width,
                          wrap_mode=wrap_mode,
                          pad_left=pad_left_idxs, pad_right=pad_right_idxs,
                          idx_dim=idx_dim, batch_dim=batch_dim)
  D_array_tmp_flat = T.inc_subtensor(D_array_tmp_flat[idxs.flatten()], D_beam.flatten())
  if wrap_mode == "pad":
    D_array = D_array_tmp_flat[:prod_array_shape].reshape(array.shape)
    D_pad_left = D_array_tmp_flat[pad_left_idxs.flatten()].reshape(pad_left.shape)
    D_pad_right = D_array_tmp_flat[pad_right_idxs.flatten()].reshape(pad_right.shape)
  else:
    D_array = D_array_tmp_flat.reshape(array.shape)
    D_pad_left = D_pad_right = T.DisconnectedType()()

  return D_array, D_pad_left, D_pad_right
Developer: atuxhe, Project: returnn, Lines of code: 34, Source file: MultiBatchBeam.py


Example 5: tensordot

def tensordot(a, b, axes=2):
    """
    implementation of tensordot that reduces to a regular matrix product. This allows tensordot to be GPU accelerated,
    which isn't possible with the default Theano implementation (which is just a wrapper around numpy.tensordot).
    based on code from Tijmen Tieleman's gnumpy http://www.cs.toronto.edu/~tijmen/gnumpy.html
    """
    if numpy.isscalar(axes):
        # if 'axes' is a number of axes to multiply and sum over (trailing axes
        # of a, leading axes of b), we can just reshape and use dot.
        outshape = tensor.concatenate([a.shape[:a.ndim - axes], b.shape[axes:]])
        outndim = a.ndim + b.ndim - 2*axes
        a_reshaped = a.reshape((tensor.prod(a.shape[:a.ndim - axes]), tensor.prod(a.shape[a.ndim - axes:])))
        b_reshaped = b.reshape((tensor.prod(b.shape[:axes]), tensor.prod(b.shape[axes:])))
        return tensor.dot(a_reshaped, b_reshaped).reshape(outshape, ndim=outndim)
    elif len(axes) == 2:
        # if 'axes' is a pair of axis lists, we first shuffle the axes of a and
        # b to reduce this to the first case (note the recursion).
        a_other, b_other = tuple(axes[0]), tuple(axes[1])
        num_axes = len(a_other)
        a_order = tuple(x for x in tuple(xrange(a.ndim)) if x not in a_other) + a_other
        b_order = b_other + tuple(x for x in tuple(xrange(b.ndim)) if x not in b_other)
        a_shuffled = a.dimshuffle(a_order)
        b_shuffled = b.dimshuffle(b_order)
        return tensordot(a_shuffled, b_shuffled, num_axes)
    else:
        raise ValueError("Axes should be scalar valued or a list/tuple of len 2.")
Developer: gupta-abhay, Project: deep-voteaggregate, Lines of code: 26, Source file: misc.py
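A quick usage sketch (illustrative, not part of the original module), assuming the tensordot defined above and Theano's tensor module are available:

import numpy as np
import theano
from theano import tensor

a = tensor.tensor3('a')            # e.g. shape (2, 3, 4)
b = tensor.matrix('b')             # e.g. shape (4, 5)
c = tensordot(a, b, axes=1)        # contract the last axis of a with the first axis of b
f = theano.function([a, b], c)
out = f(np.random.rand(2, 3, 4).astype(theano.config.floatX),
        np.random.rand(4, 5).astype(theano.config.floatX))
print(out.shape)   # expected: (2, 3, 5), matching numpy.tensordot with axes=1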


Example 6: __init__

    def __init__(self, rng, input, n_in, n_out, params=None):
        self.input = input
        if params:
            self.W = params[0]
        else:
            W_values = np.asarray(
                rng.uniform(
                    low=-np.sqrt(6. / (n_in + n_out)),
                    high=np.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype=theano.config.floatX
            )
            self.W = theano.shared(value=W_values, name='W', borrow=True)

        if params:
            self.b = params[1]
        else:
            b_values = np.zeros((n_out,), dtype=theano.config.floatX)
            self.b = theano.shared(value=b_values, name='b', borrow=True)

        self.output = T.nnet.relu(T.dot(input, self.W) + self.b)

        self.params = [self.W, self.b]

        self.mem_size = (T.prod(self.W.shape) + T.prod(self.b.shape)) * 4
Developer: gauthamvasan, Project: Neurohex, Lines of code: 26, Source file: layers.py


Example 7: kl_normal_diagonal

def kl_normal_diagonal(mu1, sigma_diag1, mu2, sigma_diag2, dim):
    det1 = T.prod(sigma_diag1)
    det2 = T.prod(sigma_diag2)
    inv_sigma_diag2 = 1 / sigma_diag2
    mu_diff = mu2 - mu1
    ret = 0.5 * (
        T.log(det2 / det1)
        - dim
        + T.sum(inv_sigma_diag2 * sigma_diag1)
        + T.dot(T.dot(mu_diff.T, inv_sigma_diag2), mu_diff)
    )
    return ret
Developer: stablum, Project: reimplementations, Lines of code: 12, Source file: vae.py
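As a rough sanity check (a hypothetical snippet, not from the original repository): the divergence of a diagonal Gaussian from itself should come out as zero.

import numpy as np
import theano
import theano.tensor as T

mu = T.vector('mu')
sigma = T.vector('sigma')
kl = kl_normal_diagonal(mu, sigma, mu, sigma, dim=3)
f = theano.function([mu, sigma], kl)
print(f(np.zeros(3, dtype=theano.config.floatX),
        np.ones(3, dtype=theano.config.floatX)))   # should print zero(s)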


Example 8: max_pool_3d

def max_pool_3d(input, ds, ignore_border=False):
	"""
		Takes as input a N-D tensor, where N >= 3. It downscales the input video by
		the specified factor, by keeping only the maximum value of non-overlapping
		patches of size (ds[0],ds[1],ds[2]) (time, height, width)  
		
		:type input: N-D theano tensor of input images.
		:param input: input images. Max pooling will be done over the 3 last dimensions.
		:type ds: tuple of length 3
		:param ds: factor by which to downscale. (2,2,2) will halve the video in each dimension.
		:param ignore_border: boolean value. Example when True, (5,5,5) input with ds=(2,2,2) will generate a
		(2,2,2) output. (3,3,3) otherwise.
	"""
	if input.ndim < 3:
		raise NotImplementedError('max_pool_3d requires a dimension >= 3')
		
	vid_dim = input.ndim
	#Maxpool frame
	frame_shape = input.shape[-2:]

	# count the number of "leading" dimensions, store as dmatrix
	batch_size = T.prod(input.shape[:-2])
	batch_size = T.shape_padright(batch_size,1)
	new_shape = T.cast(T.join(0, batch_size,T.as_tensor([1,]),frame_shape), 'int32')
	
	input_4D = T.reshape(input, new_shape, ndim=4)
	# downsample mini-batch of videos in rows and cols
	op = DownsampleFactorMax((ds[1],ds[2]), ignore_border)
	output = op(input_4D)
	# restore to original shape
	outshape = T.join(0, input.shape[:-2], output.shape[-2:])
	out = T.reshape(output, outshape, ndim=input.ndim)
	
	#Maxpool time 
	# output (time, rows, cols), reshape so that time is in the back
	shufl = (list(range(vid_dim-4)) + list(range(vid_dim-3,vid_dim))+[vid_dim-4])
	input_time = out.dimshuffle(shufl)
	# reset dimensions
	vid_shape = input_time.shape[-2:]
	# count the number of "leading" dimensions, store as dmatrix
	batch_size = T.prod(input_time.shape[:-2])
	batch_size = T.shape_padright(batch_size,1)
	# store as 4D tensor with shape: (batch_size,1,width,time)
	new_shape = T.cast(T.join(0, batch_size,T.as_tensor([1,]),vid_shape), 'int32')
	input_4D_time = T.reshape(input_time, new_shape, ndim=4)
	# downsample mini-batch of videos in time
	op = DownsampleFactorMax((1,ds[0]), ignore_border)
	outtime = op(input_4D_time)
	# restore to original shape (xxx, rows, cols, time)
	outshape = T.join(0, input_time.shape[:-2], outtime.shape[-2:])
	shufl = (list(range(vid_dim-4)) + [vid_dim-1] + list(range(vid_dim-4,vid_dim-1)))
	#shufl = (list(range(vid_dim-3)) + [vid_dim-1]+[vid_dim-3]+[vid_dim-2])
	return T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
Developer: IITM-DONLAB, Project: python-dnn, Lines of code: 53, Source file: max_pool.py
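A usage sketch under the layout that the dimension shuffles above imply, i.e. the time axis stored at position ndim-4, e.g. (batch, time, channels, height, width). This call is illustrative, not from the original project, and assumes the module's own imports (theano.tensor as T, DownsampleFactorMax) are available:

import numpy as np
import theano
import theano.tensor as T

videos = T.TensorType(theano.config.floatX, (False,) * 5)('videos')   # (batch, time, channels, height, width)
pooled = max_pool_3d(videos, ds=(2, 2, 2))    # (time, height, width) pooling factors
f = theano.function([videos], pooled)
out = f(np.random.rand(4, 8, 3, 32, 32).astype(theano.config.floatX))
print(out.shape)   # expected: (4, 4, 3, 16, 16)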


Example 9: get_norms

def get_norms(model, gradients):
    """Compute norm of weights and their gradients divided by the number of elements"""
    norms = []
    grad_norms = []
    for param_name, param in model.params.iteritems():
        norm = T.sqrt(T.sum(T.square(param))) / T.prod(param.shape.astype(theano.config.floatX))
        norm.name = 'norm_' + param_name
        norms.append(norm)
        grad = gradients[param]
        grad_norm = T.sqrt(T.sum(T.square(grad))) / T.prod(grad.shape.astype(theano.config.floatX))
        grad_norm.name = 'grad_norm_' + param_name
        grad_norms.append(grad_norm)
    return norms, grad_norms
Developer: Sohl-Dickstein, Project: Diffusion-Probabilistic-Models, Lines of code: 13, Source file: util.py
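A minimal, hypothetical way to call get_norms (the DummyModel stub, W and cost below are invented for illustration; the .iteritems() call above implies Python 2):

import numpy as np
import theano
import theano.tensor as T

class DummyModel(object):      # stand-in for the real model object
    pass

W = theano.shared(np.random.randn(3, 4).astype(theano.config.floatX), name='W')
model = DummyModel()
model.params = {'W': W}        # .iteritems() assumes a Python 2 dict

cost = T.sum(W ** 2)
gradients = {W: T.grad(cost, W)}

norms, grad_norms = get_norms(model, gradients)
f = theano.function([], norms + grad_norms)
print(f())   # norm of W and of its gradient, each divided by the element count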


Example 10: img_2_neibs_with_chans

def img_2_neibs_with_chans(inputs_sym, patch_size):
    flat_patches = neighbours.images2neibs(inputs_sym, patch_size, (1,1))
    topo_flat_patches = T.reshape(flat_patches,(inputs_sym.shape[0],
                                            inputs_sym.shape[1],
                                            inputs_sym.shape[2]-patch_size[0]+1,
                                            inputs_sym.shape[3]-patch_size[1]+1,
                                            patch_size[0],
                                            patch_size[1]))


    flat_patches = topo_flat_patches.dimshuffle(0,2,3,1,4,5)
    flat_patches = T.reshape(flat_patches, (T.prod(flat_patches.shape[:3]),
                                                 T.prod(flat_patches.shape[3:])))
    return flat_patches
Developer: robintibor, Project: braindecode, Lines of code: 14, Source file: mixture.py
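An illustrative call (not from the original project), assuming img_2_neibs_with_chans and its neighbours import (theano.tensor.nnet.neighbours) are in scope. Each output row is one image patch with its channels flattened together:

import numpy as np
import theano
import theano.tensor as T

imgs = T.tensor4('imgs')                       # (batch, channels, height, width)
patches = img_2_neibs_with_chans(imgs, (3, 3))
f = theano.function([imgs], patches)
out = f(np.random.rand(2, 3, 8, 8).astype(theano.config.floatX))
print(out.shape)   # expected: (2*6*6, 3*3*3) = (72, 27)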


Example 11: logp_theano_claims

def logp_theano_claims(l,nObs,T,Z,L,X,O_on):

    #O_on = O_on.astype(np.bool)
    # tempVec is 1-X*Z
    tempVec =  (1. - X.reshape((nObs,1,X.shape[1]))*(Z.T).reshape((1,Z.shape[1],Z.shape[0])))
    # Add the contribution from O = 1
    logLike = TT.log(1-(1-TT.tile(L[np.newaxis,:],(nObs,1))[O_on.nonzero()])*TT.prod(tempVec[O_on.nonzero()],axis=1,no_zeros_in_input=True)).sum()
    #logLike = TT.log(1-(1-TT.tile(L[np.newaxis,:],(nObs,1))[O_on.nonzero()])*tempVec[O_on.nonzero()].prod(axis=1,no_zeros_in_input=True)).sum()
    #logLike = TT.log(1-(1-TT.tile(L[np.newaxis,:],(nObs,1))[O_on.nonzero()])*tempVec[O_on.nonzero()].prod(axis=1)).sum()

    # Add the contribution from O = 0
    logLike += TT.log((1-TT.tile(L[np.newaxis,:],(nObs,1))[(1-O_on).nonzero()])*TT.prod(tempVec[(1-O_on).nonzero()],axis=1,no_zeros_in_input=True)).sum()
    #logLike += TT.log((1-TT.tile(L[np.newaxis,:],(nObs,1))[(1-O_on).nonzero()])*tempVec[(1-O_on).nonzero()].prod(axis=1)).sum()

    return logLike
Developer: clinicalml, Project: ContinuousTimeMarkovModel, Lines of code: 15, Source file: distributions.py


Example 12: max_pool_3d

def max_pool_3d(input, ds, ignore_border=False):
    # reject any input that is not of shape [n, c, x, y, z]
    if input.ndim != 5:
        raise NotImplementedError(
            'max_pool_3d requires a input [n, c, x, y, z]')

    # number of input dimensions
    vid_dim = input.ndim

    # shape of the [y, z] frame dimensions
    frame_shape = input.shape[-2:]

    # batch size:
    # multiply together the sizes of all dimensions except the frame dimensions
    batch_size = T.prod(input.shape[:-2])
    # http://deeplearning.net/software/theano/library/tensor/basic.html#theano.tensor.shape_padright
    batch_size = T.shape_padright(batch_size, 1)


    new_shape = T.cast(T.join(0, batch_size, T.as_tensor([1, ]), frame_shape),
                       'int32')
    input_4D = T.reshape(input, new_shape, ndim=4)

    op = DownsampleFactorMax((ds[1], ds[2]), ignore_border)
    output = op(input_4D)
    outshape = T.join(0, input.shape[:-2], output.shape[-2:])
    out = T.reshape(output, outshape, ndim=input.ndim)

    shufl = (
        list(range(vid_dim - 3)) + [vid_dim - 2] + [vid_dim - 1] + [
            vid_dim - 3])
    input_time = out.dimshuffle(shufl)
    vid_shape = input_time.shape[-2:]

    batch_size = T.prod(input_time.shape[:-2])
    batch_size = T.shape_padright(batch_size, 1)

    new_shape = T.cast(T.join(0, batch_size,
                              T.as_tensor([1, ]),
                              vid_shape), 'int32')
    input_4D_time = T.reshape(input_time, new_shape, ndim=4)
    op = DownsampleFactorMax((1, ds[0]), ignore_border)
    outtime = op(input_4D_time)
    outshape = T.join(0, input_time.shape[:-2], outtime.shape[-2:])
    shufl = (
        list(range(vid_dim - 3)) + [vid_dim - 1] + [vid_dim - 3] + [
            vid_dim - 2])
    return T.reshape(outtime, outshape, ndim=input.ndim).dimshuffle(shufl)
Developer: kanairen, Project: CubicCNN, Lines of code: 48, Source file: __pool.py


Example 13: _backward_negative_z

def _backward_negative_z(inputs, weights, normed_relevances, bias=None):
    inputs_plus = inputs * T.gt(inputs, 0)
    weights_plus = weights * T.gt(weights, 0)
    inputs_minus = inputs * T.lt(inputs, 0)
    weights_minus = weights * T.lt(weights, 0)
    # Compute weights+ * inputs- and weights- * inputs+
    negative_part_a = conv2d(
        normed_relevances, weights_plus.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
    )
    negative_part_a *= inputs_minus
    negative_part_b = conv2d(
        normed_relevances, weights_minus.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
    )
    negative_part_b *= inputs_plus

    together = negative_part_a + negative_part_b
    if bias is not None:
        bias_negative = bias * T.lt(bias, 0)
        bias_relevance = bias_negative.dimshuffle("x", 0, "x", "x") * normed_relevances
        # Divide bias by weight size before convolving back
        # mean across channel, 0, 1 dims (hope this is correct?)
        fraction_bias = bias_relevance / T.prod(weights.shape[1:]).astype(theano.config.floatX)
        bias_rel_in = conv2d(
            fraction_bias, T.ones_like(weights).dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
        )
        together += bias_rel_in
    return together
Developer: robintibor, Project: braindecode, Lines of code: 27, Source file: heatmap.py


Example 14: relevance_conv_z

def relevance_conv_z(out_relevances, inputs, weights, bias=None):
    norms_for_relevances = conv2d(inputs, weights)
    if bias is not None:
        norms_for_relevances += bias.dimshuffle("x", 0, "x", "x")
    # stabilize
    # prevent division by 0 and division by small numbers
    eps = 1e-4
    norms_for_relevances += T.sgn(norms_for_relevances) * eps
    norms_for_relevances += T.eq(norms_for_relevances, 0) * eps

    normed_relevances = out_relevances / norms_for_relevances
    # upconv
    in_relevances = conv2d(normed_relevances, weights.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full")

    in_relevances_proper = in_relevances * inputs

    if bias is not None:
        bias_relevance = bias.dimshuffle("x", 0, "x", "x") * normed_relevances
        # Divide bias by weight size before convolving back
        # mean across channel, 0, 1 dims (hope this is correct?)
        fraction_bias = bias_relevance / T.prod(weights.shape[1:]).astype(theano.config.floatX)
        bias_rel_in = conv2d(
            fraction_bias, T.ones_like(weights).dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
        )
        in_relevances_proper += bias_rel_in

    return in_relevances_proper
Developer: robintibor, Project: braindecode, Lines of code: 27, Source file: heatmap.py


Example 15: relevance_conv_z_plus

def relevance_conv_z_plus(out_relevances, inputs, weights, bias=None):
    if bias is not None:
        log.warning("Bias not respected for conv z_plus")
    # hack for negative inputs
    # inputs = T.abs_(inputs)
    weights_plus = weights * T.gt(weights, 0)
    norms_for_relevances = conv2d(inputs, weights_plus)
    # prevent division by 0...
    # adds 1 to every entry that is 0 -> sets 0s to 1
    relevances_are_0 = T.eq(norms_for_relevances, 0)
    norms_for_relevances += relevances_are_0 * 1

    normed_relevances = out_relevances / norms_for_relevances
    # upconv
    in_relevances = conv2d(normed_relevances, weights_plus.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full")

    in_relevances_proper = in_relevances * inputs

    # Correct for those parts where all inputs of a relevance were
    # zero; spread the relevance equally among them
    pool_ones = T.ones(weights_plus.shape, dtype=np.float32)
    # mean across channel, 0, 1 dims (hope this is correct?)
    pool_fractions = pool_ones / T.prod(weights_plus.shape[1:]).astype(theano.config.floatX)
    in_relevances_from_0 = conv2d(
        out_relevances * relevances_are_0, pool_fractions.dimshuffle(1, 0, 2, 3), subsample=(1, 1), border_mode="full"
    )

    in_relevances_proper += in_relevances_from_0

    return in_relevances_proper
Developer: robintibor, Project: braindecode, Lines of code: 30, Source file: heatmap.py


Example 16: __init__

    def __init__(self, input, n_in, n_out, prefix='hidden', W=None, b=None,
            activation=T.tanh):
        self.input = input
        if W is None:
            W_values = numpy.asarray(
                np.random.uniform(
                    low=-numpy.sqrt(6. / (n_in + n_out)),
                    high=numpy.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype=theano.config.floatX
            )
            if activation == theano.tensor.nnet.sigmoid:
                W_values *= 4

            W = theano.shared(value=W_values, name=_p(prefix, 'W'), borrow=True)

        if b is None:
            b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
            b = theano.shared(value=b_values, name=_p(prefix, 'b'), borrow=True)

        self.W = W
        self.b = b

        self.pre_activation = T.dot(input, self.W) + self.b
        if activation is None:
            self.output = self.pre_activation
        elif activation == T.nnet.softmax:
            shape= self.pre_activation.shape
            tmp = self.pre_activation.reshape((T.prod(shape[:-1]), shape[-1]))
            self.output = T.nnet.softmax(tmp).reshape(shape)
        else:
            self.output = activation(self.pre_activation)

        self.params = {_p(prefix, 'W'):W, _p(prefix, 'b'):b}
Developer: Duum, Project: cnn-lstm-ctc, Lines of code: 35, Source file: common_layers.py
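The softmax branch above works around T.nnet.softmax expecting a 2-D input: it collapses all leading dimensions with T.prod, applies the row-wise softmax, then restores the original shape. A standalone sketch of the same trick (names are illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')                                   # e.g. (batch, time, classes)
shape = x.shape
flat = x.reshape((T.prod(shape[:-1]), shape[-1]))    # collapse the leading dimensions
soft = T.nnet.softmax(flat).reshape(shape)           # softmax over the last axis
f = theano.function([x], soft)
out = f(np.random.rand(2, 5, 7).astype(theano.config.floatX))
print(out.sum(axis=-1))   # every slice along the last axis sums to 1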


Example 17: unpool_switch_2d

def unpool_switch_2d(input, ds, st=None,
            index_type='flattened', index_scope='local',
            original_input_shape=None):

    if input.ndim < 3:
        raise NotImplementedError('unpool_switched_2d requires a dimension >= 3')
    if input.ndim == 4:
        op = UnpoolSwitch(ds, st=st,
                  index_type=index_type, index_scope=index_scope,
                  original_input_shape=original_input_shape)
        output = op(input)
        return output

    # extract image dimensions
    img_shape = input.shape[-3:]

    # count the number of "leading" dimensions, store as dmatrix
    batch_size = T.prod(input.shape[:-3])
    batch_size = T.shape_padright(batch_size, 1)

    # store as 4D tensor with shape: (batch_size,1,height,width)
    new_shape = T.cast(T.join(0, batch_size,
                                        img_shape), 'int64')
    input_4D = T.reshape(input, new_shape, ndim=4)

    # downsample mini-batch of images
    op = UnpoolSwitch(ds, st=st,
              index_type=index_type, index_scope=index_scope,
              original_input_shape=original_input_shape)
    output = op(input_4D)

    # restore to original shape
    outshp = T.join(0, input.shape[:-2], output.shape[-2:])
    return T.reshape(output, outshp, ndim=input.ndim)
Developer: bokorn, Project: Keras-and-Theano-layers-for-Switched-Pooling, Lines of code: 34, Source file: theano_switched_pooling.py


Example 18: build_obj

def build_obj(z_sample,z_mu,z_sigma,x_orig,x_out):
    z_sigma_fixed = z_sigma
    z_sigma_inv = 1/(z_sigma_fixed)
    det_z_sigma = T.prod(z_sigma)
    C = 1./(T.sqrt(((2*np.pi)**z_dim) * det_z_sigma))
    log_q_z_given_x = - 0.5*T.dot(z_sigma_inv, ((z_sample-z_mu)**2).T) + T.log(C) # log(C) can be omitted
    q_z_given_x = C * T.exp(log_q_z_given_x)
    log_p_x_given_z = -(1/(x_sigma))*(((x_orig-x_out)**2).sum()) # because p(x|z) is gaussian
    log_p_z = - (z_sample**2).sum() # gaussian prior with mean 0 and cov I
    #reconstruction_error_const = (0.5*(x_dim*np.log(np.pi)+1)).astype('float32')
    reconstruction_error_proper = 0.5*T.sum((x_orig-x_out)**2)
    reconstruction_error = reconstruction_error_proper #+ reconstruction_error_const
    regularizer = kl_normal_diagonal_vs_unit(z_mu,z_sigma,z_dim)
    obj = reconstruction_error + regularizer
    obj_scalar = obj.reshape((),ndim=0)
    return obj_scalar,[
        reconstruction_error, #1
        regularizer,#2
        log_q_z_given_x,#3
        det_z_sigma,#4
        q_z_given_x,#5
        log_p_x_given_z,#6
        log_p_z,#7
        z_sample,#8
        z_mu,#9
        z_sigma,#10,
        z_sigma_inv,#11
        z_sigma_fixed,#12
        C,#13
        reconstruction_error_proper,#14
    ]
Developer: stablum, Project: reimplementations, Lines of code: 31, Source file: vae.py


Example 19: process

    def process(self, input, tparams, BNparams):
        mode = 'full' if self.border_mode == 'same' else self.border_mode
        output = conv.conv2d(
            input=input,
            filters=tparams[p_(self.prefix, 'W')],
            image_shape=[self.batch_size, self.n_in[0]] + self.image_shape,
            filter_shape=[self.n_out] + self.n_in,
            border_mode=mode,
            subsample=self.stride)

        if self.border_mode == 'same':
            a1 = (self.filter_size[0] - 1) // 2
            b1 = (self.filter_size[1] - 1) // 2
            a2 = self.filter_size[0] - a1
            b2 = self.filter_size[1] - b1
            if a2 == 1:
                if b2 == 1:
                    output = output[:, :, a1:, b1:]
                else:
                    output = output[:, :, a1:, b1:-b2+1]
            else:
                if b2 == 1:
                    output = output[:, :, a1:-a2+1, b1:]
                else:
                    output = output[:, :, a1:-a2+1, b1:-b2+1]

        if self.with_bias:
            output += tparams[p_(self.prefix, 'b')].dimshuffle('x', 0, 'x', 'x')

        self.BN_mean = T.mean(output, axis=[0, 2, 3])
        m2 = (1 + 1 / (T.prod(output.shape) / self.n_out - 1)).astype(floatX)
        self.BN_std = T.sqrt(m2 * T.var(output, axis=[0, 2, 3])
                             + npt(self.BN_eps))
        return output
Developer: wufangjie, Project: dnn, Lines of code: 34, Source file: layers.py


Example 20: liks

    def liks(self, a_na, b_nb):
        mu_nd = a_na[:, :self.d]
        sig_nd = a_na[:, self.d:]
        prodsig_n = TT.prod(sig_nd, axis=1)
        out = TT.exp(TT.square((mu_nd - b_nb) / sig_nd).sum(axis=1) * -.5) / (np.cast[floatX](np.sqrt(2 * np.pi) ** self.d) * prodsig_n)
        assert out.dtype == floatX
        return out
Developer: SFPD, Project: rlreloaded, Lines of code: 7, Source file: cpd.py



Note: The theano.tensor.prod examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.

