Python tensor.dot Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.dot, drawn from real open-source projects. If you have been wondering exactly what the dot function does, how to call it, or what it looks like in practice, the curated examples below may help.



Twenty code examples of the dot function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
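
As a quick orientation before the project excerpts, here is a minimal, self-contained sketch (illustrative only, not taken from any of the projects below) of the pattern the examples build on: tensor.dot constructs a symbolic product, and theano.function compiles the resulting graph into a callable:

    import numpy
    import theano
    import theano.tensor as T

    # Symbolic inputs: a 2-D matrix and a 1-D vector.
    x = T.matrix('x')
    w = T.vector('w')

    # T.dot builds a symbolic matrix-vector product; nothing is computed yet.
    y = T.dot(x, w)

    # Compile the graph into a callable function and evaluate it on numpy data.
    f = theano.function([x, w], y)
    print(f(numpy.ones((2, 3)), numpy.arange(3.0)))  # -> [ 3.  3.]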

Example 1: __init__

    def __init__(self,
                 input=tensor.dvector('input'),
                 target=tensor.dvector('target'),
                 n_input=1, n_hidden=1, n_output=1, lr=1e-3, **kw):
        super(NNet, self).__init__(**kw)

        self.input = input
        self.target = target
        self.lr = shared(lr, 'learning_rate')
        self.w1 = shared(numpy.zeros((n_hidden, n_input)), 'w1')
        self.w2 = shared(numpy.zeros((n_output, n_hidden)), 'w2')
        # print self.lr.type

        self.hidden = sigmoid(tensor.dot(self.w1, self.input))
        self.output = tensor.dot(self.w2, self.hidden)
        self.cost = tensor.sum((self.output - self.target)**2)

        self.sgd_updates = {
            self.w1: self.w1 - self.lr * tensor.grad(self.cost, self.w1),
            self.w2: self.w2 - self.lr * tensor.grad(self.cost, self.w2)}

        self.sgd_step = pfunc(
            params=[self.input, self.target],
            outputs=[self.output, self.cost],
            updates=self.sgd_updates)

        self.compute_output = pfunc([self.input], self.output)

        self.output_from_hidden = pfunc([self.hidden], self.output)
Developer: 12190143, Project: Theano, Lines: 29, Source: test_misc.py


Example 2: generate

    def generate(self, h_, c_, x_):
        h_a = []
        c_a = []
        for it in range(self.n_levels):
            preact = T.dot(x_, self.W[it])
            preact += T.dot(h_[it], self.U[it]) + self.b[it]

            i = T.nnet.sigmoid(self.slice(preact, 0, self.n_dim))
            f = T.nnet.sigmoid(self.slice(preact, 1, self.n_dim))
            o = T.nnet.sigmoid(self.slice(preact, 2, self.n_dim))
            c = T.tanh(self.slice(preact, 3, self.n_dim))

            c = f * c_[it] + i * c
            h = o * T.tanh(c)

            h_a.append(h)
            c_a.append(c)

            x_ = h

        q = T.dot(h, self.L) + self.b0
        # mask = T.concatenate([T.alloc(np_floatX(1.), q.shape[0] - 1), T.alloc(np_floatX(0.), 1)])
        prob = T.nnet.softmax(q / 1)

        return prob, T.stack(h_a).squeeze(), T.stack(c_a)[0].squeeze()
Developer: velicue, Project: char-rnn-theano, Lines: 25, Source: lstm.py


Example 3: __init__

  def __init__(self, input, nrLayers, weights, biases,
               visibleDropout, hiddenDropout,
               activationFunction, classificationActivationFunction):

    self.input = input

    self.classificationWeights = classificationWeightsFromTestWeights(weights,
                                            visibleDropout=visibleDropout,
                                            hiddenDropout=hiddenDropout)

    nrWeights = nrLayers - 1

    currentLayerValues = input

    for stage in xrange(nrWeights - 1):
      w = self.classificationWeights[stage]
      b = biases[stage]
      linearSum = T.dot(currentLayerValues, w) + b
      currentLayerValues = activationFunction.deterministic(linearSum)

    self.lastHiddenActivations = currentLayerValues

    w = self.classificationWeights[nrWeights - 1]
    b = biases[nrWeights - 1]
    linearSum = T.dot(currentLayerValues, w) + b
    currentLayerValues = classificationActivationFunction.deterministic(linearSum)

    self.output = currentLayerValues
Developer: mikimaus78, Project: pydeeplearn, Lines: 28, Source: deepbelief.py


Example 4: __init__

    def __init__(self, rng, input, n_in, n_out, n_component):
        self.input = input

        W_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_out*n_component))
        self.W_mu = theano.shared(value=numpy.asarray(W_value, dtype=theano.config.floatX), name='W_mu', borrow=True)

        self.W_sigma = theano.shared(value=numpy.asarray(W_value.copy(), dtype=theano.config.floatX), name='W_sigma', borrow=True)

        W_mix_value = rng.normal(0.0, 1.0/numpy.sqrt(n_in), size=(n_in, n_component))
        self.W_mix = theano.shared(value=numpy.asarray(W_mix_value, dtype=theano.config.floatX), name='W_mix', borrow=True)

        self.mu = T.dot(self.input, self.W_mu)    #assume linear output for mean vectors
        self.sigma = T.nnet.softplus(T.dot(self.input, self.W_sigma)) # + 0.0001
        #self.sigma = T.exp(T.dot(self.input, self.W_sigma)) # + 0.0001

        self.mix = T.nnet.softmax(T.dot(self.input, self.W_mix))

        self.delta_W_mu    = theano.shared(value = numpy.zeros((n_in, n_out*n_component),
                                           dtype=theano.config.floatX), name='delta_W_mu')
        self.delta_W_sigma = theano.shared(value = numpy.zeros((n_in, n_out*n_component),
                                           dtype=theano.config.floatX), name='delta_W_sigma')
        self.delta_W_mix   = theano.shared(value = numpy.zeros((n_in, n_component),
                                           dtype=theano.config.floatX), name='delta_W_mix')


        self.params = [self.W_mu, self.W_sigma, self.W_mix]
        self.delta_params = [self.delta_W_mu, self.delta_W_sigma, self.delta_W_mix]
Developer: CSTR-Edinburgh, Project: merlin, Lines: 27, Source: mdn_layers.py


Example 5: free_energy_at_beta

def free_energy_at_beta(model, samples, beta, pa_bias=None,
                        marginalize_odd=True):
    """
    Computes the free-energy of the layer samples in `samples`, for model p_k.

    Inputs
    ------
    samples: list of theano.shared
        Shared variables representing samples of the model's layers.
    beta: T.scalar
        Inverse temperature beta_k of model p_k(h1) at which to measure the free-energy.

    Returns
    -------
    Symbolic variable, free-energy of `samples`, at inverse temperature beta.
    """
    keep_idx = numpy.arange(not marginalize_odd, model.depth, 2)
    marg_idx = numpy.arange(marginalize_odd, model.depth, 2)

    # contribution of biases
    fe = 0.
    for i in keep_idx:
        fe -= T.dot(samples[i], model.bias[i]) * beta
    # contribution of biases
    for i in marg_idx:
        from_im1 = T.dot(samples[i-1], model.W[i]) if i >= 1 else 0.
        from_ip1 = T.dot(samples[i+1], model.W[i+1].T) if i < model.depth-1 else 0
        net_input = (from_im1 + from_ip1 + model.bias[i]) * beta
        fe -= T.sum(T.nnet.softplus(net_input), axis=1)

    fe -= T.dot(samples[not marginalize_odd], pa_bias) * (1. - beta)

    return fe
Developer: gdesjardins, Project: DBM, Lines: 33, Source: ais.py


Example 6: _build_marginal_likelihood_logp

 def _build_marginal_likelihood_logp(self, y, X, Xu, sigma):
     sigma2 = tt.square(sigma)
     Kuu = self.cov_func(Xu)
     Kuf = self.cov_func(Xu, X)
     Luu = cholesky(stabilize(Kuu))
     A = solve_lower(Luu, Kuf)
     Qffd = tt.sum(A * A, 0)
     if self.approx == "FITC":
         Kffd = self.cov_func(X, diag=True)
         Lamd = tt.clip(Kffd - Qffd, 0.0, np.inf) + sigma2
         trace = 0.0
     elif self.approx == "VFE":
         Lamd = tt.ones_like(Qffd) * sigma2
         trace = ((1.0 / (2.0 * sigma2)) *
                  (tt.sum(self.cov_func(X, diag=True)) -
                   tt.sum(tt.sum(A * A, 0))))
     else:  # DTC
         Lamd = tt.ones_like(Qffd) * sigma2
         trace = 0.0
     A_l = A / Lamd
     L_B = cholesky(tt.eye(Xu.shape[0]) + tt.dot(A_l, tt.transpose(A)))
     r = y - self.mean_func(X)
     r_l = r / Lamd
     c = solve_lower(L_B, tt.dot(A, r_l))
     constant = 0.5 * X.shape[0] * tt.log(2.0 * np.pi)
     logdet = 0.5 * tt.sum(tt.log(Lamd)) + tt.sum(tt.log(tt.diag(L_B)))
     quadratic = 0.5 * (tt.dot(r, r_l) - tt.dot(c, c))
     return -1.0 * (constant + logdet + quadratic + trace)
Developer: bballamudi, Project: pymc3, Lines: 28, Source: gp.py


Example 7: compileFunctions

    def compileFunctions(self, x_image_global, examples, ib, B, K, corrupt):
        if x_image_global is None:
            x_image_global = self.x

        if corrupt == 0.0:
            self.x_c = self.x
        else:
            self.x_c = self.theano_rng.binomial(
                size=self.x.shape, n=1, p=1-corrupt,
                dtype=theano.config.floatX) * self.x

        self.h = self.g(T.dot(self.x_c, self.W_hl) + self.b_hl)
        self.x_r = self.o(T.dot(self.h, self.W_ol) + self.b_ol)
        self.params = [self.W_hl, self.b_hl, self.b_ol]
        self.cost = \
            (- T.sum(
                self.x * T.log(self.x_r) + (1 - self.x) * T.log(1 - self.x_r),
                axis=(0,1)))

        gparams = T.grad(self.cost, self.params)
        updates = [
            (param, param - K * gparam)
            for param, gparam in zip(self.params, gparams)
        ]

        fun_train = theano.function(
            inputs=[ib],
            outputs=(self.cost, self.x_r, self.x_c),
            updates=updates,
            givens={
                x_image_global: examples[ib*B: (ib+1)*B]
            }
        )

        return fun_train
Developer: felidadae, Project: dnn, Lines: 35, Source: AE.py


Example 8: factors

	def factors(self, w, x, z, A):
		
		if self.data == 'binary':
			def f_xi(zi, xi):
				pi = T.nnet.sigmoid(T.dot(w['wx'], zi) + T.dot(w['bx'], A)) # pi = p(X_i=1)
				logpxi = - T.nnet.binary_crossentropy(pi, xi).sum(axis=0, keepdims=True)# logpxi = log p(X_i=x_i)
				#logpxi = T.log(pi*xi+(1-pi)*(1-xi)).sum(axis=0, keepdims=True)
				return logpxi
		elif self.data == 'gaussian':
			def f_xi(zi, xi):
				x_mean = T.dot(w['wx'], zi) + T.dot(w['bx'], A)
				x_logvar = T.dot(2*w['logsdx'], A)
				return ap.logpdfs.normal2(xi, x_mean, x_logvar).sum(axis=0, keepdims=True)
		else: raise Exception()
		
		# Factors of X and Z
		logpx = 0
		logpz = 0
		sd = T.dot(T.exp(w['logsd']), A)
		for i in range(self.n_steps):
			if i == 0:
				logpz += logpdfs.standard_normal(z['z'+str(i)]).sum(axis=0, keepdims=True)
			if i > 0:
				mean = T.tanh(T.dot(w['wz'], z['z'+str(i-1)]) + T.dot(w['bz'], A))
				logpz += logpdfs.normal(z['z'+str(i)], mean, sd).sum(axis=0, keepdims=True)
			logpxi = f_xi(z['z'+str(i)], x['x'+str(i)])
			logpx += logpxi
		
		# joint() = logp(x,z,w) = logp(x|z) + logp(z) + logp(w) + C
		# This is a proper scalar function
		logpw = 0
		for i in w:
			logpw += logpdfs.normal(w[i], 0, self.prior_sd).sum() # logp(w)
		
		return logpw, logpx, logpz, {}
Developer: Beronx86, Project: anglepy, Lines: 35, Source: DBN_noAT.py


Example 9: rbm_fe

 def rbm_fe(rbm_params, v, b):
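     # RBM free energy of visible sample v, with terms scaled by inverse temperature b.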
     (weights, visbias, hidbias) = rbm_params
     vis_term = b * tensor.dot(v, visbias)
     hid_act = b * (tensor.dot(v, weights) + hidbias)
     fe = -vis_term - tensor.sum(tensor.log(1 + tensor.exp(hid_act)),
                                 axis=1)
     return fe
Developer: EderSantana, Project: pylearn2, Lines: 7, Source: rbm_tools.py


Example 10: forward_prop

    def forward_prop(self,F,S):
        # We assume F is a m x n matrix (m rows, n columns)
        # and S is a 1 x o where o is our output size.
        # Our weight matrix (self.w) will be n x o.

        # Resize our bias to be appropriate size (batch_size x o)
        resized_bias = T.extra_ops.repeat(self.bh, F.shape[0], axis=0)
        # Combine our input data (F) with our weight matrix and bias.
        recurrent_gate = T.dot(F,self.wx) #T.nnet.sigmoid(T.dot(F,self.wx))

        # Resize the state value to have batch_size x output_size shape
        weighted_state = T.dot(S,self.wh)
        hidden_state = T.extra_ops.repeat(weighted_state, F.shape[0], axis=0)

        # Combine the recurrent_gate with our resized hidden state
        # Should I use T.tanh on the hidden_state?
        output = T.nnet.sigmoid(recurrent_gate + hidden_state + resized_bias)

        # This will average the values across the batch_size and
        # return a vector of size 1 x o (output_size)
        new_state = T.mean(hidden_state, axis=0)
        new_state = new_state.reshape((1,self.y))
        # Cast the output
        output_cast = T.cast(output,theano.config.floatX)
        return new_state,output_cast
Developer: Aabglov, Project: TheanoSeq2Seq, Lines: 25, Source: layer.py


Example 11: model

def model(X, w1, w2, w3, Max_Pooling_Shape, p_drop_conv, p_drop_hidden):
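    # Conv layer (rectify + max-pool + dropout), flattened, then a dropout dense layer and a softmax output.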
    l1 = T.flatten(
        dropout(max_pool_2d(rectify(conv2d(X, w1, border_mode="valid")), Max_Pooling_Shape), p_drop_conv), outdim=2
    )
    l2 = dropout(rectify(T.dot(l1, w2)), p_drop_hidden)
    pyx = softmax(T.dot(l2, w3))
    return pyx
Developer: r3fang, Project: foo, Lines: 7, Source: convolutional_net.py


Example 12: __init__

    def __init__(self, rng, input1, input2, n_in, n_out):

        self.input1 = input1.flatten(2)
        self.input2 = input2.flatten(2)

        self.W = theano.shared(
            value=numpy.asarray(
                rng.uniform(
                    low=-numpy.sqrt(6. / (n_in + n_out)),
                    high=numpy.sqrt(6. / (n_in + n_out)),
                    size=(n_in, n_out)
                ),
                dtype='float32'
            ),
            name='W',
            borrow=True
        )

        self.b = theano.shared(
            value=numpy.zeros((n_out,), dtype='float32'),
            name='b',
            borrow=True
        )

        lin_output1 = T.dot(self.input1, self.W) + self.b
        lin_output2 = T.dot(self.input2, self.W) + self.b

        self.output1 = T.nnet.relu(lin_output1)
        self.output2 = T.nnet.relu(lin_output2)
        self.similarity = self.similarity_func(self.output1, self.output2)
        self.params = [self.W, self.b]
Developer: yangli625, Project: ReId_theano, Lines: 31, Source: Layer.py


Example 13: pred_t

		def pred_t(input_voc_t, weight_tm1, memory_tm1):
			rawinput_t = self.embedding[input_voc_t]
			input_t = T.dot(rawinput_t,self.input_w)
			read_m = T.dot(weight_tm1, memory_tm1)
			read_t = T.dot(read_m,self.read_w)
			controller_input = activation(input_t+read_t+self.input_b)
			hid = self.controller.getY(controller_input)
			output = T.nnet.softmax(T.dot(hid, self.output_w)+self.output_b)
			result = T.switch(T.eq(input_voc_t, 0),T.argmax(output,axis=1), theano.shared(0))
			#test = controller_input
			
			memory_inter = memory_tm1
			weight_inter = weight_tm1
			for head in self.heads:
				weight_inter, erase, add= head.emit_new_weight(hid, weight_inter, memory_inter)
				#write to memory
				weight_tdim = weight_inter.dimshuffle((0, 'x'))
				erase_dim = erase.dimshuffle(('x', 0))
				add_dim = add.dimshuffle(('x', 0))
				M_erased = memory_inter*(1-(weight_tdim*erase_dim))
				memory_inter = M_erased+(weight_tdim*add_dim)

			#testing = weight_tm1
			#testing2 = rawinput_t
			memory_t = memory_inter
			weight_t = weight_inter
			

			return weight_t, memory_t, output,result
Developer: dandxy89, Project: NTMtranslation, Lines: 29, Source: ntm_translate.py


Example 14: _construct_mom_stuff

 def _construct_mom_stuff(self):
     """
     Construct the cost function for the moment-matching "regularizer".
     """
     a = self.mom_mix_rate
     dist_mean = self.GN.dist_mean
     dist_cov = self.GN.dist_cov
     # Get the generated sample observations for this batch, transformed
     # linearly into the desired space for moment matching...
     X_b = T.dot(self.GN.output, self.mom_match_proj)
     # Get their mean
     batch_mean = T.mean(X_b, axis=0)
     # Get the updated generator distribution mean
     new_mean = ((1.0 - a[0]) * self.GN.dist_mean) + (a[0] * batch_mean)
     # Use the mean to get the updated generator distribution covariance
     X_b_minus_mean = X_b - new_mean
     # Whelp, I guess this line needs the cast... for some reason...
     batch_cov = T.dot(X_b_minus_mean.T, X_b_minus_mean) / T.cast(X_b.shape[0], 'floatX')
     new_cov = ((1.0 - a[0]) * self.GN.dist_cov) + (a[0] * batch_cov)
     # Get the cost for deviation from the target distribution's moments
     mean_err = new_mean - self.target_mean
     cov_err = (new_cov - self.target_cov)
     mm_cost = self.mom_match_weight[0] * \
             (T.sum(mean_err**2.0) + T.sum(cov_err**2.0))
     # Construct the updates for the running estimates of the generator
     # distribution's first and second-order moments.
     mom_updates = OrderedDict()
     mom_updates[self.GN.dist_mean] = new_mean
     mom_updates[self.GN.dist_cov] = new_cov
     return [mm_cost, mom_updates]
Developer: darcy0511, Project: NN-Python, Lines: 30, Source: GCPair.py


Example 15: _compile_func

def _compile_func():
    beta = T.vector('beta')
    b = T.scalar('b')
    X = T.matrix('X')
    y = T.vector('y')
    C = T.scalar('C')
    params = [beta, b, X, y, C]
    cost = 0.5 * (T.dot(beta, beta) + b * b) + C * T.sum(
        T.nnet.softplus(
            -T.dot(T.diag(y), T.dot(X, beta) + b)
        )
    )
    # Function computing in one go the cost, its gradient
    # with regard to beta and with regard to the bias.
    cost_grad = theano.function(params,[
        cost,
        T.grad(cost, beta),
        T.grad(cost, b)
    ])

    # Function for computing element-wise sigmoid, used for
    # prediction.
    log_predict = theano.function(
        [beta, b, X],
        T.nnet.sigmoid(b + T.dot(X, beta)),
        on_unused_input='warn'
    )

    return (cost_grad, log_predict)
Developer: alexisVallet, Project: dpm-identification, Lines: 29, Source: lr.py


Example 16: mlp

def mlp(insize, hiddensize, outsize, transferfunc='tanh', outfunc='id'):
    P = util.ParameterSet(
        inweights=(insize, hiddensize),
        hiddenbias=hiddensize,
        outweights=(hiddensize, outsize),
        outbias=outsize)

    P.randomize(1e-4)

    inpt = T.matrix('inpt')
    hidden_in = T.dot(inpt, P.inweights)
    hidden_in += P.hiddenbias

    nonlinear = transfermap[transferfunc]
    hidden = nonlinear(hidden_in)
    output_in = T.dot(hidden, P.outweights)
    output_in += P.outbias
    output = transfermap[outfunc](output_in)

    exprs = {'inpt': inpt,
             'hidden-in': hidden_in,
             'hidden': hidden,
             'output-in': output_in,
             'output': output}
    return exprs, P
Developer: osdf, Project: climin, Lines: 26, Source: tongaMNISTscript10.py


Example 17: get_pred_prob

    def get_pred_prob(self):
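        # Forward pass: tanh hidden layer, then softmax output probabilities.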
        z1 = T.dot(self.input, self.W1) + self.b1
        a1 = T.tanh(z1)
        z2 = T.dot(a1, self.W2) + self.b2
        y_hat = T.nnet.softmax(z2)  # output probabilities

        return y_hat
Developer: MingyanZhao, Project: GM_AS5, Lines: 7, Source: nnclassifier_zmy.py


Example 18: _build_conditional

 def _build_conditional(self, Xnew, pred_noise, diag, X, Xu, y, sigma, cov_total, mean_total):
     sigma2 = tt.square(sigma)
     Kuu = cov_total(Xu)
     Kuf = cov_total(Xu, X)
     Luu = cholesky(stabilize(Kuu))
     A = solve_lower(Luu, Kuf)
     Qffd = tt.sum(A * A, 0)
     if self.approx == "FITC":
         Kffd = cov_total(X, diag=True)
         Lamd = tt.clip(Kffd - Qffd, 0.0, np.inf) + sigma2
     else:  # VFE or DTC
         Lamd = tt.ones_like(Qffd) * sigma2
     A_l = A / Lamd
     L_B = cholesky(tt.eye(Xu.shape[0]) + tt.dot(A_l, tt.transpose(A)))
     r = y - mean_total(X)
     r_l = r / Lamd
     c = solve_lower(L_B, tt.dot(A, r_l))
     Kus = self.cov_func(Xu, Xnew)
     As = solve_lower(Luu, Kus)
     mu = self.mean_func(Xnew) + tt.dot(tt.transpose(As), solve_upper(tt.transpose(L_B), c))
     C = solve_lower(L_B, As)
     if diag:
         Kss = self.cov_func(Xnew, diag=True)
         var = Kss - tt.sum(tt.square(As), 0) + tt.sum(tt.square(C), 0)
         if pred_noise:
             var += sigma2
         return mu, var
     else:
         cov = (self.cov_func(Xnew) - tt.dot(tt.transpose(As), As) +
                tt.dot(tt.transpose(C), C))
         if pred_noise:
             cov += sigma2 * tt.identity_like(cov)
         return mu, stabilize(cov)
Developer: bballamudi, Project: pymc3, Lines: 33, Source: gp.py


Example 19: gibbs_vhv

	def gibbs_vhv(self,v_sample):
		h_activation_score = T.dot(v_sample,self.W)   + self.h_bias
		h_activation_probs, h_sample, h_updates = self.h.sample(h_activation_score)
		v_activation_score = T.dot(h_sample,self.W.T) + self.v_bias
		v_activation_probs, v_sample, v_updates  = self.v.sample(v_activation_score)
		return h_activation_score,h_activation_probs,h_sample,\
			   v_activation_score,v_activation_probs,v_sample
Developer: parasitew, Project: python-dbn, Lines: 7, Source: rbm.py


Example 20: recurrent_step

 def recurrent_step(self, x_c_t, x_i_t, x_f_t, x_o_t, h_tm1, c_tm1, U_h_c, U_h_i, U_h_f, U_h_o):
     """
     Performs one computation step over time.
     """
     # new memory content c_tilde
     c_tilde = self.hidden_activation_func(
         x_c_t + T.dot(h_tm1, U_h_c)
     )
     # input gate
     i_t = self.inner_hidden_activation_func(
         x_i_t + T.dot(h_tm1, U_h_i)
     )
     # forget gate
     f_t = self.inner_hidden_activation_func(
         x_f_t + T.dot(h_tm1, U_h_f)
     )
     # new memory content
     c_t = f_t*c_tm1 + i_t*c_tilde
     # output gate
     o_t = self.inner_hidden_activation_func(
         x_o_t + T.dot(h_tm1, U_h_o)
     )
     # new hiddens
     h_t = o_t*self.hidden_activation_func(c_t)
     # return the hiddens and memory content
     return h_t, c_t
Developer: sherjilozair, Project: OpenDeep, Lines: 26, Source: lstm.py



Note: the theano.tensor.dot examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by their developers; copyright remains with the original authors. Consult each project's License before distributing or using the code, and do not republish without permission.

