Python tensorflow.diag Function Code Examples


This article collects typical usage examples of the tensorflow.diag function in Python. If you are wondering what exactly tf.diag does, how to call it, or what real uses of it look like, the curated code examples below should help.



Below are 20 code examples of the diag function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
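
Before the examples, a minimal sketch of the op itself: tf.diag takes a 1-D tensor and returns a square matrix with those values on the main diagonal and zeros everywhere else (in TensorFlow 2.x the equivalent is tf.linalg.diag, and tf.diag_part recovers the diagonal):

import tensorflow as tf

values = tf.constant([1.0, 2.0, 3.0])
d = tf.diag(values)        # [[1. 0. 0.]
                           #  [0. 2. 0.]
                           #  [0. 0. 3.]]
part = tf.diag_part(d)     # [1. 2. 3.]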

Example 1: r_loss

def r_loss(communities = 2, group_size = 10, seed=None, p=0.4, q=0.05, r=1.0, projection_dim=2):
    """testing to see if the loss will decrease backproping through very simple function"""
    B = np.asarray(balanced_stochastic_blockmodel(communities, group_size, p, q, seed)).astype(np.double)
    B = tf.cast(B, tf.float64)
    Diag = tf.diag(tf.reduce_sum(B,0))
    Diag = tf.cast(Diag, tf.float64)

    #r_grid = tf.linspace(r_min, r_max, grid_size)
    r = tf.cast(r, tf.float64)
    
    BH = (tf.square(r)-1)*tf.diag(tf.ones(shape=[communities*group_size], dtype=tf.float64))-tf.mul(r, B)+Diag 
    
    with tf.Session() as sess:
        eigenval, eigenvec = tf.self_adjoint_eig(BH)
        eigenvec_proj = tf.slice(eigenvec, [0,0], [communities*group_size, projection_dim])
                
        true_assignment_a = tf.concat(0, [-1*tf.ones([group_size], dtype=tf.float64),
                                      tf.ones([group_size], dtype=tf.float64)])
        true_assignment_b = -1*true_assignment_a
        true_assignment_a = tf.expand_dims(true_assignment_a, 1)
        true_assignment_b = tf.expand_dims(true_assignment_b, 1)

            
        projected_a = tf.matmul(tf.matmul(eigenvec_proj, tf.transpose(eigenvec_proj)), true_assignment_a)#tf.transpose(true_assignment_a))
        projected_b = tf.matmul(tf.matmul(eigenvec_proj, tf.transpose(eigenvec_proj)), true_assignment_b)#tf.transpose(true_assignment_b))
            
        loss = tf.minimum(tf.reduce_sum(tf.square(tf.sub(projected_a, true_assignment_a))),
                              tf.reduce_sum(tf.square(tf.sub(projected_b, true_assignment_b))))
            

        d = sess.run(loss)
    return d
Author: lishali | Project: clusternet | Lines: 32 | Source: error_bars_loss.py
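
For reference, the matrix assembled above is the Bethe Hessian used in spectral clustering of the stochastic block model, with adjacency matrix B and degree matrix D = diag(B·1):

H(r) = (r^2 - 1)\,I - r\,B + D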


Example 2: multilayer_perceptron

    def multilayer_perceptron(_X_aud, _X_img, _w_aud, _b_aud, _w_img, _b_img, _w_out, _b_out, _dropout, _b_size):

        #layer 1
        aud_layer_1 = tf.nn.relu(tf.add(tf.matmul(_X_aud, _w_aud['h1']), _b_aud['b1']))
        img_layer_1 = tf.nn.relu(tf.add(tf.matmul(_X_img, _w_img['h1']), _b_img['b1']))
        img_layer_1 = tf.nn.dropout(img_layer_1, _dropout)

        '''
        CA 1
        '''
        factor = calculatCA(aud_layer_1, img_layer_1, 1000, _b_size)
        factor = tf.reshape(tf.diag(factor), shape=[_b_size, _b_size])
        aud_layer_1 = tf.matmul(factor, aud_layer_1)
        aud_layer_1 = tf.nn.dropout(aud_layer_1, _dropout)
        img_layer_1 = tf.matmul(factor, img_layer_1)
        #img_layer_1 = tf.nn.dropout(img_layer_1, _dropout)

        #layer 2
        aud_layer_2 = tf.nn.relu(tf.add(tf.matmul(aud_layer_1, _w_aud['h2']), _b_aud['b2'])) #Hidden layer with RELU activation
        img_layer_2 = tf.nn.relu(tf.add(tf.matmul(img_layer_1, _w_img['h2']), _b_img['b2'])) #Hidden layer with RELU activation
        #drop_2 = tf.nn.dropout(img_layer_2, _dropout)

        '''
        CA 2 & merge
        '''
        factor = calculatCA(aud_layer_2, img_layer_2, 600, _b_size)
        factor = tf.reshape(tf.diag(factor), shape=[_b_size, _b_size])
        merge_sum = tf.add(aud_layer_2, img_layer_2)
        facmat = tf.matmul(factor, merge_sum)
        #facmat = tf.nn.dropout(facmat, _dropout)
    
        #output layer
        out_layer_1 = tf.nn.relu(tf.add(tf.matmul(facmat, _w_out['h1']), _b_out['b1'])) #Hidden layer with RELU activation
        out_layer_2 = tf.nn.relu(tf.add(tf.matmul(out_layer_1, _w_out['h2']), _b_out['b2'])) #Hidden layer with RELU activation
        return tf.matmul(out_layer_2, _w_out['out']) + _b_out['out']
Author: lheadjh | Project: MultimodalDeepLearning | Lines: 35 | Source: MM2CA.py


Example 3: compute_loss_reg

    def compute_loss_reg(self, sim_reg_mat, offset_label):

        sim_score_mat, p_reg_mat, l_reg_mat = tf.split(2, 3, sim_reg_mat)
        sim_score_mat = tf.reshape(sim_score_mat, [self.batch_size, self.batch_size])
        l_reg_mat = tf.reshape(l_reg_mat, [self.batch_size, self.batch_size])
        p_reg_mat = tf.reshape(p_reg_mat, [self.batch_size, self.batch_size])
        # identity matrix scaled by -2
        I_2 = tf.diag(tf.constant(-2.0, shape=[self.batch_size]))
        all1 = tf.constant(1.0, shape=[self.batch_size, self.batch_size])
        #               | -1  1  1 ... |
        #   mask_mat =  |  1 -1  1 ... |
        #               |  1  1 -1 ... |
        mask_mat = tf.add(I_2, all1)
        # loss cls, not considering iou
        I = tf.diag(tf.constant(1.0, shape=[self.batch_size]))
        I_half = tf.diag(tf.constant(0.5, shape=[self.batch_size]))
        batch_para_mat = tf.constant(self.alpha, shape=[self.batch_size, self.batch_size])
        para_mat = tf.add(I,batch_para_mat)
        loss_mat = tf.log(tf.add(all1, tf.exp(tf.mul(mask_mat, sim_score_mat))))
        loss_mat = tf.mul(loss_mat, para_mat)
        loss_align = tf.reduce_mean(loss_mat)
        # regression loss
        l_reg_diag = tf.matmul(tf.mul(l_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))
        p_reg_diag = tf.matmul(tf.mul(p_reg_mat, I), tf.constant(1.0, shape=[self.batch_size, 1]))
        offset_pred = tf.concat(1, (p_reg_diag, l_reg_diag))
        loss_reg = tf.reduce_mean(tf.abs(tf.sub(offset_pred, offset_label)))

        loss=tf.add(tf.mul(self.lambda_regression, loss_reg), loss_align)
        return loss, offset_pred, loss_reg
Author: BinbinBian | Project: TALL | Lines: 31 | Source: ctrl_model.py
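
Written out, the alignment term above is a weighted logistic loss over the B x B score matrix that pushes aligned-pair scores s_ii up and cross-pair scores down:

L_{align} = \frac{1}{B^2} \sum_{i,j} w_{ij} \log\bigl(1 + e^{m_{ij} s_{ij}}\bigr), \qquad m_{ii} = -1,\; m_{i \ne j} = 1,\; w_{ii} = 1 + \alpha,\; w_{i \ne j} = \alpha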


Example 4: __init__

    def __init__(self,
                 mps,
                 mpo,
                 name='InfiniteDMRG',
                 precision=1E-12,
                 precision_canonize=1E-12,
                 nmax=1000,
                 nmax_canonize=1000,
                 ncv=40,
                 numeig=1,
                 pinv=1E-20,
                 power_method=False):

        # if not isinstance(mps, InfiniteMPSCentralGauge):
        #     raise TypeError(
        #         'in InfiniteDMRGEngine.__init__(...): mps of type InfiniteMPSCentralGauge expected, got {0}'
        #         .format(type(mps)))

        mps.restore_form(
            precision=precision_canonize,
            ncv=ncv,
            nmax=nmax_canonize,
            numeig=numeig,
            power_method=power_method,
            pinv=pinv)  #this leaves state in left-orthogonal form

        lb, hl = misc_mps.compute_steady_state_Hamiltonian_GMRES(
            'l',
            mps,
            mpo,
            left_dominant=tf.diag(tf.ones(mps.D[-1], dtype=mps.dtype)),
            right_dominant=ncon.ncon([mps.mat, tf.conj(mps.mat)],
                                     [[-1, 1], [-2, 1]]),
            precision=precision,
            nmax=nmax)

        rmps = mps.get_right_orthogonal_imps(
            precision=precision_canonize,
            ncv=ncv,
            nmax=nmax_canonize,
            numeig=numeig,
            pinv=pinv,
            restore_form=False)

        rb, hr = misc_mps.compute_steady_state_Hamiltonian_GMRES(
            'r',
            rmps,
            mpo,
            right_dominant=tf.diag(tf.ones(mps.D[0], dtype=mps.dtype)),
            left_dominant=ncon.ncon([mps.mat, tf.conj(mps.mat)],
                                    [[1, -1], [1, -2]]),
            precision=precision,
            nmax=nmax)

        left_dominant = ncon.ncon([mps.mat, tf.conj(mps.mat)],
                                  [[1, -1], [1, -2]])
        out = mps.unitcell_transfer_op('l', left_dominant)

        super().__init__(mps=mps, mpo=mpo, lb=lb, rb=rb, name=name)
Author: zoltanegyed | Project: TensorNetwork | Lines: 59 | Source: DMRG.py


Example 5: logpdf

    def logpdf(self, x, mean=None, cov=1):
        """Log of the probability density function.

        Parameters
        ----------
        x : tf.Tensor
            A 1-D or 2-D tensor.
        mean : tf.Tensor, optional
            A 1-D tensor. Defaults to zero mean.
        cov : tf.Tensor, optional
            A 1-D or 2-D tensor. Defaults to identity matrix.

        Returns
        -------
        tf.Tensor
            A tensor of one dimension less than the input.
        """
        x = tf.cast(x, dtype=tf.float32)
        x_shape = get_dims(x)
        if len(x_shape) == 1:
            d = x_shape[0]
        else:
            d = x_shape[1]

        if mean is None:
            r = x
        else:
            mean = tf.cast(mean, dtype=tf.float32)
            r = x - mean

        if cov is 1:
            L_inv = tf.diag(tf.ones([d]))
            det_cov = tf.constant(1.0)
        else:
            cov = tf.cast(cov, dtype=tf.float32)
            if len(cov.get_shape()) == 1: # vector
                L_inv = tf.diag(1.0 / tf.sqrt(cov))
                det_cov = tf.reduce_prod(cov)
            else: # matrix
                L = tf.cholesky(cov)
                L_inv = tf.matrix_inverse(L)
                det_cov = tf.pow(tf.reduce_prod(tf.diag_part(L)), 2)

        lps = -0.5*d*tf.log(2*np.pi) - 0.5*tf.log(det_cov)
        if len(x_shape) == 1: # vector
            r = tf.reshape(r, shape=(d, 1))
            inner = tf.matmul(L_inv, r)
            lps -= 0.5 * tf.matmul(inner, inner, transpose_a=True)
            return tf.squeeze(lps)
        else: # matrix
            # TODO vectorize further
            out = []
            for r_vec in tf.unpack(r):
                r_vec = tf.reshape(r_vec, shape=(d, 1))
                inner = tf.matmul(L_inv, r_vec)
                out += [tf.squeeze(lps -
                        0.5 * tf.matmul(inner, inner, transpose_a=True))]

            return tf.pack(out)
Author: TalkingData | Project: edward | Lines: 59 | Source: distributions.py
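
Example 5 above (and Examples 8 and 9 below) all evaluate the multivariate normal log-density, differing only in how the covariance is factored (a Cholesky factor here, a direct inverse later):

\log p(x) = -\tfrac{d}{2}\log(2\pi) - \tfrac{1}{2}\log\det\Sigma - \tfrac{1}{2}(x-\mu)^\top \Sigma^{-1} (x-\mu)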


Example 6: test_all_finite_raises

    def test_all_finite_raises(self):
        with self.test_session():
            a = np.inf * tf.ones([5]) * np.arange(5)
            b = tf.diag(tf.ones([5]))
            with self.assertRaisesOpError('Inf'):
                dot(a, b).eval()
            a = tf.ones([5]) * np.arange(5)
            b = np.inf * tf.diag(tf.ones([5]))
            with self.assertRaisesOpError('Inf'):
                dot(a, b).eval()
Author: TalkingData | Project: edward | Lines: 10 | Source: test_dot.py


Example 7: times_diag_tf

def times_diag_tf(input_matrix, n_hidden, diag):
    input_re = input_matrix[:, :n_hidden] #left half of the matrix holds the real parts
    input_im = input_matrix[:, n_hidden:] #right half holds the corresponding imaginary parts
    Re = tf.diag(tf.cos(diag))
    Im = tf.diag(tf.sin(diag))
    input_re_times_Re = tf.matmul(input_re, Re) #matmul is the equivalent of dot
    input_re_times_Im = tf.matmul(input_re, Im)
    input_im_times_Re = tf.matmul(input_im, Re)
    input_im_times_Im = tf.matmul(input_im, Im)

    return tf.concat(1, [input_re_times_Re - input_im_times_Im,
                          input_re_times_Im + input_im_times_Re]) #combines the two halves into one matrix
Author: kod3r | Project: Project_RNN_Enhancement | Lines: 12 | Source: unitary_linear.py
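
The two concatenated halves are exactly elementwise multiplication by the unitary diagonal diag(e^{i\theta}), split into real and imaginary parts:

(a + bi)\,e^{i\theta} = (a\cos\theta - b\sin\theta) + i\,(a\sin\theta + b\cos\theta)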


Example 8: logpdf

    def logpdf(self, x, mean=None, cov=1):
        """
        Parameters
        ----------
        x : np.array or tf.Tensor
            vector or matrix
        mean : np.array or tf.Tensor, optional
            vector. Defaults to zero mean.
        cov : np.array or tf.Tensor, optional
            vector or matrix. Defaults to identity.
        """
        x = tf.cast(tf.convert_to_tensor(x), dtype=tf.float32)
        x_shape = get_dims(x)
        if len(x_shape) == 1:
            d = x_shape[0]
        else:
            d = x_shape[1]

        if mean is None:
            r = x
        else:
            mean = tf.cast(tf.convert_to_tensor(mean), dtype=tf.float32)
            r = x - mean

        if cov is 1:
            cov_inv = tf.diag(tf.ones([d]))
            det_cov = tf.constant(1.0)
        else:
            cov = tf.cast(tf.convert_to_tensor(cov), dtype=tf.float32)
            if len(cov.get_shape()) == 1: # vector
                cov_inv = tf.diag(1.0 / cov)
                det_cov = tf.reduce_prod(cov)
            else: # matrix
                cov_inv = tf.matrix_inverse(cov)
                det_cov = tf.matrix_determinant(cov)

        lps = -0.5*d*tf.log(2*np.pi) - 0.5*tf.log(det_cov)
        if len(x_shape) == 1:
            r = tf.reshape(r, shape=(d, 1))
            lps -= 0.5 * tf.matmul(tf.matmul(r, cov_inv, transpose_a=True), r)
            return tf.squeeze(lps)
        else:
            # TODO vectorize further
            out = []
            for r_vec in tf.unpack(r):
                r_vec = tf.reshape(r_vec, shape=(d, 1))
                out += [tf.squeeze(lps - 0.5 * tf.matmul(
                                   tf.matmul(r_vec, cov_inv, transpose_a=True),
                                   r_vec))]
            return tf.pack(out)
        """
Author: ScartleRoy | Project: edward | Lines: 51 | Source: distributions.py


Example 9: logpdf

    def logpdf(self, x, mean=None, cov=1):
        """
        Arguments
        ----------
        x: tf.Tensor
            vector
        mean: tf.Tensor, optional
            vector. Defaults to zero mean.
        cov: tf.Tensor, optional
            vector or matrix. Defaults to identity.

        Returns
        -------
        tf.Tensor
            scalar
        """
        x = tf.cast(tf.squeeze(x), dtype=tf.float32)
        d = get_dims(x)[0]
        if mean is None:
            r = tf.ones([d]) * x
        else:
            mean = tf.cast(tf.squeeze(mean), dtype=tf.float32)
            r = x - mean
            
        if cov == 1:
            cov_inv = tf.diag(tf.ones([d]))
            det_cov = tf.constant(1.0)
        else:
            cov = tf.cast(tf.squeeze(cov), dtype=tf.float32)
            if len(cov.get_shape()) == 1: 
                cov_inv = tf.diag(1.0 / cov)
                det_cov = tf.reduce_prod(cov)
            else:
                cov_inv = tf.matrix_inverse(cov)
                det_cov = tf.matrix_determinant(cov)
        r = tf.reshape(r, shape=(d, 1))
        lps = -0.5*d*tf.log(2*np.pi) - 0.5*tf.log(det_cov) - \
              0.5 * tf.matmul(tf.matmul(r, cov_inv, transpose_a=True), r)
        """
        # TensorFlow can't reverse-mode autodiff Cholesky
        L = tf.cholesky(cov)
        L_inv = tf.matrix_inverse(L)
        det_cov = tf.pow(tf.matrix_determinant(L), 2)
        inner = dot(L_inv, r)
        out = -0.5*d*tf.log(2*np.pi) - \
              0.5*tf.log(det_cov) - \
              0.5*tf.matmul(tf.transpose(inner), inner)
        """
        return tf.squeeze(lps)
Author: bakersfieldag | Project: edward | Lines: 49 | Source: distributions.py


Example 10: dynamic_vae_single

def dynamic_vae_single(T = 50, d_z = 1, d_hidden=2, d_x = 10):

    # MODEL
    transition_mat = np.eye(d_z, dtype=np.float32) #GaussianMatrix(mean=0, std=1.0, output_shape=(D, D), name="transition")
    transition_bias = np.zeros((d_z,), dtype=np.float32)
    transition_cov = np.eye(d_z, dtype=np.float32)
    step_noise = MVGaussianMeanCov(transition_bias, transition_cov)

    w1, w2, b1, b2 = decoder_params(d_z, d_hidden, d_x)

    z = LinearGaussian(T, transition_bias, transition_cov,
                                          transition_mat, transition_bias, transition_cov,
                                          name="z")
    x = VAEDecoderBernoulli(z, w1, w2, b1, b2, name="x")

    # SYNTHETIC OBSERVATION
    x_sampled = x.sample(0)
    q_x = x.observe(x_sampled)

    # INFERENCE MODEL
    upwards_messages = VAEEncoder(q_x.sample, d_hidden, d_z)
    upwards_means = tf.unpack(upwards_messages.mean)
    upwards_vars = tf.unpack(upwards_messages.variance)
    unary_factors = [MVGaussianMeanCov(mean, tf.diag(vs)) for (mean, vs) in zip(upwards_means, upwards_vars)]
    tmat = tf.constant(transition_mat)
    q_z = LinearGaussianChainCRF((T, d_z), tmat, step_noise, unary_factors)
    z.attach_q(q_z)

    return x, z, x_sampled
Author: BenJamesbabala | Project: bayesflow | Lines: 29 | Source: dynamic_vae.py


Example 11: build_predict

    def build_predict(self, Xnew, full_cov=False):
        """
        The posterior variance of F is given by

            q(f) = N(f | K alpha, [K^-1 + diag(lambda**2)]^-1)

        Here we project this to F*, the values of the GP at Xnew which is given by

           q(F*) = N ( F* | K_{*F} alpha , K_{**} - K_{*f}[K_{ff} + diag(lambda**-2)]^-1 K_{f*} )

        """

        #compute kernel matrices
        Kx = self.kern.K(Xnew, self.X)
        K = self.kern.K(self.X)


        #predictive mean
        f_mean = tf.matmul(Kx, self.q_alpha) + self.mean_function(Xnew)

        #predictive var
        f_var = []
        for d in range(self.num_latent):
            b = self.q_lambda[:,d]
            A = K + tf.diag(1./tf.square(b))
            L = tf.cholesky(A)
            LiKx = tf.matrix_triangular_solve(L, tf.transpose(Kx), lower=True)
            if full_cov:
                f_var.append( self.kern.K(Xnew)- tf.matmul(tf.transpose(LiKx),LiKx) )
            else:
                f_var.append( self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(LiKx),0) )
        f_var = tf.pack(f_var)
        return f_mean, tf.transpose(f_var)
Author: agarbuno | Project: GPflow | Lines: 33 | Source: vgp.py
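
Per latent dimension d, the loop evaluates the predictive variance from the docstring through a Cholesky factorization instead of an explicit inverse: with A = K_{ff} + \mathrm{diag}(\lambda_d^{-2}), L = \mathrm{chol}(A) and V = L^{-1} K_{f*},

\mathrm{Var}[F^*] = K_{**} - V^\top V = K_{**} - K_{*f} A^{-1} K_{f*}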


Example 12: symsqrt

def symsqrt(mat, eps=1e-7):
  """Symmetric square root."""
  s, u, v = tf.svd(mat)
  # sqrt is unstable around 0, just use 0 in such case
  print("Warning, cutting off at eps")
  si = tf.where(tf.less(s, eps), tf.zeros_like(s), tf.sqrt(s))
  return u @ tf.diag(si) @ tf.transpose(v)
Author: yaroslavvb | Project: stuff | Lines: 7 | Source: util.py
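
A quick usage check for symsqrt (a sketch; it assumes a symmetric positive semi-definite input, so the SVD coincides with the eigendecomposition):

import tensorflow as tf

m = tf.constant([[2.0, 1.0],
                 [1.0, 2.0]])
root = symsqrt(m)
# tf.matmul(root, root) reproduces m up to numerical error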


Example 13: pseudo_inverse

def pseudo_inverse(mat, eps=1e-10):
  """Computes pseudo-inverse of mat, treating eigenvalues below eps as 0."""
  
  s, u, v = tf.svd(mat)
  # singular values below eps pass through (already ~0) instead of being inverted
  si = tf.where(tf.less(s, eps), s, 1./s)
  return u @ tf.diag(si) @ tf.transpose(v)
Author: yaroslavvb | Project: stuff | Lines: 7 | Source: util.py
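
For the symmetric matrices this utility targets (where U and V coincide), this computes the Moore-Penrose pseudo-inverse with near-zero singular values left uninverted:

A^{+} = U \,\mathrm{diag}(s_i^{+})\, V^\top, \qquad s_i^{+} = \begin{cases} 1/s_i & s_i \ge \varepsilon \\ s_i \;(\approx 0) & s_i < \varepsilon \end{cases}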


Example 14: buildGraph

    def buildGraph(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            # train_input, [batch_size * embed_size]; each batch contains multiple rows
            self.train_input = tf.placeholder(tf.float32,shape=[self.batch_size,self.embed_size],name='train_input')
            self.train_label = tf.placeholder(tf.int32,shape=[self.batch_size],name='train_label')
            label_float = tf.cast(self.train_label,tf.float32)

            # label_matrix = tf.Variable(tf.diag(tf.ones(self.label_size)),trainable=False)
            label_matrix = tf.diag(tf.ones(self.label_size))
            embed_label = tf.nn.embedding_lookup(label_matrix,self.train_label)

            hidden_unit = 50
            self.weight = tf.Variable(tf.truncated_normal(shape=[hidden_unit,self.embed_size],stddev=1.0/math.sqrt(self.embed_size)))
            self.biase = tf.Variable(tf.zeros([hidden_unit]))

            y1 = tf.matmul(self.train_input,self.weight,transpose_b=True) + self.biase
            g1 = tf.nn.sigmoid(y1) # batch_size * hidden_unit

            weight2 = tf.Variable(tf.truncated_normal(shape=[self.label_size,hidden_unit],stddev=1.0/math.sqrt(hidden_unit)))
            biase2 = tf.Variable(tf.zeros([self.label_size]))
            y2 = tf.matmul(g1,weight2,transpose_b=True) + biase2
            g2 = tf.nn.sigmoid(y2)

            self.predict = tf.cast(tf.argmax(g2,axis=1),tf.float32)
            self.error_num = tf.count_nonzero(label_float-self.predict)

            self.loss = tf.reduce_mean(-tf.reduce_sum(embed_label*tf.log(g2+0.0001)+(1-embed_label)*tf.log(1+0.0001-g2),axis=1))

            # self.train_op = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(self.loss)
            self.train_op = tf.train.AdagradOptimizer(learning_rate=1).minimize(self.loss)
            self.init_op = tf.global_variables_initializer()
Author: multiangle | Project: PyNLP | Lines: 32 | Source: simple_add_classifier.py
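
The label_matrix/embedding_lookup pair above is a common trick: looking rows up in tf.diag(tf.ones(n)) selects rows of the identity matrix, i.e. one-hot vectors. A minimal equivalent sketch with tf.one_hot:

import tensorflow as tf

labels = tf.constant([0, 2, 1])
# same rows as embedding_lookup into an identity matrix, in one op
onehot = tf.one_hot(labels, depth=3)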


Example 15: _test_logpdf_scalar

def _test_logpdf_scalar(x):
    xtf = tf.constant(x)
    val_true = stats.norm.logpdf(x)
    _assert_eq(norm.logpdf(xtf), val_true)
    _assert_eq(norm.logpdf(xtf, tf.zeros([1]), tf.constant(1.0)), val_true)
    _assert_eq(norm.logpdf(xtf, tf.zeros([1]), tf.ones([1])), val_true)
    _assert_eq(norm.logpdf(xtf, tf.zeros([1]), tf.diag(tf.ones([1]))), val_true)
Author: Beronx86 | Project: edward | Lines: 7 | Source: test_stats_norm_logpdf.py


Example 16: gate

	def gate(self, sig):
		'''
		Compute the gating coefficients based on the coordinates of the columns.
		Input: self.coordinates
		Output: an LxLx1 tensor whose rows each sum to one; the larger the value, the closer the two columns are.
		'''
		#Step 1: coordinates dot ones transpose -> 3xLxL -> just replicates the 1D vector L times
		output = tf.batch_matmul(self.coordinates, tf.transpose(tf.ones_like(self.coordinates), perm=[0, 2, 1]))
		# output = tf.batch_matmul(self.bbb, tf.transpose(tf.ones_like(self.bbb), perm=[0, 2, 1]))
		
		#Step 2: minus its transpose -> 3xLxL -> symmetric difference matrix
		output = output - tf.transpose(output, perm=[0, 2, 1]) 
		#Step 3 (disabled below): add a large constant to the diagonal, i.e. distance to itself becomes huge instead of 0, so the weight a column gets for predicting itself is very low, preferably zero
		# output = output + tf.reshape(tf.concat(0,[tf.diag(tf.ones([self.L]))*99999, tf.diag(tf.ones([self.L]))*99999, tf.diag(tf.ones([self.L]))*99999]),[self.D,self.L,self.L])
		#could use the function tile for the stuff above, or just add to the diagonal of the next stuff
		#Step 4: square difference and sum over the dimensions -> LxL -> dist of coord to each coord, symmetric
		output = tf.reduce_sum(tf.square(output),0)
		#Step 4.1:
		output = -(sig*output)
		#Make the diagonal very negative (far apart), so each column gets a low gating weight for itself
		output = output + tf.diag(tf.ones([self.L]))*(-99999)
		#Step 5: softmax so rows sum to 1
		output = tf.nn.softmax(output)
		#Step 6: reshape so it works later -> LxLx1
		output = tf.reshape(output, [self.L, self.L, 1])
		return output
Author: chriscremer | Project: Other_Code | Lines: 26 | Source: phase_2_model.py
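
In closed form, the gate is a distance-based softmax over columns with the diagonal masked out so that no column gates itself:

G_{ij} = \mathrm{softmax}_j\bigl(-\sigma \lVert c_i - c_j \rVert^2 + M_{ij}\bigr), \qquad M_{ii} = -99999,\; M_{i \ne j} = 0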


Example 17: BatchClipByL2norm

def BatchClipByL2norm(t, upper_bound, name=None):
  """Clip an array of tensors by L2 norm.

  Shrink each dimension-0 slice of tensor (for matrix it is each row) such
  that the l2 norm is at most upper_bound. Here we clip each row as it
  corresponds to each example in the batch.

  Args:
    t: the input tensor.
    upper_bound: the upper bound of the L2 norm.
    name: optional name.
  Returns:
    the clipped tensor.
  """

  assert upper_bound > 0
  with tf.op_scope([t, upper_bound], name, "batch_clip_by_l2norm") as name:
    saved_shape = tf.shape(t)
    batch_size = tf.slice(saved_shape, [0], [1])
    t2 = tf.reshape(t, tf.concat(0, [batch_size, [-1]]))
    upper_bound_inv = tf.fill(tf.slice(saved_shape, [0], [1]),
                              tf.constant(1.0/upper_bound))
    # Add a small number to avoid divide by 0
    l2norm_inv = tf.rsqrt(tf.reduce_sum(t2 * t2, [1]) + 0.000001)
    scale = tf.minimum(l2norm_inv, upper_bound_inv) * upper_bound
    clipped_t = tf.matmul(tf.diag(scale), t2)
    clipped_t = tf.reshape(clipped_t, saved_shape, name=name)
  return clipped_t
Author: Peratham | Project: models | Lines: 28 | Source: utils.py
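
For comparison, a minimal sketch of the same per-row clipping in later TensorFlow versions, avoiding the dense batch_size x batch_size diagonal matmul (the helper name is ours; assumes t is 2-D):

import tensorflow as tf

def batch_clip_by_l2norm_v2(t, upper_bound):
    # clip each row of a 2-D tensor to an L2 norm of at most upper_bound
    return tf.clip_by_norm(t, upper_bound, axes=[1])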


Example 18: multilayer_perceptron

    def multilayer_perceptron(_X_aud, _X_img, _w_aud, _b_aud, _w_img, _b_img, _w_out, _b_out, _dropout, _b_size):
        #aud
        aud_layer_1 = tf.nn.relu(tf.add(tf.matmul(_X_aud, _w_aud['h1']), _b_aud['b1'])) #Hidden layer with RELU activation
        aud_layer_2 = tf.nn.relu(tf.add(tf.matmul(aud_layer_1, _w_aud['h2']), _b_aud['b2'])) #Hidden layer with RELU activation
        #aud_out = tf.matmul(aud_layer_2, _w_aud['out']) + _b_aud['out']
        #Image
        img_layer_1 = tf.nn.relu(tf.add(tf.matmul(_X_img, _w_img['h1']), _b_img['b1'])) #Hidden layer with RELU activation
        drop_1 = tf.nn.dropout(img_layer_1, _dropout)
        img_layer_2 = tf.nn.relu(tf.add(tf.matmul(drop_1, _w_img['h2']), _b_img['b2'])) #Hidden layer with RELU activation
        drop_2 = tf.nn.dropout(img_layer_2, _dropout)
        #img_out = tf.matmul(drop_2, _w_img['out']) + _b_img['out']

        '''
        Merge with CA
        '''
        factor = calculatCA(aud_layer_2, drop_2, 600, _b_size)
        factor = tf.reshape(tf.diag(factor), shape=[_b_size, _b_size])
        merge_sum = tf.add(aud_layer_2, drop_2)
        facmat = tf.nn.relu(tf.matmul(factor, merge_sum))
   
    
        #out_drop = tf.nn.dropout(merge_sum, _dropout)
        out_layer_1 = tf.nn.relu(tf.add(tf.matmul(facmat, _w_out['h1']), _b_out['b1'])) #Hidden layer with RELU activation
        out_layer_2 = tf.nn.relu(tf.add(tf.matmul(out_layer_1, _w_out['h2']), _b_out['b2'])) #Hidden layer with RELU activation
        
        
        #return out_drop
        return tf.matmul(out_layer_2, _w_out['out']) + _b_out['out']
Author: lheadjh | Project: MultimodalDeepLearning | Lines: 28 | Source: MM1CA.py


Example 19: backward

def backward(obs,m,scope=None):
    init = np.array([1.]*m*m).astype('float32').reshape(m,m)
    output = []
    output.append(init)
    obs = tf.split(0,m,obs)
    obs = [tf.squeeze(obs_) for obs_ in obs]
    with tf.variable_scope(scope or "Forward") as scope:
        T = tf.get_variable("Transitions",[m,m],
                            initializer=tf.random_uniform_initializer(-1,1))
        #session.run(tf.initialize_all_variables())
        for time_step, o_ in enumerate(reversed(obs)):
            #print(time_step)
            
            if time_step > 0:
                #scope.reuse_variables()
                init = output[time_step]
            O = tf.diag(o_)
            
            tmp = tf.matmul(O,T)
            
            tmp = tf.matmul(tmp,init)
            
            tmp = tf.div(tmp,tf.reduce_sum(tmp))*2
            #print(tmp.get_shape())
            output.append(tmp)
    return output
Author: jme2005 | Project: tensorflow | Lines: 26 | Source: cuda_rnn_crf.py
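
For orientation: up to the ad-hoc rescaling, each iteration above is a matrix-valued form of the HMM backward recursion, with transition matrix T and observation weights o_t on the diagonal:

\beta_t \propto \mathrm{diag}(o_t)\, T\, \beta_{t+1}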


Example 20: _define_distance_to_clusters

  def _define_distance_to_clusters(self, data):
    """Defines the Mahalanobis distance to the assigned Gaussian."""
    # TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
    # mean) from log probability function.
    self._all_scores = []
    for shard in data:
      all_scores = []
      shard = tf.expand_dims(shard, 0)
      for c in xrange(self._num_classes):
        if self._covariance_type == FULL_COVARIANCE:
          cov = self._covs[c, :, :]
        elif self._covariance_type == DIAG_COVARIANCE:
          cov = tf.diag(self._covs[c, :])
        inverse = tf.matrix_inverse(cov + self._min_var)
        inv_cov = tf.tile(
            tf.expand_dims(inverse, 0),
            tf.pack([self._num_examples, 1, 1]))
        diff = tf.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
        m_left = tf.batch_matmul(diff, inv_cov)
        all_scores.append(tf.sqrt(tf.batch_matmul(
            m_left, tf.transpose(diff, perm=[0, 2, 1])
        )))
      self._all_scores.append(tf.reshape(
          tf.concat(1, all_scores),
          tf.pack([self._num_examples, self._num_classes])))

    # Distance to the associated class.
    self._all_scores = tf.concat(0, self._all_scores)
    assignments = tf.concat(0, self.assignments())
    rows = tf.to_int64(tf.range(0, self._num_examples))
    indices = tf.concat(1, [tf.expand_dims(rows, 1),
                            tf.expand_dims(assignments, 1)])
    self._scores = tf.gather_nd(self._all_scores, indices)
Author: DavidNemeskey | Project: tensorflow | Lines: 33 | Source: gmm_ops.py
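
Each score collected above is the Mahalanobis distance of an input x to class c:

d(x, c) = \sqrt{(x - \mu_c)^\top \Sigma_c^{-1} (x - \mu_c)}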



Note: The tensorflow.diag examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use must follow each project's license. Do not repost without permission.

