
Python tensorflow.matmul Function Code Examples


This article compiles typical usage examples of the tensorflow.matmul function in Python. If you have been wondering what exactly Python's matmul does, how to call it, or what real code that uses it looks like, the curated examples below may help.



The following sections show 20 code examples of the matmul function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
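
Before the collected examples, here is a minimal, self-contained sketch of the call itself. It is a hedged illustration, not taken from any of the projects below, and assumes TensorFlow 1.x graph mode, which most of the snippets on this page use; the variable names are illustrative:

    import numpy as np
    import tensorflow as tf

    # tf.matmul multiplies two matrices: [2, 3] x [3, 4] -> [2, 4].
    a = tf.constant(np.arange(6.0).reshape(2, 3), dtype=tf.float32)
    b = tf.constant(np.ones((3, 4)), dtype=tf.float32)
    c = tf.matmul(a, b)

    with tf.Session() as sess:
        print(sess.run(c))  # each entry is the dot product of a row of a with a column of b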

Example 1: __init__

    def __init__(self):

        self.x = tf.placeholder(tf.float32, [None, NUM_FEATURES])
        self.y = tf.placeholder(tf.float32, [None, HIDDEN_3_SIZE])

        W_1 = tf.Variable(tf.random_uniform([NUM_FEATURES, HIDDEN_1_SIZE], maxval=1.0))
        b_1 = tf.Variable(tf.random_uniform([HIDDEN_1_SIZE], maxval=1.0))

        W_2 = tf.Variable(tf.random_uniform([HIDDEN_1_SIZE, HIDDEN_2_SIZE], maxval=1.0))
        b_2 = tf.Variable(tf.random_uniform([HIDDEN_2_SIZE], maxval=1.0))

        W_3 = tf.Variable(tf.random_uniform([HIDDEN_2_SIZE, HIDDEN_3_SIZE], maxval=1.0))
        b_3 = tf.Variable(tf.random_uniform([HIDDEN_3_SIZE], maxval=1.0))

        x_drop = tf.nn.dropout(self.x, KEEP_PROB_INPUT)

        h_1 = tf.nn.tanh(tf.matmul(x_drop, W_1) + b_1)
        h_1_drop = tf.nn.dropout(h_1, KEEP_PROB_HIDDEN)

        h_2 = tf.nn.tanh(tf.matmul(h_1_drop, W_2) + b_2)
        h_2_drop = tf.nn.dropout(h_2, KEEP_PROB_HIDDEN)

        h_3 = tf.matmul(h_2_drop, W_3) + b_3

        # self.y_pred = tf.nn.softmax(h_3)
        self.y_pred = h_3

        # self.cross_entropy = tf.reduce_mean(-tf.reduce_sum(self.y * tf.log(self.y_pred), reduction_indices=[1]))
        self.cross_entropy = tf.reduce_mean(tf.square(self.y_pred - self.y))

        self.train_step = tf.train.MomentumOptimizer(109,0.99).minimize(self.cross_entropy)

        self.sess = tf.Session()
Developer: Jerryzcn, Project: rnn_hack, Lines of code: 33, Source file: model_2.py


Example 2: build_predict

    def build_predict(self, Xnew, full_cov=False):
        """
        Xnew is a data matrix, point at which we want to predict

        This method computes

            p(F* | Y )

        where F* are points on the GP at Xnew, Y are noisy observations at X.

        """
        Kx = self.kern.K(self.X, Xnew)
        K = self.kern.K(self.X) + eye(self.num_data) * self.likelihood.variance
        L = tf.cholesky(K)
        A = tf.matrix_triangular_solve(L, Kx, lower=True)
        V = tf.matrix_triangular_solve(L, self.Y - self.mean_function(self.X))
        fmean = tf.matmul(tf.transpose(A), V) + self.mean_function(Xnew)
        if full_cov:
            fvar = self.kern.K(Xnew) - tf.matmul(tf.transpose(A), A)
            shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
            fvar = tf.tile(tf.expand_dims(fvar, 2), shape)
        else:
            fvar = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(A), 0)
            fvar = tf.tile(tf.reshape(fvar, (-1, 1)), [1, self.Y.shape[1]])
        return fmean, fvar
Developer: erenis, Project: GPflow, Lines of code: 25, Source file: gpr.py
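
For reference, the build_predict code above evaluates the standard Gaussian-process posterior; in the notation assumed here (added for clarity, not part of the original source),

$$
\mu_* = K_{x*}^\top (K_{xx} + \sigma^2 I)^{-1} (Y - m(X)) + m(X_*), \qquad
\Sigma_* = K_{**} - K_{x*}^\top (K_{xx} + \sigma^2 I)^{-1} K_{x*}
$$

with K_{x*} = kern.K(X, Xnew), K_{xx} = kern.K(X), and σ² = likelihood.variance. The two triangular solves against L = cholesky(K_{xx} + σ² I) apply the inverse without ever forming it explicitly.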


Example 3: output

    def output(self, x):

        if self.no_bias:
            return output_no_bias(self, x)

        if self.activation == 'sigmoid':
            return tf.nn.sigmoid(tf.matmul(x, self.W) + self.b)
        elif self.activation == 'relu':
            return tf.nn.relu(tf.matmul(x, self.W) + self.b)
        elif self.activation == 'relu6':
            return tf.nn.relu6(tf.matmul(x, self.W) + self.b)
        elif self.activation == 'leaky_relu':
            return tf.maximum(0.1 * (tf.matmul(x, self.W) + self.b), tf.matmul(x, self.W) + self.b)
        elif self.activation == 'leaky_relu6':
            return tf.maximum(0.1 * (tf.matmul(x, self.W) + self.b), 6)
        elif self.activation == 'linear':
            return tf.matmul(x, self.W) + self.b
        elif self.activation == 'softplus':
            return tf.nn.softplus(tf.matmul(x, self.W) + self.b)
        elif self.activation == 'tanh':
            return tf.tanh(tf.matmul(x, self.W) + self.b)
        else:
            print("No known activation function selected, using linear")
            return tf.matmul(x, self.W) + self.b
Developer: ceru23, Project: autoencoder_tf, Lines of code: 35, Source file: layer.py


Example 4: forward_propagation

def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the model: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
    
    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit
    """
    
    # Retrieve the parameters from the dictionary "parameters" 
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    W3 = parameters['W3']
    b3 = parameters['b3']
    print(W3.shape)
    
    ### START CODE HERE ### (approx. 5 lines)              # Numpy Equivalents:
    Z1 = tf.add(tf.matmul(W1,X),b1)                                              # Z1 = np.dot(W1, X) + b1
    A1 = tf.nn.relu(Z1)                                              # A1 = relu(Z1)
    Z2 = tf.add(tf.matmul(W2,A1),b2)                                              # Z2 = np.dot(W2, a1) + b2
    A2 = tf.nn.relu(Z2)                                              # A2 = relu(Z2)
    print(A2.shape)
    Z3 = tf.add(tf.matmul(W3,A2),b3)                                              # Z3 = np.dot(W3, A2) + b3
    ### END CODE HERE ###
    
    return Z3
Developer: shriavi, Project: datasciencecoursera, Lines of code: 32, Source file: Tensorflow+Tutorial.py


Example 5: model

 def model(data,text_data, train=False):
     """The Model definition."""
     # 2D convolution, with 'SAME' padding (i.e. the output feature map has
     # the same size as the input). Note that {strides} is a 4D array whose
     # shape matches the data layout: [image index, y, x, depth].
     conv = tf.nn.conv2d(data,
                         conv1_weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')
     # Bias and rectified linear non-linearity.
     relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
     # Max pooling. The kernel size spec {ksize} also follows the layout of
     # the data. Here we have a pooling window of 2, and a stride of 2.
     pool = tf.nn.max_pool(relu,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME')
     conv = tf.nn.conv2d(pool,
                         conv2_weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')
     relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
     pool = tf.nn.max_pool(relu,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME')
     print(pool.get_shape().as_list())
     conv = tf.nn.conv2d(pool,
                         conv3_weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')
     relu = tf.nn.relu(tf.nn.bias_add(conv, conv3_biases))
     pool = tf.nn.max_pool(relu,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')
                                                         
     # Reshape the feature map cuboid into a 2D matrix to feed it to the
     # fully connected layers.
     pool_shape = pool.get_shape().as_list()
     print(pool_shape)
     print(fc1_weights.get_shape().as_list())
     reshape = tf.reshape(
         pool,
         [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
     #Add text vector into account before fully connected layer
     
     reshape = tf.concat(1,[reshape,text_data])
     
     # Fully connected layer. Note that the '+' operation automatically
     # broadcasts the biases.
     hidden1 = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
     # Add a 50% dropout during training only. Dropout also scales
     # activations such that no rescaling is needed at evaluation time.
     if train:
         hidden1 = tf.nn.dropout(hidden1, 0.5, seed=SEED)
     hidden2 = tf.nn.relu(tf.matmul(hidden1, fc2_weights) + fc2_biases)
     if train:
         hidden2 = tf.nn.dropout(hidden2, 0.5, seed=SEED)
     return tf.matmul(hidden2, fc3_weights) + fc3_biases
Developer: daizhen, Project: ImagesCategory, Lines of code: 60, Source file: trainning_100_Product_Text_CNN.py


Example 6: RNN

def RNN(_X, _istate, _weights, _biases):

    # input shape: (batch_size, n_steps, 28, 28, 1)
    _X = tf.transpose(_X, [1, 0, 2, 3, 4])  # permute n_steps and batch_size
    # input shape: (n_steps=3, batch_size=20, 28, 28, 1)
    # Reshape to prepare input to hidden activation
    #_X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input)
    # Linear activation  ==> convolutional net
    #_X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']
    
    A = CNN(_X[0,:,:,:,:])
    B = CNN(_X[1,:,:,:,:])
    C = CNN(_X[2,:,:,:,:])

    # Define a lstm cell with tensorflow
    lstm_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Split data because rnn cell needs a list of inputs for the RNN inner loop
    #_X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_hidden)

    # Get lstm cell output
    outputs, states = rnn.rnn(lstm_cell, [A,B,C], initial_state=_istate)

    # Linear activation
    # Get inner loop last output
    out1 = tf.nn.relu( tf.matmul(outputs[-1], _weights['out1']) + _biases['out1'] )
    out2 = tf.matmul(out1, _weights['out2']) + _biases['out2'] 
    return out2
Developer: gantzer89, Project: sequential-operations, Lines of code: 27, Source file: load_rnn.py


Example 7: alex_net

def alex_net(_X, _dropout):
    # Reshape input picture
    _X = tf.reshape(_X, shape=[-1, 40, 40, 1])

    # First convolutional layer
    conv1 = conv2d('conv1', _X, wc1, bc1)
    pool1 = max_pool('pool1', conv1, k=2)
    norm1 = norm('norm1', pool1, lsize=4)
    norm1 = tf.nn.dropout(norm1, _dropout)

    # Second convolutional layer
    conv2 = conv2d('conv2', norm1, wc2, bc2)
    pool2 = max_pool('pool2', conv2, k=2)
    norm2 = norm('norm2', pool2, lsize=4)
    norm2 = tf.nn.dropout(norm2, _dropout)

    # Third convolutional layer
    conv3 = conv2d('conv3', norm2, wc3, bc3)
    pool3 = max_pool('pool3', conv3, k=2)
    norm3 = norm('norm3', pool3, lsize=4)
    norm3 = tf.nn.dropout(norm3, _dropout)

    # Reshape conv3 output to fit dense layer input
    dense1 = tf.reshape(norm3, [-1, wd1.get_shape().as_list()[0]])

    # Fully connected layers
    dense1 = tf.nn.relu(tf.matmul(dense1, wd1) + bd1, name='fc1')  # Relu activation
    dense2 = tf.nn.relu(tf.matmul(dense1, wd2) + bd2, name='fc2')  # Relu activation

    # Output, class prediction
    out = tf.matmul(dense2, wout) + bout
    return out
Developer: josephwandile, Project: transferable-learning-experiments, Lines of code: 32, Source file: alexnet.py


Example 8: forward

def forward(x, train, regularizer):
    # Forward pass of the first convolutional layer
    conv1_w = get_weight([CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_KERNEL_NUM], regularizer) 
    conv1_b = get_bias([CONV1_KERNEL_NUM]) 
    conv1 = conv2d(x, conv1_w) 
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_b)) 
    pool1 = max_pool_2x2(relu1) 

    # Forward pass of the second convolutional layer, initializing its corresponding variables
    conv2_w = get_weight([CONV2_SIZE, CONV2_SIZE, CONV1_KERNEL_NUM, CONV2_KERNEL_NUM],regularizer) 
    conv2_b = get_bias([CONV2_KERNEL_NUM])
    conv2 = conv2d(pool1, conv2_w) 
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_b))
    pool2 = max_pool_2x2(relu2)

    # Flatten the previous pooling layer's output pool2 (a matrix) into the input format (a vector) expected by the next fully connected layer
    pool_shape = pool2.get_shape().as_list() 
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3] 
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes]) 

    # Forward pass of the third (fully connected) layer
    fc1_w = get_weight([nodes, FC_SIZE], regularizer) # initialize the fully connected layer's weights, with regularization
    fc1_b = get_bias([FC_SIZE]) # initialize the fully connected layer's bias
    fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_w) + fc1_b) 
    if train: fc1 = tf.nn.dropout(fc1, 0.5)

    # Forward pass of the fourth (fully connected) layer, initializing its corresponding variables
    fc2_w = get_weight([FC_SIZE, OUTPUT_NODE], regularizer)
    fc2_b = get_bias([OUTPUT_NODE])
    y = tf.matmul(fc1, fc2_w) + fc2_b
    return y 
Developer: foochane, Project: Tensorflow-Learning, Lines of code: 31, Source file: mnist_lenet5_forward.py


Example 9: get_training_model

def get_training_model():
    """
    The training model acts on a batch of 128x64 windows, and outputs a
    (1 + 7 * len(common.CHARS))-vector, `v`. `v[0]` is the probability that a plate is
    fully within the image and is at the correct scale.
    
    `v[1 + i * len(common.CHARS) + c]` is the probability that the `i`'th
    character is `c`.

    """
    x, conv_layer, conv_vars = convolutional_layers()
    
    # Densely connected layer
    W_fc1 = weight_variable([32 * 8 * 128, 2048])
    b_fc1 = bias_variable([2048])

    conv_layer_flat = tf.reshape(conv_layer, [-1, 32 * 8 * 128])
    h_fc1 = tf.nn.relu(tf.matmul(conv_layer_flat, W_fc1) + b_fc1)

    # Output layer
    W_fc2 = weight_variable([2048, 1 + 7 * len(common.CHARS)])
    b_fc2 = bias_variable([1 + 7 * len(common.CHARS)])

    y = tf.matmul(h_fc1, W_fc2) + b_fc2

    return (x, y, conv_vars + [W_fc1, b_fc1, W_fc2, b_fc2])
Developer: 0x3a, Project: deep-anpr, Lines of code: 26, Source file: model.py


Example 10: model

	def model(data):
		"""
		Defines the neural-network model. Useful for applying it to different data, not only the
		training set. Defining a model function applies the same weights, since they are declared
		externally, to the input data (data), which makes it possible to predict on the validation
		and test data using the optimized weights.
		"""
		# Layer 1
		net1 = tf.nn.relu(tf.nn.conv2d(data, weights1, [1, 2, 2, 1], padding='SAME'))
		layer1 = tf.nn.relu(net1)

		# Layer 2
		net2 = tf.nn.relu(tf.nn.conv2d(layer1, weights2, [1, 2, 2, 1], padding='SAME'))
		layer2 = tf.nn.relu(net2)
	
		# Reshape layer 2
		shape = layer2.get_shape().as_list()
		reshaped_layer2 = tf.reshape(layer2, [shape[0], shape[1] * shape[2] * shape[3]])

		# Layer 3
		net3 = tf.matmul(reshaped_layer2, weights3)
		layer3 = tf.nn.relu(net3)

		# Last layer (output) (return value)
		return tf.matmul(layer3, weights4)
Developer: EEmery, Project: deep-learning, Lines of code: 25, Source file: validation-notMNIST-classifier.py


Example 11: loss_fn

 def loss_fn(w_flat):
   w = tf.reshape(w_flat, [visible_size, hidden_size])
   x = tf.matmul(data, w)
   x = tf.sigmoid(x)
   x = tf.matmul(x, w, transpose_b=True)
   x = tf.sigmoid(x)
   return tf.reduce_mean(tf.square(x-data))
Developer: yaroslavvb, Project: stuff, Lines of code: 7, Source file: eager_lbfgs.py


Example 12: runNN

def runNN (train_x, train_y, test_x, test_y, numHidden):
	print "NN({})".format(numHidden)
	session = tf.InteractiveSession()

	x = tf.placeholder("float", shape=[None, train_x.shape[1]])
	y_ = tf.placeholder("float", shape=[None, 2])

	W1 = tf.Variable(tf.truncated_normal([train_x.shape[1],numHidden], stddev=0.01))
	b1 = tf.Variable(tf.truncated_normal([numHidden], stddev=0.01))
	W2 = tf.Variable(tf.truncated_normal([numHidden,2], stddev=0.01))
	b2 = tf.Variable(tf.truncated_normal([2], stddev=0.01))

	z = tf.nn.relu(tf.matmul(x,W1) + b1)
	y = tf.nn.softmax(tf.matmul(z,W2) + b2)

	cross_entropy = -tf.reduce_sum(y_*tf.log(tf.clip_by_value(y,1e-10,1.0)))
	#cross_entropy = -tf.reduce_sum(y_*tf.log(y))
	train_step = tf.train.MomentumOptimizer(learning_rate=.001, momentum=0.1).minimize(cross_entropy)
	#train_step = tf.train.AdamOptimizer(learning_rate=.01).minimize(cross_entropy)

	session.run(tf.initialize_all_variables())
	for i in range(NUM_EPOCHS):
		offset = i*BATCH_SIZE % (train_x.shape[0] - BATCH_SIZE)
		train_step.run({x: train_x[offset:offset+BATCH_SIZE, :], y_: makeLabels(train_y[offset:offset+BATCH_SIZE])})
		if i % 100 == 0:
			util.showProgress(cross_entropy, x, y, y_, test_x, test_y)
	session.close()
Developer: jwhitehill, Project: MultiEnrollmentProjectWithDustin, Lines of code: 27, Source file: run_tensorflow.py


Example 13: main

def main(_):
  sess = tf.Session()

  # Construct the TensorFlow network.
  ph_float = tf.placeholder(tf.float32, name="ph_float")
  x = tf.transpose(ph_float, name="x")
  v = tf.Variable(np.array([[-2.0], [-3.0], [6.0]], dtype=np.float32), name="v")
  m = tf.constant(
      np.array([[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]]),
      dtype=tf.float32,
      name="m")
  y = tf.matmul(m, x, name="y")
  z = tf.matmul(m, v, name="z")

  if FLAGS.debug:
    sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type=FLAGS.ui_type)

  if FLAGS.error == "shape_mismatch":
    print(sess.run(y, feed_dict={ph_float: np.array([[0.0], [1.0], [2.0]])}))
  elif FLAGS.error == "uninitialized_variable":
    print(sess.run(z))
  elif FLAGS.error == "no_error":
    print(sess.run(y, feed_dict={ph_float: np.array([[0.0, 1.0, 2.0]])}))
  else:
    raise ValueError("Unrecognized error type: " + FLAGS.error)
Developer: Wajih-O, Project: tensorflow, Lines of code: 25, Source file: debug_errors.py


Example 14: autoencoder_contd

def autoencoder_contd(input_dim, representation):
	x = tf.placeholder(tf.float32, [None, input_dim]);
	high_decW=tf.Variable(
		initial_value=tf.random_normal(
			[representation,input_dim],
			-math.sqrt(6.0/(input_dim+representation)),
			math.sqrt(6.0/(input_dim+representation))),
		dtype=tf.float32,
		name='high_decW');
	# high_encW=tf.transpose(high_decW);
	high_encW=tf.Variable(
		initial_value=tf.random_normal(
			[input_dim, representation],
			-math.sqrt(6.0/(input_dim+representation)),
			math.sqrt(6.0/(input_dim+representation))),
		name='high_encW');
	high_encb=tf.Variable(tf.zeros([representation]),
		name='high_encb');
	z=tf.nn.sigmoid(tf.matmul(x,high_encW) + high_encb);
	hidden_weights=high_encW;
	
	high_decb=tf.Variable(
		tf.zeros([input_dim]),
		name='high_decb');
	y=tf.nn.sigmoid(tf.matmul(z,high_decW)+high_decb);
	cost=tf.nn.l2_loss(x-y);
	loss_per_pixel=tf.reduce_mean(tf.abs(x-y));
	return {'x':x,'z':z,'y':y,'cost':cost,
		'weights':hidden_weights,
		'encW':high_encW,'decW':high_decW,
		'encb':high_encb,'decb':high_decb,
		'ppx':loss_per_pixel
		};
Developer: manic-milos, Project: Autoencoders, Lines of code: 33, Source file: upscaling_ae_def.py


Example 15: BiRNN

def BiRNN(_X, _istate_fw, _istate_bw, _weights, _biases):

     # input shape: (batch_size, n_steps, n_input)
    _X = tf.transpose(_X, [1, 0, 2])  # permute n_steps and batch_size
    # Reshape to prepare input to hidden activation
    _X = tf.reshape(_X, [-1, n_input]) # (n_steps*batch_size, n_input)
    # Linear activation
    _X = tf.matmul(_X, _weights['hidden']) + _biases['hidden']

    # Define lstm cells with tensorflow
    # Forward direction cell
    lstm_fw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Backward direction cell
    lstm_bw_cell = rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0)
    # Split data because rnn cell needs a list of inputs for the RNN inner loop
    _X = tf.split(0, n_steps, _X) # n_steps * (batch_size, n_hidden)

    # Get lstm cell output
    outputs = rnn.bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, _X,
                                            initial_state_fw=_istate_fw,
                                            initial_state_bw=_istate_bw)

    # Linear activation
    # Get inner loop last output
    output = [tf.matmul(o, _weights['out']) + _biases['out'] for o in outputs]
    return output
Developer: deepakmuralidharan, Project: CM229-Genotype-Imputation-using-Bidirectional-RNN, Lines of code: 26, Source file: bi_haploid_training.py


Example 16: observation_net

    def observation_net(self, input_):
        #decoder
        # input:[B,Z]

        with tf.name_scope('observation_net'):


            n_layers = len(self.network_architecture['decoder_net'])
            # weights = self.network_weights['decoder_weights']
            # biases = self.network_weights['decoder_biases']

            for layer_i in range(n_layers):

                # input_ = tf.contrib.layers.layer_norm(input_)
                # input_ = self.transfer_fct(tf.add(tf.matmul(input_, self.params_dict['decoder_weights_l'+str(layer_i)]), self.params_dict['decoder_biases_l'+str(layer_i)]))

                input_ = self.transfer_fct(tf.contrib.layers.layer_norm(tf.add(tf.matmul(input_, self.params_dict['decoder_weights_l'+str(layer_i)]), self.params_dict['decoder_biases_l'+str(layer_i)])))
                #add batch norm here

            x_mean = tf.add(tf.matmul(input_, self.params_dict['decoder_weights_out_mean']), self.params_dict['decoder_biases_out_mean'])
            x_log_var = tf.add(tf.matmul(input_, self.params_dict['decoder_weights_out_log_var']), self.params_dict['decoder_biases_out_log_var'])

            reward_mean = tf.add(tf.matmul(input_, self.params_dict['decoder_weights_reward_mean']), self.params_dict['decoder_biases_reward_mean'])
            reward_log_var = tf.add(tf.matmul(input_, self.params_dict['decoder_weights_reward_log_var']), self.params_dict['decoder_biases_reward_log_var'])


        return x_mean, x_log_var, reward_mean, reward_log_var
Developer: chriscremer, Project: Other_Code, Lines of code: 27, Source file: DKF_9feb2017_with_policy.py


Example 17: fc_layers

    def fc_layers(self):
        # fc1
        with tf.name_scope('fc1') as scope:
            shape = int(np.prod(self.pool5.get_shape()[1:]))
            fc1w = tf.Variable(tf.truncated_normal([shape, 4096],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                                 trainable=True, name='biases')
            pool5_flat = tf.reshape(self.pool5, [-1, shape])
            fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)
            self.fc1 = tf.nn.relu(fc1l)
            self.parameters += [fc1w, fc1b]

        # fc2
        with tf.name_scope('fc2') as scope:
            fc2w = tf.Variable(tf.truncated_normal([4096, 4096],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                                 trainable=True, name='biases')
            fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
            self.fc2 = tf.nn.relu(fc2l)
            self.parameters += [fc2w, fc2b]

        # fc3
        with tf.name_scope('fc3') as scope:
            fc3w = tf.Variable(tf.truncated_normal([4096, 1000],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc3b = tf.Variable(tf.constant(1.0, shape=[1000], dtype=tf.float32),
                                 trainable=True, name='biases')
            self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
            self.parameters += [fc3w, fc3b]
Developer: muhammadzak, Project: TensorFlow-Book, Lines of code: 34, Source file: vgg16.py


Example 18: conv_net

def conv_net(_X, _weights, _biases, _dropout):
    # Reshape input picture
    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])

    # Convolution Layer
    conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'])
    # Max Pooling (down-sampling)
    conv1 = max_pool(conv1, k=2)
    # Apply Dropout
    conv1 = tf.nn.dropout(conv1, _dropout)

    # Convolution Layer
    conv2 = conv2d(conv1, _weights['wc2'], _biases['bc2'])
    # Max Pooling (down-sampling)
    conv2 = max_pool(conv2, k=2)
    # Apply Dropout
    conv2 = tf.nn.dropout(conv2, _dropout)

    # Fully connected layer
    dense1 = tf.reshape(conv2, [-1, _weights['wd1'].get_shape().as_list()[0]]) # Reshape conv2 output to fit dense layer input
    dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1'])) # Relu activation
    dense1 = tf.nn.dropout(dense1, _dropout) # Apply Dropout

    # Output, class prediction
    out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out'])
    return out
Developer: Dayz001, Project: MachineLearning, Lines of code: 26, Source file: main_load_Conv.py


Example 19: forward_propagation

def forward_propagation(images):
  with tf.variable_scope('conv1') as scope:
      W_conv1 = weight_variable([5, 5, 3, 32])
      b_conv1 = bias_variable([32])
      image_matrix = tf.reshape(images, [-1, 1750, 1750, 3])
      h_conv1 = tf.nn.sigmoid(conv2d(image_matrix, W_conv1) + b_conv1)
      _activation_summary(h_conv1)
      h_pool1 = max_pool_5x5(h_conv1)

  with tf.variable_scope('conv2') as scope:
      W_conv2 = weight_variable([5, 5, 32, 64])
      b_conv2 = bias_variable([64])
      h_conv2 = tf.nn.sigmoid(conv2d(h_pool1, W_conv2) + b_conv2)
      _activation_summary(h_conv2)
      h_pool2 = max_pool_5x5(h_conv2)

  with tf.variable_scope('conv3') as scope:
      W_conv3 = weight_variable([5, 5, 64, 128])
      b_conv3 = bias_variable([128])
      h_conv3 = tf.nn.sigmoid(conv2d(h_pool2, W_conv3) + b_conv3)
      _activation_summary(h_conv3)
      h_pool3 = max_pool_5x5(h_conv3)

  with tf.variable_scope('local3') as scope:
      W_fc1 = weight_variable([14 * 14 * 128, 256])
      b_fc1 = bias_variable([256])
      h_pool3_flat = tf.reshape(h_pool3, [-1, 14 * 14 * 128])
      h_fc1 = tf.nn.sigmoid(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
      _activation_summary(h_fc1)
      keep_prob = tf.Variable(1.0)
      W_fc2 = weight_variable([256, 4])
      b_fc2 = bias_variable([4])
      y_conv = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)
      _activation_summary(y_conv)
      return y_conv
Developer: StructML, Project: Neural-Network-Prostate, Lines of code: 35, Source file: Process.py


Example 20: dot

def dot(x, y):
    """Compute dot product between a Tensor matrix and a Tensor vector.

    If x is a ``[M x N]`` matrix, then y is an ``N``-vector.

    If x is a ``M``-vector, then y is a ``[M x N]`` matrix.

    Parameters
    ----------
    x : tf.Tensor
        ``M x N`` matrix or ``M`` vector (see above)
    y : tf.Tensor
        ``N`` vector or ``M x N`` matrix (see above)

    Returns
    -------
    tf.Tensor
        ``N``-vector
    """
    if len(x.get_shape()) == 1:
        vec = x
        mat = y
        return tf.matmul(tf.expand_dims(vec, 0), mat)
    else:
        mat = x
        vec = y
        return tf.matmul(mat, tf.expand_dims(vec, 1))
Developer: jf0510310315, Project: edward, Lines of code: 27, Source file: util.py
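
As a quick, hypothetical usage sketch for the dot helper above (not part of the original snippet; it assumes TensorFlow 1.x graph mode, with the dot function from Example 20 in scope and illustrative tensor names):

    import numpy as np
    import tensorflow as tf

    mat = tf.constant(np.arange(6.0).reshape(2, 3), dtype=tf.float32)  # [2 x 3] matrix
    vec2 = tf.constant([1.0, 2.0])                                     # length-2 vector
    vec3 = tf.constant([1.0, 2.0, 3.0])                                # length-3 vector

    with tf.Session() as sess:
        print(sess.run(dot(vec2, mat)))  # vector-matrix: [1, 2] x [2, 3] -> shape [1, 3]
        print(sess.run(dot(mat, vec3)))  # matrix-vector: [2, 3] x [3, 1] -> shape [2, 1]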



Note: The tensorflow.matmul examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's license before distributing or reusing the code. Do not reproduce without permission.


