
Python tensorflow.add Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.add function in Python. If you have been struggling with questions like what exactly tf.add does, how it is used, and what real calls look like, the hand-picked code examples below should help.



Twenty code examples of the add function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
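Before the examples, a minimal sketch of the function itself (assuming TensorFlow 1.x, which these snippets target): tf.add performs element-wise addition with NumPy-style broadcasting, and on tensors it is equivalent to the + operator.

import tensorflow as tf

a = tf.constant([1.0, 2.0, 3.0])
b = tf.constant([10.0, 20.0, 30.0])
c = tf.add(a, b)    # element-wise sum, same as a + b
d = tf.add(a, 1.0)  # the scalar 1.0 is broadcast across the tensor

with tf.Session() as sess:
    print(sess.run(c))  # [11. 22. 33.]
    print(sess.run(d))  # [2. 3. 4.]

Note that a few of the examples below were written for pre-1.0 TensorFlow and use names that were later renamed or removed, such as tf.mul, tf.div, tf.histogram_summary and tf.scalar_summary (now tf.multiply, tf.divide, tf.summary.histogram and tf.summary.scalar).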

Example 1: neural_network_model

def neural_network_model(data):

    # input_data * weights + biases

    hidden_1_layer = {'weights':tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}

    hidden_2_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}

    hidden_3_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}

    output_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases':tf.Variable(tf.random_normal([n_classes]))}

    # input_data * weights + biases

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)

    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)

    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)

    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']

    return output
Developer: rbdm | Project: satupay | Lines: 30 | Source: simple-mnist.py
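Example 1 depends on module-level globals that the snippet does not show. A minimal sketch of how it might be wired up (the layer sizes here are hypothetical, not taken from the original project):

n_nodes_hl1 = n_nodes_hl2 = n_nodes_hl3 = 500  # hypothetical hidden-layer widths
n_classes = 10                                 # e.g. the 10 MNIST digit classes

x = tf.placeholder(tf.float32, [None, 784])    # flattened 28x28 images
logits = neural_network_model(x)               # builds the three-hidden-layer MLP above

Also note that the output layer uses the + operator, which is interchangeable with tf.add.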


Example 2: cell_locate

def cell_locate(size, bbox, S):

    """ 
    locate the center of ground truth in which grid cell

    """
    x = tf.cast(tf.slice(bbox, [0,0], [-1,1]), tf.float32)
    y = tf.cast(tf.slice(bbox, [0,1], [-1,1]), tf.float32)
    w = tf.cast(tf.slice(bbox, [0,2], [-1,1]), tf.float32)
    h = tf.cast(tf.slice(bbox, [0,3], [-1,1]), tf.float32)


    height, width = size

    cell_w = width / S
    cell_h = height / S

    center_y = tf.add(y, tf.mul(h, 0.5))
    center_x = tf.add(x, tf.mul(w, 0.5))

    cell_coord_x = tf.cast(tf.div(center_x, cell_w), tf.int32)
    cell_coord_y = tf.cast(tf.div(center_y, cell_h), tf.int32)

    cell_num = tf.add(tf.mul(cell_coord_y, S), cell_coord_x)

    return cell_num
Developer: Johannes-brahms | Project: Yolo | Lines: 26 | Source: yolo_utils.py
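As a worked example of the arithmetic: for a hypothetical 448x448 image with S = 7, each cell is 64 pixels on a side. A box with (x, y, w, h) = (100, 200, 50, 60) has its center at (125, 230), which falls in cell column 125 // 64 = 1 and cell row 230 // 64 = 3, so cell_num = 3 * 7 + 1 = 22.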


Example 3: layers

def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
    """
    Create the layers for a fully convolutional network.  Build skip-layers using the vgg layers.
    :param vgg_layer3_out: TF Tensor for VGG Layer 3 output
    :param vgg_layer4_out: TF Tensor for VGG Layer 4 output
    :param vgg_layer7_out: TF Tensor for VGG Layer 7 output
    :param num_classes: Number of classes to classify
    :return: The Tensor for the last layer of output
    """
    # upsampling on layer7 by 2
    input = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, strides=(1,1), padding='same', 
                            kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                            kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    output = tf.layers.conv2d_transpose(input, num_classes, 4, strides=(2, 2), padding='same', kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))

    #skip connection followed by upsampling on layer4 by 2
    input = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, strides=(1,1), padding='same', 
                            kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                            kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    input = tf.add(input, output)
    output = tf.layers.conv2d_transpose(input, num_classes, 4, strides=(2, 2), padding='same', kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))

    #skip connection followed by upsampling on layer3 by 8
    input = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, strides=(1,1), padding='same', 
                            kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
                            kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    input = tf.add(input, output)
    nn_last_layer = tf.layers.conv2d_transpose(input, num_classes, 32, strides=(8, 8), padding='same', kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
    
    return nn_last_layer
Developer: jgliang74 | Project: CarND-Semantic-Segmentation | Lines: 30 | Source: main.py
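The two tf.add calls are the skip connections of the FCN-8s architecture: the 1x1-projected layer 7 output is upsampled 2x and fused with the projected layer 4 output, that sum is upsampled 2x again and fused with the projected layer 3 output, and a final 8x transposed convolution restores the input resolution.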


Example 4: observation_net

    def observation_net(self, input_):
        #decoder
        # input:[B,Z]

        with tf.name_scope('observation_net'):


            n_layers = len(self.network_architecture['decoder_net'])
            # weights = self.network_weights['decoder_weights']
            # biases = self.network_weights['decoder_biases']

            for layer_i in range(n_layers):

                # input_ = tf.contrib.layers.layer_norm(input_)
                # input_ = self.transfer_fct(tf.add(tf.matmul(input_, self.params_dict['decoder_weights_l'+str(layer_i)]), self.params_dict['decoder_biases_l'+str(layer_i)]))

                input_ = self.transfer_fct(tf.contrib.layers.layer_norm(tf.add(tf.matmul(input_, self.params_dict['decoder_weights_l'+str(layer_i)]), self.params_dict['decoder_biases_l'+str(layer_i)])))
                #add batch norm here

            x_mean = tf.add(tf.matmul(input_, self.params_dict['decoder_weights_out_mean']), self.params_dict['decoder_biases_out_mean'])
            x_log_var = tf.add(tf.matmul(input_, self.params_dict['decoder_weights_out_log_var']), self.params_dict['decoder_biases_out_log_var'])

            reward_mean = tf.add(tf.matmul(input_, self.params_dict['decoder_weights_reward_mean']), self.params_dict['decoder_biases_reward_mean'])
            reward_log_var = tf.add(tf.matmul(input_, self.params_dict['decoder_weights_reward_log_var']), self.params_dict['decoder_biases_reward_log_var'])


        return x_mean, x_log_var, reward_mean, reward_log_var
Developer: chriscremer | Project: Other_Code | Lines: 27 | Source: DKF_9feb2017_with_policy.py


Example 5: conv_net

def conv_net(x, weights, biases, dropout):
    # Reshape input picture
    x = tf.reshape(x, shape=[-1, 28, 28, 1])

    # Convolution Layer
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])
    # Max Pooling (down-sampling)
    conv1 = maxpool2d(conv1, k=2)

    # Convolution Layer
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
    # Max Pooling (down-sampling)
    conv2 = maxpool2d(conv2, k=2)

    # Fully connected layer
    # Reshape conv2 output to fit fully connected layer input
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)

    # Output, class prediction
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    return out
Developer: ElvisLouis | Project: code | Lines: 25 | Source: example.py


Example 6: _create_dilation_layer

    def _create_dilation_layer(self, input_batch, layer_index, dilation,
                               in_channels, dilation_channels, skip_channels):
        '''Creates a single causal dilated convolution layer.

        The layer contains a gated filter that connects to dense output
        and to a skip connection:

               |-> [gate]   -|        |-> 1x1 conv -> skip output
               |             |-> (*) -|
        input -|-> [filter] -|        |-> 1x1 conv -|
               |                                    |-> (+) -> dense output
               |------------------------------------|

        Where `[gate]` and `[filter]` are causal convolutions with a
        non-linear activation at the output.
        '''
        variables = self.variables['dilated_stack'][layer_index]

        weights_filter = variables['filter']
        weights_gate = variables['gate']

        conv_filter = causal_conv(input_batch, weights_filter, dilation)
        conv_gate = causal_conv(input_batch, weights_gate, dilation)

        if self.use_biases:
            filter_bias = variables['filter_bias']
            gate_bias = variables['gate_bias']
            conv_filter = tf.add(conv_filter, filter_bias)
            conv_gate = tf.add(conv_gate, gate_bias)

        out = tf.tanh(conv_filter) * tf.sigmoid(conv_gate)

        # The 1x1 conv to produce the residual output
        weights_dense = variables['dense']
        transformed = tf.nn.conv1d(
            out, weights_dense, stride=1, padding="SAME", name="dense")

        # The 1x1 conv to produce the skip output
        weights_skip = variables['skip']
        skip_contribution = tf.nn.conv1d(
            out, weights_skip, stride=1, padding="SAME", name="skip")

        if self.use_biases:
            dense_bias = variables['dense_bias']
            skip_bias = variables['skip_bias']
            transformed = transformed + dense_bias
            skip_contribution = skip_contribution + skip_bias

        layer = 'layer{}'.format(layer_index)
        tf.histogram_summary(layer + '_filter', weights_filter)
        tf.histogram_summary(layer + '_gate', weights_gate)
        tf.histogram_summary(layer + '_dense', weights_dense)
        tf.histogram_summary(layer + '_skip', weights_skip)
        if self.use_biases:
            tf.histogram_summary(layer + '_biases_filter', filter_bias)
            tf.histogram_summary(layer + '_biases_gate', gate_bias)
            tf.histogram_summary(layer + '_biases_dense', dense_bias)
            tf.histogram_summary(layer + '_biases_skip', skip_bias)

        return skip_contribution, input_batch + transformed
Developer: hephaex | Project: tensorflow_note | Lines: 60 | Source: model.py
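In equation form, the gated activation computed above is z = tanh(W_filter * x) ⊙ σ(W_gate * x), where * denotes the dilated causal convolution and ⊙ element-wise multiplication. The residual output is input_batch plus a 1x1 convolution of z, while a separate 1x1 convolution of z feeds the skip connection that is accumulated across layers.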


Example 7: __init__

    def __init__(self, n_layers, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer()):
        self.n_layers = n_layers
        self.transfer = transfer_function

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_layers[0]])
        self.hidden_encode = []
        h = self.x
        for layer in range(len(self.n_layers)-1):
            h = self.transfer(
                tf.add(tf.matmul(h, self.weights['encode'][layer]['w']),
                       self.weights['encode'][layer]['b']))
            self.hidden_encode.append(h)

        self.hidden_recon = []
        for layer in range(len(self.n_layers)-1):
            h = self.transfer(
                tf.add(tf.matmul(h, self.weights['recon'][layer]['w']),
                       self.weights['recon'][layer]['b']))
            self.hidden_recon.append(h)
        self.reconstruction = self.hidden_recon[-1]

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)
Developer: hoysasulee | Project: models | Lines: 32 | Source: Autoencoder.py


Example 8: neural_network_model

def neural_network_model(data):
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}

    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}

    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}

    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}

    l1 = tf.add(
        tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    # rectified linear (ReLU) is the activation function; it acts like a threshold function
    l1 = tf.nn.relu(l1)

    l2 = tf.add(
        tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)

    l3 = tf.add(
        tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)

    output = tf.add(
        tf.matmul(l3, output_layer['weights']), output_layer['biases'])

    return output
Developer: malnakli | Project: ML | Lines: 30 | Source: tensorflow_1.py


Example 9: lenet3_traffic

def lenet3_traffic(features, keep_prob):
    """
    Define simple Lenet-like model with one convolution layer and three fully
    connected layers.
    """
    # Convolutional layer 1
    l1_strides = (1, 1, 1, 1)
    l1_padding = 'VALID'
    l1_conv = tf.nn.conv2d(features, L1_W, l1_strides, l1_padding)
    l1_biases = tf.nn.bias_add(l1_conv, L1_B)

    # Activation.
    l1_relu = tf.nn.relu(l1_biases)

    # Pooling. Input = 28x28xL1_DEPTH. Output = 14x14xL1_DEPTH.
    l1_pool = tf.nn.max_pool(l1_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], \
        padding='VALID')

    # Flatten. Input = 14x14xL1_DEPTH. Output = L1_SIZE.
    flat = flatten(l1_pool)
    print("Flatten dimensions:", flat.get_shape())

    # Layer 2: Fully Connected. Input = L1_SIZE. Output = L2_SIZE.
    l2_linear = tf.add(tf.matmul(flat, L2_W), L2_B)

    # Activation.
    l2_relu = tf.nn.relu(l2_linear)
    l2_drop = tf.nn.dropout(l2_relu, keep_prob)

    # Layer 3: Fully Connected. Input = 500. Output = 43.
    return tf.add(tf.matmul(l2_drop, L3_W), L3_B)
Developer: qpham01 | Project: GitHub | Lines: 31 | Source: lenet3_simple.py


Example 10: conv_basic

def conv_basic(_input, _w, _b, _keepratio):
    _input_r = tf.reshape(_input, shape=[-1, 28, 28, 1]) # Reshape input
    # conv1
    _conv1 = tf.nn.conv2d(_input_r, _w['wc1'], strides=[1, 1, 1, 1], padding='SAME') # Convolution
    _conv1 = tf.nn.batch_normalization(_conv1, 0.001, 1.0, 0, 1, 0.0001)
    _conv1 = tf.nn.relu(tf.nn.bias_add(_conv1, _b['bc1'])) # Add-bias
    _pool1 = tf.nn.max_pool(_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Max-pooling
    _pool_dr1 = tf.nn.dropout(_pool1, _keepratio)
    # conv2
    _conv2 = tf.nn.conv2d(_pool_dr1, _w['wc2'], strides=[1, 1, 1, 1], padding='SAME') # Convolution
    _conv2 = tf.nn.batch_normalization(_conv2, 0.001, 1.0, 0, 1, 0.0001)
    _conv2 = tf.nn.relu(tf.nn.bias_add(_conv2, _b['bc2'])) # Add-bias
    _pool2 = tf.nn.max_pool(_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # Max-pooling
    _pool_dr2 = tf.nn.dropout(_pool2, _keepratio)
    #vectorize
    _dense1 = tf.reshape(_pool_dr2, [-1, _w['wd1'].get_shape().as_list()[0]])
    #fc1
    _fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
    _fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
    #fc2
    _out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
    out = {
        'input_r': _input_r,
        'conv1': _conv1,
        'pool1': _pool1,
        'pool_dr1': _pool_dr1,
        'conv2': _conv2,
        'pool2': _pool2,
        'pool_dr2': _pool_dr2,
        'dense1': _dense1,
        'fc1': _fc1,
        'fc_dr1': _fc_dr1,
        'out': _out
    } # Return everything
    return out
Developer: chopin111 | Project: Magisterka | Lines: 35 | Source: cnn_mnist_basic.py


Example 11: build_net

def build_net(_X, _weights, _biases, _dropout, _pool_dim, _mean=True, _is_dropout=True, _is_relu=True):
    # Reshape input picture
    _X = tf.reshape(_X, shape=[-1, 28, 28, 1])

    # Convolution Layer
    conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'])
    # Max Pooling (down-sampling)
    if _mean:
        conv1 = mean_pool(conv1, k=_pool_dim)
    else:
        conv1 = max_pool(conv1, k=_pool_dim)
    # Apply Dropout
    if _is_dropout:
        conv1 = tf.nn.dropout(conv1, _dropout)

    # Fully connected layer
    print(_weights['wd1'])
    print(_weights['wd1'].get_shape())
    print(_weights['wd1'].get_shape().as_list())
    print(_weights['wd1'].get_shape().as_list()[0])
    dense1 = tf.reshape(conv1, [-1, _weights['wd1'].get_shape().as_list()[0]]) # Reshape conv1 output to fit dense layer input
    if _is_relu:
        dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1'])) # Relu activation
    else:
        dense1 = tf.nn.tanh(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1'])) # tanh activation
    if _is_dropout:
        dense1 = tf.nn.dropout(dense1, _dropout) # Apply Dropout

    # Output, class prediction
    out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out'])
    return out
Developer: ahasfura | Project: machine_learning | Lines: 31 | Source: katynetcopy.py


Example 12: multilayer_perceptron

def multilayer_perceptron(x, weights, biases):
    # Hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    layer_1 = tf.nn.dropout(layer_1, dropout)
    # Hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    #layer_2 = tf.nn.dropout(layer_2, dropout)
    # Hidden layer with RELU activation
    layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    layer_3 = tf.nn.relu(layer_3)
    #layer_3 = tf.nn.dropout(layer_3, dropout)
    # Hidden layer with RELU activation
    layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
    layer_4 = tf.nn.relu(layer_4)
    #layer_4 = tf.nn.dropout(layer_4, dropout)
    # Hidden layer with RELU activation
    layer_5 = tf.add(tf.matmul(layer_4, weights['h5']), biases['b5'])
    layer_5 = tf.nn.relu(layer_5)
    #layer_5 = tf.nn.dropout(layer_5, dropout)

    # Output layer with linear activation
    out_layer = tf.matmul(layer_5, weights['out']) + biases['out']
    #out_layer = tf.nn.dropout(out_layer, 0.5)
    return out_layer
Developer: Becktor | Project: speech | Lines: 26 | Source: DNN-ff.py


Example 13: test_tf_consistency

    def test_tf_consistency(self):
        """ Should get the same graph as running pure tf """

        x_val = 2702.142857
        g = tf.Graph()
        with tf.Session(graph=g) as sess:
            x = tf.placeholder(tf.double, shape=[], name="x")
            z = tf.add(x, 3, name='z')
            gdef_ref = g.as_graph_def(add_shapes=True)
            z_ref = sess.run(z, {x: x_val})

        with IsolatedSession() as issn:
            x = tf.placeholder(tf.double, shape=[], name="x")
            z = tf.add(x, 3, name='z')
            gfn = issn.asGraphFunction([x], [z])
            z_tgt = issn.run(z, {x: x_val})

        self.assertEqual(z_ref, z_tgt)

        # Version texts are not essential part of the graph, ignore them
        gdef_ref.ClearField("versions")
        gfn.graph_def.ClearField("versions")

        # The GraphDef contained in the GraphFunction object
        # should be the same as that in the one exported directly from TensorFlow session
        self.assertEqual(str(gfn.graph_def), str(gdef_ref))
Developer: mateiz | Project: spark-deep-learning | Lines: 26 | Source: test_builder.py


Example 14: fcn

def fcn(image,weights=None): ### use bilinear
    net = vgg_net(image,weights=weights)
    conv_final_layer = net["conv5_3"]
    pool5 = tf.nn.max_pool(conv_final_layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
    conv6 = tf.layers.conv2d(pool5,4096,7,padding='SAME',name='conv6')
    relu6 = tf.nn.relu(conv6, name="relu6")
    relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)

    conv7 = tf.layers.conv2d(relu_dropout6,4096,1,name="conv7")
    relu7 = tf.nn.relu(conv7, name="relu7")
    relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)

    # now to upscale to actual image size
    deconv_shape1 = net["pool4"].get_shape()
    conv8 = tf.layers.conv2d(relu_dropout7,NUM_OF_CLASSESS,1,padding="SAME")
    conv_t1 = tf.image.resize_bilinear(conv8,deconv_shape1[1:3])
    tmp1 = tf.layers.conv2d(net["pool4"],NUM_OF_CLASSESS, 1 ,padding='SAME')
    fuse_1 = tf.add(conv_t1, tmp1, name="fuse_1")

    deconv_shape2 = net["pool3"].get_shape()
    conv_t2 = tf.image.resize_bilinear(fuse_1,deconv_shape2[1:3])
    tmp2 = tf.layers.conv2d(net["pool3"],NUM_OF_CLASSESS,1,padding="SAME")
    fuse_2 = tf.add(conv_t2, tmp2, name="fuse_2")

    shape = tf.shape(image)
    conv_t3 = tf.image.resize_bilinear(fuse_2,shape[1:3])
    return conv_t3
Developer: dynasty0 | Project: test | Lines: 27 | Source: net.py


Example 15: my_conv_net

def my_conv_net(input_data):
    # First Conv-ReLU-MaxPool Layer
    conv1 = tf.nn.conv2d(input_data, conv1_weight, strides=[1, 1, 1, 1], padding='SAME')
    relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_bias))
    max_pool1 = tf.nn.max_pool(relu1, ksize=[1, max_pool_size1, max_pool_size1, 1],
                               strides=[1, max_pool_size1, max_pool_size1, 1], padding='SAME')

    # Second Conv-ReLU-MaxPool Layer
    conv2 = tf.nn.conv2d(max_pool1, conv2_weight, strides=[1, 1, 1, 1], padding='SAME')
    relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_bias))
    max_pool2 = tf.nn.max_pool(relu2, ksize=[1, max_pool_size2, max_pool_size2, 1],
                               strides=[1, max_pool_size2, max_pool_size2, 1], padding='SAME')

    # Transform Output into a 1xN layer for next fully connected layer
    final_conv_shape = max_pool2.get_shape().as_list()
    final_shape = final_conv_shape[1] * final_conv_shape[2] * final_conv_shape[3]
    flat_output = tf.reshape(max_pool2, [final_conv_shape[0], final_shape])

    # First Fully Connected Layer
    fully_connected1 = tf.nn.relu(tf.add(tf.matmul(flat_output, full1_weight), full1_bias))

    # Second Fully Connected Layer
    final_model_output = tf.add(tf.matmul(fully_connected1, full2_weight), full2_bias)
    
    return(final_model_output)
Developer: hzw1199 | Project: TensorFlow-Machine-Learning-Cookbook | Lines: 25 | Source: introductory_cnn.py


Example 16: predict_action

    def predict_action(self, prev_z):
        '''
        prev_z: [B, Z]
        return: [B, A]

        needs to be one hot because model has only seen one hot actions
        '''


        with tf.name_scope('predict_action'):

            n_layers = len(self.network_architecture['policy_net'])
            # weights = self.params_dict['policy_weights']
            # biases = self.params_dict['policy_biases']

            input_ = prev_z

            for layer_i in range(n_layers):

                input_ = self.transfer_fct(tf.add(tf.matmul(input_, self.params_dict['policy_weights_l'+str(layer_i)]), self.params_dict['policy_biases_l'+str(layer_i)])) 

            action = tf.add(tf.matmul(input_, self.params_dict['policy_weights_out_mean']), self.params_dict['policy_biases_out_mean'])



            action = tf.nn.softmax(action)

        # action = tf.argmax(action, axis=1) 
        # action = tf.one_hot(indices=action, depth=self.action_size, axis=None)

        return action
Developer: chriscremer | Project: Other_Code | Lines: 31 | Source: policy_network.py
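The commented-out lines show the alternative the docstring alludes to: collapsing the softmax output into a hard one-hot action via argmax and one_hot. As written, the function returns the soft probability vector instead.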


Example 17: conv_basic

def conv_basic(_input, _w, _b, _keepratio):
    # INPUT
    _input_r = tf.reshape(_input, shape=[-1, imgsize[0], imgsize[1], 1])
    # CONVOLUTION LAYER 1
    _conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(_input_r
        , _w['wc1'], strides=[1, 1, 1, 1], padding='SAME'), _b['bc1']))
    _pool1 = tf.nn.max_pool(_conv1, ksize=[1, 2, 2, 1]
        , strides=[1, 2, 2, 1], padding='SAME')
    _pool_dr1 = tf.nn.dropout(_pool1, _keepratio)
    # CONVOLUTION LAYER 2
    _conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(_pool_dr1
        , _w['wc2'], strides=[1, 1, 1, 1], padding='SAME'), _b['bc2']))
    _pool2 = tf.nn.max_pool(_conv2, ksize=[1, 2, 2, 1]
        , strides=[1, 2, 2, 1], padding='SAME')
    _pool_dr2 = tf.nn.dropout(_pool2, _keepratio)
    # VECTORIZE
    _dense1 = tf.reshape(_pool_dr2
                         , [-1, _w['wd1'].get_shape().as_list()[0]])
    # FULLY CONNECTED LAYER 1
    _fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
    _fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
    # FULLY CONNECTED LAYER 2
    _out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
    # RETURN
    out = {
        'input_r': _input_r, 'conv1': _conv1, 'pool1': _pool1
        , 'pool1_dr1': _pool_dr1, 'conv2': _conv2, 'pool2': _pool2
        , 'pool_dr2': _pool_dr2, 'dense1': _dense1, 'fc1': _fc1
        , 'fc_dr1': _fc_dr1, 'out': _out
    }
    return out
Developer: sungjin712 | Project: Modu_Image | Lines: 31 | Source: emotion_cnn_160927.py


Example 18: feed_forward

def feed_forward(_X,_weights,_biases,_dropout):
    """
    Args:
      _X: batch of images placeholders
      _weights: weight variables
      _biases: biases variables
      _dropout: keep probability
    Return:
      out: Predictions of the forward pass
    """
    _X = tf.reshape(_X,[-1,28,28,1])

    #Convolutional Layer 1
    conv1 = conv2d(_X,_weights['wc1'],_biases['bc1'])
    conv1 = max_pool(conv1,k=2)
    conv1 = tf.nn.dropout(conv1,_dropout)

    #Convolutional Layer 2
    conv2 = conv2d(conv1,_weights['wc2'],_biases['bc2'])
    conv2 = max_pool(conv2,k=2)
    conv2 = tf.nn.dropout(conv2,_dropout)

    #Fully Connected Layer 1
    dense1 = tf.reshape(conv2,[-1,_weights['wd1'].get_shape().as_list()[0]])
    dense1 = tf.nn.relu(tf.add(tf.matmul(dense1,_weights['wd1']),_biases['bd1']))
    dense1 = tf.nn.dropout(dense1,_dropout)

    #Prediction Layer
    out = tf.add(tf.matmul(dense1,_weights['out']),_biases['out'])
    return out
Developer: zmoon111 | Project: TensorFlow_Practice | Lines: 30 | Source: convolution.py


Example 19: drawGraph

    def drawGraph(self, n_row, n_latent, n_col):
        with tf.name_scope('matDecomp'):
            self._p = tf.placeholder(tf.float32, shape=[None, n_col])
            self._c = tf.placeholder(tf.float32, shape=[None, n_col])
            self._lambda = tf.placeholder(tf.float32)
            self._index = tf.placeholder(tf.float32, shape=[None, n_row])
            self._A = tf.Variable(tf.truncated_normal([n_row, n_latent]))
            self._B = tf.Variable(tf.truncated_normal([n_latent, n_col]))
            self._h = tf.matmul(tf.matmul(self._index, self._A), self._B) 
            
            weighted_loss = tf.reduce_mean(tf.mul(self._c, tf.squared_difference(self._p, self._h)))
            self._weighted_loss = weighted_loss
            l2_A = tf.reduce_sum(tf.square(self._A))
            l2_B = tf.reduce_sum(tf.square(self._B))
            n_w = tf.constant(n_row * n_latent + n_latent * n_col, tf.float32)
            l2 = tf.truediv(tf.add(l2_A, l2_B), n_w)
            reg_term = tf.mul(self._lambda, l2)
            self._loss = tf.add(weighted_loss, reg_term)
            
            self._mask = tf.placeholder(tf.float32, shape=[n_row, n_col])
            one = tf.constant(1, tf.float32)
            pred = tf.cast(tf.greater_equal(tf.matmul(self._A, self._B), one), tf.float32)
            cor = tf.mul(tf.cast(tf.equal(pred, self._p), tf.float32), self._c)
            self._vali_err = tf.reduce_sum(tf.mul(cor, self._mask))

            self._saver = tf.train.Saver([v for v in tf.all_variables() if v.name.find('matDecomp') != -1])
            tf.scalar_summary('training_weighted_loss_l2', self._loss)
            tf.scalar_summary('validation_weighted_loss', self._weighted_loss)
            merged = tf.merge_all_summaries()
Developer: cning | Project: ehc | Lines: 29 | Source: model.py


Example 20: cross_entropy

    def cross_entropy(u, label_u, alpha=0.5, normed=False):

        label_ip = tf.cast(
            tf.matmul(label_u, tf.transpose(label_u)), tf.float32)
        s = tf.clip_by_value(label_ip, 0.0, 1.0)

        # compute balance param
        # s_t \in {-1, 1}
        s_t = tf.multiply(tf.add(s, tf.constant(-0.5)), tf.constant(2.0))
        sum_1 = tf.reduce_sum(s)
        sum_all = tf.reduce_sum(tf.abs(s_t))
        balance_param = tf.add(tf.abs(tf.add(s, tf.constant(-1.0))),
                               tf.multiply(tf.div(sum_all, sum_1), s))

        if normed:
            # ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
            ip_1 = tf.matmul(u, tf.transpose(u))

            def reduce_shaper(t):
                return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])
            mod_1 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(u)),
                                      reduce_shaper(tf.square(u)), transpose_b=True))
            ip = tf.div(ip_1, mod_1)
        else:
            ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
        ones = tf.ones([tf.shape(u)[0], tf.shape(u)[0]])
        return tf.reduce_mean(tf.multiply(tf.log(ones + tf.exp(alpha * ip)) - s * alpha * ip, balance_param))
Developer: AllenMao | Project: DeepHash | Lines: 27 | Source: dhn.py
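Reading off the code: the quantity being averaged is log(1 + exp(α·ip)) - s·α·ip, where ip is the inner product of two code vectors u_i and u_j (or their cosine similarity when normed=True) and s is the pairwise label similarity. Each pair is weighted by balance_param, which leaves dissimilar pairs at weight 1 and up-weights similar pairs by the ratio of all pairs to similar pairs, countering label imbalance.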



Note: The tensorflow.add function examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright belongs to the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce without permission.

