
Python tensorflow.constant_initializer Function Code Examples


This article collects typical usage examples of the tensorflow.constant_initializer function in Python. If you are wondering what constant_initializer does, how to call it, or what real-world usage looks like, the curated examples below should help.



Twenty code examples of the constant_initializer function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
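Before working through the examples, here is a minimal, self-contained sketch of the typical pattern (written against the TensorFlow 1.x API used throughout this article; the variable names and shapes are illustrative only, not taken from any project below):

import tensorflow as tf

# tf.constant_initializer fills a variable with a fixed value at creation
# time. Its most common role, as in the examples below, is initializing
# biases to zero or a small constant next to randomly initialized weights.
W = tf.get_variable("W", shape=[784, 10],
                    initializer=tf.random_normal_initializer(stddev=0.1))
b = tf.get_variable("b", shape=[10],
                    initializer=tf.constant_initializer(0.0))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(b))  # prints ten zeros

Called with no argument, tf.constant_initializer() defaults to a value of 0.0, which is why several examples below pass no value at all.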

Example 1: _build_body

    def _build_body(self):
        # input projection
        _Wi = tf.get_variable('Wi', [self.obs_size, self.n_hidden],
                              initializer=xavier_initializer())
        _bi = tf.get_variable('bi', [self.n_hidden],
                              initializer=tf.constant_initializer(0.))

        # add relu/tanh here if necessary
        _projected_features = tf.matmul(self._features, _Wi) + _bi

        _lstm_f = tf.contrib.rnn.LSTMCell(self.n_hidden, state_is_tuple=True)
        _lstm_op, self._next_state = _lstm_f(inputs=_projected_features,
                                             state=(self._state_c,
                                                    self._state_h))

        # reshape LSTM's state tuple (2,n_hidden) -> (1,n_hidden*2)
        _state_reshaped = tf.concat(axis=1,
                                    values=(self._next_state.c,
                                            self._next_state.h))

        # output projection
        _Wo = tf.get_variable('Wo', [self.n_hidden*2, self.n_actions],
                              initializer=xavier_initializer())
        _bo = tf.get_variable('bo', [self.n_actions],
                              initializer=tf.constant_initializer(0.))
        # get logits
        _logits = tf.matmul(_state_reshaped, _Wo) + _bo
        # probabilities normalization : elemwise multiply with action mask
        self._probs = tf.multiply(tf.squeeze(tf.nn.softmax(_logits)),
                                  self._action_mask,
                                  name='probs')
        return _logits
Developer: CuteCha | Project: DeepPavlov | Lines: 32 | Source: network.py


Example 2: __init__

    def __init__(self, epsilon=1e-2, shape=()):

        self._sum = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(0.0),
            name="runningsum", trainable=False)
        self._sumsq = tf.get_variable(
            dtype=tf.float64,
            shape=shape,
            initializer=tf.constant_initializer(epsilon),
            name="runningsumsq", trainable=False)
        self._count = tf.get_variable(
            dtype=tf.float64,
            shape=(),
            initializer=tf.constant_initializer(epsilon),
            name="count", trainable=False)
        self.shape = shape

        self.mean = tf.to_float(self._sum / self._count)
        self.std = tf.sqrt(tf.maximum(tf.to_float(self._sumsq / self._count) - tf.square(self.mean), 1e-2))

        newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
        newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
        newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
        self.incfiltparams = U.function([newsum, newsumsq, newcount], [],
            updates=[tf.assign_add(self._sum, newsum),
                     tf.assign_add(self._sumsq, newsumsq),
                     tf.assign_add(self._count, newcount)])
Developer: IcarusTan | Project: baselines | Lines: 29 | Source: mpi_running_mean_std.py


Example 3: _initialize_weights

	def _initialize_weights(self):
		all_weights = dict()
		# Encoding layers
		for i, n_hidden in enumerate(self.hidden_units):
			weight_name = 'encoder%d_W' % i
			bias_name = 'encoder%d_b' % i
			if i == 0:
				weight_shape = [self.n_input, n_hidden]
			else:
				weight_shape = [self.hidden_units[i-1], n_hidden]

			all_weights[weight_name] = tf.get_variable(weight_name, weight_shape, 
				initializer=tf.contrib.layers.xavier_initializer())
			all_weights[bias_name] = tf.get_variable(bias_name, [n_hidden],
				initializer=tf.constant_initializer(0.0))
		
		# Decoding layers
		hidden_units_rev = self.hidden_units[::-1]
		for i, n_hidden in enumerate(hidden_units_rev):
			weight_name = 'decoder%d_W' % i
			bias_name = 'decoder%d_b' % i
			if i != len(hidden_units_rev) - 1: # not the last layer
				weight_shape = [n_hidden, hidden_units_rev[i+1]]
			else:
				weight_shape = [n_hidden, self.n_input]

			all_weights[weight_name] = tf.get_variable(weight_name, weight_shape, 
				initializer=tf.contrib.layers.xavier_initializer())
			all_weights[bias_name] = tf.get_variable(bias_name, [n_hidden],
				initializer=tf.constant_initializer(0.0))

		return all_weights
Developer: wangz10 | Project: tensorflow-playground | Lines: 32 | Source: autoencoders.py


Example 4: generator

def generator(X, batch_size=64):
    with tf.variable_scope('generator'):

        K = 256
        L = 128
        M = 64

        W1 = tf.get_variable('G_W1', [100, 7*7*K], initializer=tf.random_normal_initializer(stddev=0.1))
        B1 = tf.get_variable('G_B1', [7*7*K], initializer=tf.constant_initializer())

        W2 = tf.get_variable('G_W2', [4, 4, M, K], initializer=tf.random_normal_initializer(stddev=0.1))
        B2 = tf.get_variable('G_B2', [M], initializer=tf.constant_initializer())

        W3 = tf.get_variable('G_W3', [4, 4, 1, M], initializer=tf.random_normal_initializer(stddev=0.1))
        B3 = tf.get_variable('G_B3', [1], initializer=tf.constant_initializer())

        X = lrelu(tf.matmul(X, W1) + B1)
        X = tf.reshape(X, [batch_size, 7, 7, K])
        deconv1 = deconv(X, W2, B2, shape=[batch_size, 14, 14, M], stride=2, name='deconv1')
        bn1 = tf.contrib.layers.batch_norm(deconv1)
        deconv2 = deconv(tf.nn.dropout(lrelu(bn1), 0.4), W3, B3, shape=[batch_size, 28, 28, 1], stride=2, name='deconv2')

        XX = tf.reshape(deconv2, [-1, 28*28], 'reshape')

        return tf.nn.sigmoid(XX)
Developer: hephaex | Project: tensorflow_note | Lines: 25 | Source: model_conv.py


Example 5: __init__

    def __init__(self,sess,n_features,n_actions,lr=0.001):
        self.sess = sess

        self.s = tf.placeholder(tf.float32,[1,n_features],name='state')
        self.a = tf.placeholder(tf.int32,None,name='act')
        self.td_error = tf.placeholder(tf.float32,None,"td_error")

        with tf.variable_scope('Actor'):
            l1 = tf.layers.dense(
                inputs = self.s,
                units = 20,
                activation = tf.nn.relu,
                kernel_initializer = tf.random_normal_initializer(mean=0,stddev=0.1),
                bias_initializer = tf.constant_initializer(0.1),
                name = 'l1'
            )

            self.acts_prob = tf.layers.dense(
                inputs = l1,
                units = n_actions,
                activation = tf.nn.softmax,
                kernel_initializer = tf.random_normal_initializer(mean=0,stddev=0.1),
                bias_initializer = tf.constant_initializer(0.1),
                name = 'acts_prob'
            )


            with tf.variable_scope('exp_v'):
                log_prob = tf.log(self.acts_prob[0,self.a])
                self.exp_v = tf.reduce_mean(log_prob * self.td_error)


            with tf.variable_scope('train'):
                self.train_op =  tf.train.AdamOptimizer(lr).minimize(-self.exp_v)
Developer: huyuxiang | Project: tensorflow_practice | Lines: 34 | Source: Actor.py


Example 6: add_model

  def add_model(self, input_data):
    """Adds a linear-layer plus a softmax transformation

    The core transformation for this model which transforms a batch of input
    data into a batch of predictions. In this case, the mathematical
    transformation effected is

    y = softmax(xW + b)

    Hint: Make sure to create tf.Variables as needed. Also, make sure to use
          tf.name_scope to ensure that your name spaces are clean.
    Hint: For this simple use-case, it's sufficient to initialize both weights W
          and biases b with zeros.

    Args:
      input_data: A tensor of shape (batch_size, n_features).
    Returns:
      out: A tensor of shape (batch_size, n_classes)
    """
    ### YOUR CODE HERE
    # W = tf.Variable(tf.zeros((self.config.n_features, self.config.n_classes)), name="weights")
    # b = tf.Variable(tf.zeros((self.config.n_classes, )), name="biases")
    
    with tf.variable_scope('softmax'):
        W = tf.get_variable("weights", (self.config.n_features, self.config.n_classes),
                            initializer=tf.constant_initializer(0.0))
        b = tf.get_variable("bias", (self.config.n_classes,),
                            initializer=tf.constant_initializer(0.0))
    
    out = softmax(tf.matmul(input_data, W) + b)
    ### END YOUR CODE
    return out
Developer: lbbc1117 | Project: CS224d-2016 | Lines: 32 | Source: q1_classifier.py


Example 7: critic_network

def critic_network(states, action):
  h1_dim = 400
  h2_dim = 300

  # define policy neural network
  W1 = tf.get_variable("W1", [state_dim, h1_dim],
                       initializer=tf.contrib.layers.xavier_initializer())
  b1 = tf.get_variable("b1", [h1_dim],
                       initializer=tf.constant_initializer(0))
  h1 = tf.nn.relu(tf.matmul(states, W1) + b1)
  # skip action from the first layer
  h1_concat = tf.concat(axis=1, values=[h1, action])

  W2 = tf.get_variable("W2", [h1_dim + action_dim, h2_dim],
                       initializer=tf.contrib.layers.xavier_initializer())
  b2 = tf.get_variable("b2", [h2_dim],
                       initializer=tf.constant_initializer(0))
  h2 = tf.nn.relu(tf.matmul(h1_concat, W2) + b2)

  W3 = tf.get_variable("W3", [h2_dim, 1],
                       initializer=tf.contrib.layers.xavier_initializer())
  b3 = tf.get_variable("b3", [1],
                       initializer=tf.constant_initializer(0))
  v = tf.matmul(h2, W3) + b3
  return v
Developer: yukezhu | Project: tensorflow-reinforce | Lines: 25 | Source: run_ddpg_mujoco.py


Example 8: __call__

    def __call__(self, input_layer, epsilon=1e-5, decay=0.9, name="batch_norm",
                 in_dim=None, phase=Phase.train):
        shape = input_layer.shape
        shp = in_dim or shape[-1]
        with tf.variable_scope(name) as scope:
            self.mean = self.variable('mean', [shp], init=tf.constant_initializer(0.), train=False)
            self.variance = self.variable('variance', [shp], init=tf.constant_initializer(1.0), train=False)

            self.gamma = self.variable("gamma", [shp], init=tf.random_normal_initializer(1., 0.02))
            self.beta = self.variable("beta", [shp], init=tf.constant_initializer(0.))

            if phase == Phase.train:
                mean, variance = tf.nn.moments(input_layer.tensor, [0, 1, 2])
                mean.set_shape((shp,))
                variance.set_shape((shp,))

                update_moving_mean = moving_averages.assign_moving_average(self.mean, mean, decay)
                update_moving_variance = moving_averages.assign_moving_average(self.variance, variance, decay)

                with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                    normalized_x = tf.nn.batch_norm_with_global_normalization(
                        input_layer.tensor, mean, variance, self.beta, self.gamma, epsilon,
                        scale_after_normalization=True)
            else:
                normalized_x = tf.nn.batch_norm_with_global_normalization(
                    input_layer.tensor, self.mean, self.variance,
                    self.beta, self.gamma, epsilon,
                    scale_after_normalization=True)
            return input_layer.with_tensor(normalized_x, parameters=self.vars)
Developer: Soledad89 | Project: StackGAN | Lines: 29 | Source: custom_ops.py


Example 9: add_model

	def add_model(self, input_data):
		with tf.variable_scope("FirstConv") as CLayer1:
			w_conv1 = tf.get_variable("w_conv1", (11, 11, 1, 32), initializer=tf.truncated_normal_initializer(stddev=0.1))
			b_conv1 = tf.get_variable("b_conv1", (32), initializer=tf.constant_initializer(0.1))
			conv1 =   tf.nn.conv2d(input_data, w_conv1, strides=[1, 1, 1, 1], padding='VALID')
			hconv1 =  tf.nn.relu(conv1 + b_conv1)
			h_pool1 = tf.nn.max_pool(hconv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
			with tf.variable_scope("SecondConv") as CLayer2:
				w_conv2 = tf.get_variable("w_conv2", (11 , 11, 32, 64), initializer=tf.truncated_normal_initializer(stddev=0.1))
				b_conv2 = tf.get_variable("b_conv2", (64), initializer=tf.constant_initializer(0.1))
				conv2 =   tf.nn.conv2d(h_pool1, w_conv2, strides=[1, 1, 1, 1], padding='VALID')
				hconv2 =  tf.nn.relu(conv2 + b_conv2)
				h_pool2 = tf.nn.max_pool(hconv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
				with tf.variable_scope("FullyConnected") as FC:
					flattend_input = tf.reshape(input_data, [self.config.batch_size, -1])
					w_input = tf.get_variable("w_input", (self.config.DIM_ETA*self.config.DIM_PHI, 32), initializer=tf.truncated_normal_initializer(stddev=0.1))
					wfc1 = tf.get_variable("wfc1", (self.config.final_size*64, 32), initializer=tf.truncated_normal_initializer(stddev=0.1))
					#bfc1 = tf.get_variable("bfc1", (32), initializer=tf.constant_initializer(0.1))
					h_pool2_flat = tf.reshape(h_pool2, [-1, self.config.final_size*64])
					h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, wfc1) + tf.matmul(flattend_input, w_input))#+  bfc1) 
					h_fc1_drop = tf.nn.dropout(h_fc1, self.dropout_placeholder) 
					with tf.variable_scope("ReadoutLayer") as RL:
						wfc2 = tf.get_variable("wfc2", (32, self.config.num_classes), initializer=tf.truncated_normal_initializer(stddev=0.1))
						bfc2 = tf.get_variable("bfc2", (self.config.num_classes), initializer=tf.constant_initializer(0.1))
						y_conv = tf.matmul(h_fc1_drop, wfc2)  + bfc2 
		return y_conv
Developer: derylucio | Project: HEP-ML | Lines: 26 | Source: tfcnn.py


Example 10: get_transform

def get_transform(point_cloud, is_training, bn_decay=None, K = 3):
    """ Transform Net, input is BxNx3 gray image
        Return:
            Transformation matrix of size 3xK """
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value

    input_image = tf.expand_dims(point_cloud, -1)
    net = tf_util.conv2d(input_image, 64, [1,3], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 128, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv3', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv4', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1], padding='VALID', scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 128, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)

    with tf.variable_scope('transform_XYZ') as sc:
        assert(K==3)
        weights = tf.get_variable('weights', [128, 3*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
        biases = tf.get_variable('biases', [3*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32) + tf.constant([1,0,0,0,1,0,0,0,1], dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    #transform = tf_util.fully_connected(net, 3*K, activation_fn=None, scope='tfc3')
    transform = tf.reshape(transform, [batch_size, 3, K])
    return transform
Developer: idontlikelongname | Project: pointnet | Lines: 30 | Source: pointnet_part_seg.py


Example 11: _build_net

    def _build_net(self):
        with tf.name_scope('inputs'):
            self.tf_obs=tf.placeholder(tf.float32,[None,self.n_features],name="observations")
            self.tf_acts=tf.placeholder(tf.int32,[None,],name="actions_num")
            self.tf_vt=tf.placeholder(tf.float32,[None,],name="actions_value")

        layer=tf.layers.dense(
            inputs=self.tf_obs,
            units=10,
            activation=tf.nn.tanh,
            kernel_initializer=tf.random_normal_initializer(mean=0,stddev=0.3),
            bias_initializer=tf.constant_initializer(0.1),
            name='fc1'
        )

        all_act=tf.layers.dense(
            inputs=layer,
            units=self.n_actions,
            activation=None,
            kernel_initializer=tf.random_normal_initializer(mean=0,stddev=0.3),
            bias_initializer=tf.constant_initializer(0.1),
            name='fc2'
        )

        self.all_act_prob=tf.nn.softmax(all_act,name='act_prob')

        with tf.name_scope('loss'):
            neg_log_prob=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=all_act,labels=self.tf_acts)
            loss=tf.reduce_mean(neg_log_prob*self.tf_vt)  # maximize log_p * R as the objective

        with tf.name_scope('train'):
            self.train_op=tf.train.AdamOptimizer(self.lr).minimize(loss)
Developer: niceIrene | Project: MetisRL | Lines: 35 | Source: rl_brain.py


Example 12: get_transform_K

def get_transform_K(inputs, is_training, bn_decay=None, K = 3):
    """ Transform Net, input is BxNx1xK gray image
        Return:
            Transformation matrix of size KxK """
    batch_size = inputs.get_shape()[0].value
    num_point = inputs.get_shape()[1].value

    net = tf_util.conv2d(inputs, 256, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv1', bn_decay=bn_decay)
    net = tf_util.conv2d(net, 1024, [1,1], padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training, scope='tconv2', bn_decay=bn_decay)
    net = tf_util.max_pool2d(net, [num_point,1], padding='VALID', scope='tmaxpool')

    net = tf.reshape(net, [batch_size, -1])
    net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='tfc1', bn_decay=bn_decay)
    net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='tfc2', bn_decay=bn_decay)

    with tf.variable_scope('transform_feat') as sc:
        weights = tf.get_variable('weights', [256, K*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
        biases = tf.get_variable('biases', [K*K], initializer=tf.constant_initializer(0.0), dtype=tf.float32) + tf.constant(np.eye(K).flatten(), dtype=tf.float32)
        transform = tf.matmul(net, weights)
        transform = tf.nn.bias_add(transform, biases)

    #transform = tf_util.fully_connected(net, 3*K, activation_fn=None, scope='tfc3')
    transform = tf.reshape(transform, [batch_size, K, K])
    return transform
Developer: idontlikelongname | Project: pointnet | Lines: 26 | Source: pointnet_part_seg.py


Example 13: __init__

    def __init__(self, sess, n_features, lr=0.01):
        self.sess = sess

        self.s = tf.placeholder(tf.float32, [1, n_features], "state")
        self.v_ = tf.placeholder(tf.float32, [1, 1], "v_next")
        self.r = tf.placeholder(tf.float32, None, 'r')

        with tf.variable_scope('Critic'):
            l1 = tf.layers.dense(
                inputs=self.s,
                units=20,  # number of hidden units
                activation=tf.nn.relu,  # None
                # have to be linear to make sure the convergence of actor.
                # But linear approximator seems hardly learns the correct Q.
                kernel_initializer=tf.random_normal_initializer(0., .1),  # weights
                bias_initializer=tf.constant_initializer(0.1),  # biases
                name='l1'
            )

            self.v = tf.layers.dense(
                inputs=l1,
                units=1,  # output units
                activation=None,
                kernel_initializer=tf.random_normal_initializer(0., .1),  # weights
                bias_initializer=tf.constant_initializer(0.1),  # biases
                name='V'
            )

        with tf.variable_scope('squared_TD_error'):
            self.td_error = self.r + GAMMA * self.v_ - self.v
            self.loss = tf.square(self.td_error)    # TD_error = (r+gamma*V_next) - V_eval
        with tf.variable_scope('train'):
            self.train_op = tf.train.AdamOptimizer(lr).minimize(self.loss)
Developer: Emrys-Hong | Project: Reinforcement-learning-with-tensorflow | Lines: 33 | Source: AC_CartPole.py


Example 14: inference

def inference(images):
    with tf.variable_scope("conv1") as scope:
        kernel = _variable_with_weight_decay("weights", [5, 5, 3, 64], stddev=1e-4, wd=0.0)
        conv = conv2d_basic(images, kernel)
        bias = _variable_on_cpu("bias", [64], tf.constant_initializer(0.0))
        h_conv1 = tf.nn.relu(conv + bias, name=scope.name)
        activation_summary(h_conv1)

    # norm1
    norm1 = tf.nn.lrn(h_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm1')

    with tf.variable_scope("conv2") as scope:
        kernel = _variable_with_weight_decay("weights", [1, 1, 64, 32], stddev=1e-4, wd=0.0)
        conv = conv2d_basic(norm1, kernel)
        bias = _variable_on_cpu("bias", [32], tf.constant_initializer(0.0))
        h_conv2 = tf.nn.relu(conv + bias, name=scope.name)
        activation_summary(h_conv2)

    # norm2
    norm2 = tf.nn.lrn(h_conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
                      name='norm2')

    with tf.variable_scope("output") as scope:
        kernel = _variable_with_weight_decay("weights", [5, 5, 32, 3], stddev=1e-4, wd=0.0)
        conv = conv2d_basic(norm2, kernel)
        bias = _variable_on_cpu("bias", [3], tf.constant_initializer(0.0))
        result = tf.nn.bias_add(conv, bias, name=scope.name)

    return result
Developer: RosieCampbell | Project: TensorflowProjects | Lines: 30 | Source: Deblurring.py


Example 15: __init__

 def __init__(
         self,
         layer=None,
         act=tf.identity,
         epsilon=1e-5,
         scale_init=tf.constant_initializer(1.0),
         offset_init=tf.constant_initializer(0.0),
         G=32,
         name='group_norm',
 ):
     Layer.__init__(self, name=name)
     self.inputs = layer.outputs
     print("  [TL] GroupNormLayer %s: epsilon:%f act:%s" % (self.name, epsilon, act.__name__))
     inputs_shape = get_shape(layer.outputs)
     G = tf.minimum(G, inputs_shape[-1])
     # [N, H, W, C] to [N, C, H, W]
     temp_input = tf.transpose(self.inputs, [0, 3, 1, 2])
     temp_input = tf.reshape(temp_input, [inputs_shape[0], G, inputs_shape[-1]//G, inputs_shape[1], inputs_shape[2]],
                             name='group_reshape1')
     with tf.variable_scope(name) as vs:
         mean, var = tf.nn.moments(temp_input, [2, 3, 4], keep_dims=True)
         scale = tf.get_variable('scale', shape=[1, inputs_shape[-1], 1, 1], initializer=scale_init, dtype=D_TYPE)
         offset = tf.get_variable('offset', shape=[1, inputs_shape[-1], 1, 1], initializer=offset_init, dtype=D_TYPE)
         temp_input = (temp_input - mean) / tf.sqrt(var + epsilon)
         temp_input = tf.reshape(temp_input, shape=[inputs_shape[0], inputs_shape[-1], inputs_shape[1], inputs_shape[2]],
                                 name='group_reshape2')
         self.outputs = scale * temp_input + offset
         self.outputs = tf.transpose(self.outputs, [0, 2, 3, 1])
         self.outputs = act(self.outputs)
         variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
     self.all_layers = list(layer.all_layers)
     self.all_params = list(layer.all_params)
     self.all_drop = dict(layer.all_drop)
     self.all_layers.extend([self.outputs])
     self.all_params.extend(variables)
Developer: xy694942097 | Project: InsightFace_TF | Lines: 35 | Source: tl_layers_modify.py


Example 16: fc

    def fc(self, input, num_out, name, relu=True, trainable=True):
        with tf.variable_scope(name) as scope:
            # only use the first input
            if isinstance(input, tuple):
                input = input[0]

            input_shape = input.get_shape()
            if input_shape.ndims == 4:
                dim = 1
                for d in input_shape[1:].as_list():
                    dim *= d
                feed_in = tf.reshape(tf.transpose(input,[0,3,1,2]), [-1, dim])
            else:
                feed_in, dim = (input, int(input_shape[-1]))

            if name == 'bbox_pred':
                init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
                init_biases = tf.constant_initializer(0.0)
            else:
                init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
                init_biases = tf.constant_initializer(0.0)

            weights = self.make_var('weights', [dim, num_out], init_weights, trainable, \
                                    regularizer=self.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY))
            biases = self.make_var('biases', [num_out], init_biases, trainable)

            op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
            fc = op(feed_in, weights, biases, name=scope.name)
            return fc
Developer: beneo | Project: faster-CTPN | Lines: 29 | Source: network.py


Example 17: batch_norm

def batch_norm(x, phase_train, name='bn', decay=0.9, reuse=None, affine=True):
    """
    Batch normalization on convolutional maps.
    from: https://stackoverflow.com/questions/33949786/how-could-i-
    use-batch-normalization-in-tensorflow
    Only modified to infer shape from input tensor x.

    [DEPRECATED] Use tflearn or slim batch normalization instead.

    Parameters
    ----------
    x
        Tensor, 4D BHWD input maps
    phase_train
        boolean tf.Variable, true indicates training phase
    name
        string, variable name
    decay : float, optional
        Description
    reuse : None, optional
        Description
    affine
        whether to affine-transform outputs

    Return
    ------
    normed
        batch-normalized maps
    """
    with tf.variable_scope(name, reuse=reuse):
        shape = x.get_shape().as_list()
        beta = tf.get_variable(
            name='beta',
            shape=[shape[-1]],
            initializer=tf.constant_initializer(0.0),
            trainable=True)
        gamma = tf.get_variable(
            name='gamma',
            shape=[shape[-1]],
            initializer=tf.constant_initializer(1.0),
            trainable=affine)
        if len(shape) == 4:
            batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        else:
            batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)
        ema_apply_op = ema.apply([batch_mean, batch_var])
        ema_mean, ema_var = ema.average(batch_mean), ema.average(batch_var)

        def mean_var_with_update():
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = control_flow_ops.cond(phase_train, mean_var_with_update,
                                          lambda: (ema_mean, ema_var))

        # tf.nn.batch_normalization
        normed = tf.nn.batch_norm_with_global_normalization(
            x, mean, var, beta, gamma, 1e-6, affine)
    return normed
Developer: pradeeps | Project: pycadl | Lines: 60 | Source: batch_norm.py


Example 18: actor_network

def actor_network(states):
  h1_dim = 400
  h2_dim = 300

  # define policy neural network
  W1 = tf.get_variable("W1", [state_dim, h1_dim],
                       initializer=tf.contrib.layers.xavier_initializer())
  b1 = tf.get_variable("b1", [h1_dim],
                       initializer=tf.constant_initializer(0))
  h1 = tf.nn.relu(tf.matmul(states, W1) + b1)

  W2 = tf.get_variable("W2", [h1_dim, h2_dim],
                       initializer=tf.contrib.layers.xavier_initializer())
  b2 = tf.get_variable("b2", [h2_dim],
                       initializer=tf.constant_initializer(0))
  h2 = tf.nn.relu(tf.matmul(h1, W2) + b2)

  # use tanh to bound the action
  W3 = tf.get_variable("W3", [h2_dim, action_dim],
                       initializer=tf.contrib.layers.xavier_initializer())
  b3 = tf.get_variable("b3", [action_dim],
                       initializer=tf.constant_initializer(0))

  # we assume actions range from [-1, 1]
  # you can scale action outputs with any constant here
  a = tf.nn.tanh(tf.matmul(h2, W3) + b3)
  return a
Developer: yukezhu | Project: tensorflow-reinforce | Lines: 27 | Source: run_ddpg_mujoco.py


Example 19: __init__

 def __init__(self, max_id, shortlist_size=100, name_prefix=''):
   """Creates a new TopN."""
   self.ops = topn_ops.Load()
   self.shortlist_size = shortlist_size
   # id_to_score contains all the scores we are tracking.
   self.id_to_score = tf.get_variable(
       name=name_prefix + 'id_to_score',
       dtype=tf.float32,
       shape=[max_id],
       initializer=tf.constant_initializer(tf.float32.min))
   # sl_ids and sl_scores together satisfy four invariants:
   # 1) If sl_ids[i] != -1, then
   #    id_to_score[sl_ids[i]] = sl_scores[i] >= sl_scores[0]
   # 2) sl_ids[0] is the number of i > 0 for which sl_ids[i] != -1.
   # 3) If id_to_score[i] > sl_scores[0], then
   #    sl_ids[j] = i for some j.
   # 4) If sl_ids[i] == -1, then sl_scores[i] = tf.float32.min.
   self.sl_ids = tf.get_variable(
       name=name_prefix + 'shortlist_ids',
       dtype=tf.int64,
       shape=[shortlist_size + 1],
       initializer=tf.constant_initializer(-1))
   # Ideally, we would set self.sl_ids[0] = 0 here.  But then it is hard
   #    to pass that control dependency to the other Ops.  Instead, we
   # have insert, remove and get_best all deal with the fact that
   # self.sl_ids[0] == -1 actually means the shortlist size is 0.
   self.sl_scores = tf.get_variable(
       name=name_prefix + 'shortlist_scores',
       dtype=tf.float32,
       shape=[shortlist_size + 1],
       initializer=tf.constant_initializer(tf.float32.min))
   # TopN keeps track of its internal data dependencies, so the user
   # doesn't have to.
   self.last_ops = []
Developer: BloodD | Project: tensorflow | Lines: 34 | Source: topn.py


Example 20: discriminator

def discriminator(X, reuse=False):
    with tf.variable_scope('discriminator'):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        K = 64
        M = 128
        N = 256

        W1 = tf.get_variable('D_W1', [4, 4, 1, K], initializer=tf.random_normal_initializer(stddev=0.1))
        B1 = tf.get_variable('D_B1', [K], initializer=tf.constant_initializer())
        W2 = tf.get_variable('D_W2', [4, 4, K, M], initializer=tf.random_normal_initializer(stddev=0.1))
        B2 = tf.get_variable('D_B2', [M], initializer=tf.constant_initializer())
        W3 = tf.get_variable('D_W3', [7*7*M, N], initializer=tf.random_normal_initializer(stddev=0.1))
        B3 = tf.get_variable('D_B3', [N], initializer=tf.constant_initializer())
        W4 = tf.get_variable('D_W4', [N, 1], initializer=tf.random_normal_initializer(stddev=0.1))
        B4 = tf.get_variable('D_B4', [1], initializer=tf.constant_initializer())

        X = tf.reshape(X, [-1, 28, 28, 1], 'reshape')

        conv1 = conv(X, W1, B1, stride=2, name='conv1')
        bn1 = tf.contrib.layers.batch_norm(conv1)
        conv2 = conv(tf.nn.dropout(lrelu(bn1), 0.4), W2, B2, stride=2, name='conv2')
        # conv2 = conv(lrelu(conv1), W2, B2, stride=2, name='conv2')

        bn2 = tf.contrib.layers.batch_norm(conv2)
        flat = tf.reshape(tf.nn.dropout(lrelu(bn2), 0.4), [-1, 7*7*M], name='flat')
        # flat = tf.reshape(lrelu(conv2), [-1, 7*7*M], name='flat')

        dense = lrelu(tf.matmul(flat, W3) + B3)
        logits = tf.matmul(dense, W4) + B4
        prob = tf.nn.sigmoid(logits)
        return prob, logits
Developer: hephaex | Project: tensorflow_note | Lines: 33 | Source: model_conv.py



Note: The tensorflow.constant_initializer examples in this article were compiled by 纯净天空 from open-source projects on GitHub, MSDocs, and other source-code and documentation platforms. The copyright of each snippet belongs to its original author; consult the corresponding project's license before redistributing or using the code. Please do not reproduce this article without permission.

