Python tensorflow.random_uniform_initializer Function Code Examples


This article collects representative usage examples of the Python function tensorflow.random_uniform_initializer. If you are wondering what random_uniform_initializer does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following 20 code examples of the random_uniform_initializer function are shown, ordered by popularity by default.
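Before the examples, here is a minimal sketch of the function itself, written in the same TF 1.x graph style the examples use (the variable name, shape, and bounds are illustrative, not taken from any example below):

    import tensorflow as tf

    # random_uniform_initializer returns an initializer that fills a variable
    # with values drawn uniformly from [minval, maxval); a fixed seed makes
    # the draw reproducible.
    init = tf.random_uniform_initializer(minval=-0.1, maxval=0.1, seed=42)
    w = tf.get_variable('w', shape=[128, 10], initializer=init)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        values = sess.run(w)
        print(values.min(), values.max())  # all values lie within [-0.1, 0.1)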

Example 1: build

    def build(self):
        with tf.name_scope('weights'):
            self.W = tf.get_variable(
                shape=[self.hidden_dim, self.nb_classes],
                initializer=tf.random_uniform_initializer(-0.2, 0.2),
                # initializer=tf.truncated_normal_initializer(stddev=0.01),
                name='lstm_weights'
            )
            self.T = tf.get_variable(
                shape=[self.feat_size, self.nb_classes],
                initializer=tf.random_uniform_initializer(-0.2, 0.2),
                # initializer=tf.truncated_normal_initializer(stddev=0.01),
                name='feat_weights'
            )
            self.lstm_fw = tf.contrib.rnn.LSTMCell(self.hidden_dim)

        with tf.name_scope('biases'):
            self.b = tf.Variable(tf.zeros([self.nb_classes], name="bias"))
            # self.b = tf.get_variable(
            #     shape=[self.nb_classes],
            #     initializer=tf.truncated_normal_initializer(stddev=0.01),
            #     # initializer=tf.random_uniform_initializer(-0.2, 0.2),
            #     name="bias"
            # )
        return
Author: caoyujiALgLM | Project: LSTM-CRF | Lines: 25 | Source: hybrid_model.py


Example 2: build_lstm_forward

def build_lstm_forward(H, x, googlenet, phase, reuse):
    grid_size = H['arch']['grid_width'] * H['arch']['grid_height']
    outer_size = grid_size * H['arch']['batch_size']
    input_mean = 117.
    x -= input_mean
    Z = googlenet_load.model(x, googlenet, H)
    with tf.variable_scope('decoder', reuse=reuse):
        scale_down = 0.01
        if H['arch']['early_dropout'] and phase == 'train':
            Z = tf.nn.dropout(Z, 0.5)
        lstm_input = tf.reshape(Z * scale_down, (H['arch']['batch_size'] * grid_size, 1024))
        lstm_outputs = build_lstm_inner(lstm_input, H)

        pred_boxes = []
        pred_logits = []
        for i in range(H['arch']['rnn_len']):
            output = lstm_outputs[i]
            if H['arch']['late_dropout'] and phase == 'train':
                output = tf.nn.dropout(output, 0.5)
            box_weights = tf.get_variable('box_ip%d' % i, shape=(H['arch']['lstm_size'], 4),
                initializer=tf.random_uniform_initializer(-0.1, 0.1))
            conf_weights = tf.get_variable('conf_ip%d' % i, shape=(H['arch']['lstm_size'], 2),
                initializer=tf.random_uniform_initializer(-0.1, 0.1))
            pred_boxes.append(tf.reshape(tf.matmul(output, box_weights) * 50,
                                         [outer_size, 1, 4]))
            pred_logits.append(tf.reshape(tf.matmul(output, conf_weights),
                                         [outer_size, 1, 2]))
        pred_boxes = tf.concat(1, pred_boxes)
        pred_logits = tf.concat(1, pred_logits)
        pred_logits_squash = tf.reshape(pred_logits,
                                        [outer_size * H['arch']['rnn_len'], 2])
        pred_confidences_squash = tf.nn.softmax(pred_logits_squash)
        pred_confidences = tf.reshape(pred_confidences_squash,
                                      [outer_size, H['arch']['rnn_len'], 2])
    return pred_boxes, pred_logits, pred_confidences
Author: BlakePan | Project: TensorBox | Lines: 35 | Source: train.py


Example 3: compute_feedback

    def compute_feedback(self, inputs, full_state, layer_sizes, scope=None):
        with tf.variable_scope("Global Reset"):
            cur_state_pos = 0
            full_state_size = sum(layer_sizes)
            summation_term = tf.get_variable("summation", self.state_size, initializer=tf.constant_initializer())
            for i, layer_size in enumerate(layer_sizes):
                with tf.variable_scope("Cell%d" % i):
                    # Compute global reset gate
                    w_g = tf.get_variable("w_g", self.input_size, initializer=tf.random_uniform_initializer(-0.1, 0.1))
                    u_g = tf.get_variable("u_g", full_state_size, initializer=tf.random_uniform_initializer(-0.1, 0.1))
                    g__i_j = tf.sigmoid(tf.matmul(inputs, w_g) + tf.matmul(full_state, u_g))

                    # Accumulate sum
                    h_t_1 = \
                        tf.slice(
                                full_state,
                                [0, cur_state_pos],
                                [-1, layer_size]
                        )
                    cur_state_pos += layer_size
                    U = tf.get_variable("U", [self.input_size, self._num_units],
                                        initializer=tf.random_uniform_initializer(-0.1, 0.1))
                    b = tf.get_variable("b", self.state_size, initializer=tf.constant_initializer(1.))
                    summation_term = tf.add(summation_term, g__i_j * tf.matmul(U, h_t_1) + b)

        return summation_term
Author: ZhangBanger | Project: recurrent-network-variants | Lines: 26 | Source: feedback_cell.py


Example 4: xavier_init

def xavier_init(input_size, output_size, uniform=True):
    # Xavier/Glorot initialization; random_uniform_initializer takes
    # (minval, maxval), not a stddev, so the uniform branch passes the
    # symmetric range explicitly (cf. Example 18 below).
    if uniform:
        init_range = tf.sqrt(6.0 / (input_size + output_size))
        return tf.random_uniform_initializer(-init_range, init_range)
    else:
        stddev = tf.sqrt(3.0 / (input_size + output_size))
        return tf.truncated_normal_initializer(stddev=stddev)
Author: codertimo | Project: Tensorflow-Study | Lines: 7 | Source: 2-Xavier_init_Mnist.py


Example 5: weight

def weight(name, shape, init='he', range=None):
    """ Initializes weight.
    :param name: Variable name
    :param shape: Tensor shape
    :param init: Init mode. xavier / normal / uniform / he (default is 'he')
    :param range:
    :return: Variable
    """
    initializer = tf.constant_initializer()
    if init == 'xavier':
        fan_in, fan_out = _get_dims(shape)
        range = math.sqrt(6.0 / (fan_in + fan_out))
        initializer = tf.random_uniform_initializer(-range, range)

    elif init == 'he':
        fan_in, _ = _get_dims(shape)
        std = math.sqrt(2.0 / fan_in)
        initializer = tf.random_normal_initializer(stddev=std)

    elif init == 'normal':
        initializer = tf.random_normal_initializer(stddev=0.1)

    elif init == 'uniform':
        if range is None:
            raise ValueError("range must not be None if uniform init is used.")
        initializer = tf.random_uniform_initializer(-range, range)

    var = tf.get_variable(name, shape, initializer=initializer)
    tf.add_to_collection('l2', tf.nn.l2_loss(var))  # Add L2 Loss
    return var
Author: BabelTower | Project: dmn-tensorflow | Lines: 30 | Source: nn.py


Example 6: __init__

  def __init__(self,
               state_size,
               num_obs,
               steps_per_obs,
               sigma_min=1e-5,
               dtype=tf.float32,
               random_seed=None):
    self.state_size = state_size
    self.sigma_min = sigma_min
    self.dtype = dtype
    self.steps_per_obs = steps_per_obs
    self.num_obs = num_obs
    self.num_timesteps = num_obs * steps_per_obs + 1

    initializers = {
      "w": tf.random_uniform_initializer(seed=random_seed),
      "b": tf.zeros_initializer
    }
    self.mus = [
        snt.Linear(output_size=state_size, initializers=initializers)
        for t in xrange(self.num_timesteps)
    ]
    self.sigmas = [
        tf.get_variable(
            shape=[state_size],
            dtype=self.dtype,
            name="q_sigma_%d" % (t + 1),
            initializer=tf.random_uniform_initializer(seed=random_seed))
        for t in xrange(self.num_timesteps)
    ]
Author: 812864539 | Project: models | Lines: 30 | Source: models.py


Example 7: count_sketch

def count_sketch(probs, project_size):
    """ Calculates count-min sketch of a tensor.
    Args:
      probs: A `Tensor`
      project_size: output size (`int`)

    Returns:
      A projected count-min sketch `Tensor` with shape [batch_size, project_size].
    """
    with tf.variable_scope('CountSketch_'+probs.name.replace(':', '_')) as scope:
        input_size = int(probs.get_shape()[1])

        # h, s must be sampled once
        history = tf.get_collection('__countsketch')
        if scope.name in history: scope.reuse_variables()
        tf.add_to_collection('__countsketch', scope.name)

        h = tf.get_variable('h', [input_size], initializer=tf.random_uniform_initializer(0, project_size), trainable=False)
        s = tf.get_variable('s', [input_size], initializer=tf.random_uniform_initializer(0, 2), trainable=False)

        h = tf.cast(h, 'int32')
        s = tf.cast(tf.floor(s) * 2 - 1, 'int32') # 1 or -1

        sk = _sketch_op.count_sketch(probs, h, s, project_size)
        sk.set_shape([probs.get_shape()[0], project_size])
        return sk
Author: ml-ai-nlp-ir | Project: compact-bilinear-pooling-tf | Lines: 26 | Source: count_sketch.py


Example 8: __call__

    def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__):
            # Conveniently the concatenation of all hidden states at t-1
            h_star_t_prev = state
            u_g = tf.get_variable("u_g", [self.state_size],
                                  initializer=tf.random_uniform_initializer(-0.1, 0.1))
            cur_state_pos = 0
            cur_inp = inputs
            new_states = []
            for i, cell in enumerate(self._cells):
                with tf.variable_scope("Cell%d" % i):
                    cur_state = array_ops.slice(
                            state, [0, cur_state_pos], [-1, cell.state_size])
                    with tf.variable_scope("Global Reset"):
                        w_g = tf.get_variable("w_g", cell.state_size,
                                              initializer=tf.random_uniform_initializer(-0.1, 0.1))
                        g = tf.sigmoid(tf.mul(w_g, cur_state) + tf.mul(u_g, h_star_t_prev))
                        U = tf.get_variable("U", [cell.state_size],
                                            initializer=tf.random_uniform_initializer(-0.1, 0.1))
                        cur_state = tf.reduce_sum(g * tf.matmul(cur_state, U))

                    cur_state_pos += cell.state_size
                    cur_inp, new_state = cell(cur_inp, cur_state)
                    new_states.append(new_state)

        return cur_inp, array_ops.concat(1, new_states)
Author: jimfleming | Project: gated-recurrent-feedback | Lines: 26 | Source: feedback_cell.py


Example 9: _create_embedders

  def _create_embedders(self):

    #placeholder for input data
    self._src_input_data = tf.placeholder(tf.int32, [None, self.MAX_SEQ_LENGTH], name='source_sequence')
    self._tgt_input_data = tf.placeholder(tf.int32, [None, self.MAX_SEQ_LENGTH], name='target_sequence')
    self._labels = tf.placeholder(tf.int64, [None], name='targetSpace_labels')
    self._src_lens = tf.placeholder(tf.int32, [None], name='source_seq_lenths')
    self._tgt_lens = tf.placeholder(tf.int32, [None], name='target_seq_lenths')

    #create word embedding vectors
    self.src_word_embedding = tf.get_variable('src_word_embedding', [self.src_vocab_size, self.word_embed_size],
                                         initializer=tf.random_uniform_initializer(-0.25,0.25))

    self.tgt_word_embedding = tf.get_variable('tgt_word_embedding', [self.tgt_vocab_size, self.word_embed_size],
                                         initializer=tf.random_uniform_initializer(-0.25, 0.25))

    #transform input tensors from tokenID to word embedding
    self.src_input_distributed = tf.nn.embedding_lookup( self.src_word_embedding, self._src_input_data, name='dist_source')
    self.tgt_input_distributed = tf.nn.embedding_lookup( self.tgt_word_embedding, self._tgt_input_data, name='dist_target')


    if self.network_mode == 'source-encoder-only':
      self._source_encoder_only_network()
    elif self.network_mode == 'dual-encoder':
      self._dual_encoder_network()
    elif self.network_mode == 'shared-encoder':
      self._shared_encoder_network()
    else:
      print('Error!! Unsupported network mode: %s. Please specify one of: source-encoder-only, dual-encoder or shared-encoder.' % self.network_mode)
      exit(-1)
Author: yaoyaowd | Project: tensorflow_demo | Lines: 30 | Source: ebay_model.py


Example 10: sin_bank

def sin_bank(x, bank_size, length, scope=None):
    with tf.variable_op_scope([x], scope, "SinBank") as scope:
        bank = tf.get_variable("bank", dtype=tf.float32, shape=[bank_size, ],
                        initializer=tf.random_uniform_initializer(0.0, length))
        shift = tf.get_variable("shift", dtype=tf.float32, shape=[bank_size, ],
                        initializer=tf.random_uniform_initializer(0.0, length))
        if not tf.get_variable_scope().reuse:
            tf.histogram_summary(bank.name, bank)
        return tf.sin(x*bank+shift)
Author: lukemetz | Project: cppn | Lines: 9 | Source: adv_cppn_model.py


Example 11: _build_net

    def _build_net(self):
        with tf.name_scope('inputs'):
            self.tf_obs = tf.placeholder(tf.float32, [None, self.n_features], name="observations")
            self.tf_acts = tf.placeholder(tf.int32, [None, ], name="actions")
            self.tf_vt = tf.placeholder(tf.float32, [None, ], name="action_values")

        layer_1 = tf.layers.dense(
                inputs=self.tf_obs,
                units=H,
                activation=tf.nn.tanh,
                kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
                # kernel_initializer=tf.random_uniform_initializer(-0.23, 0.23),
                bias_initializer=tf.constant_initializer(0),
                name='h_layer1',
        )
        layer_2 = tf.layers.dense(
                inputs=layer_1,
                units=H,
                activation=tf.nn.tanh,
                # kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
                kernel_initializer=tf.random_uniform_initializer(-0.23, 0.23),
                bias_initializer=tf.constant_initializer(0),
                name='h_layer2',
        )
        all_act = tf.layers.dense(
                inputs=layer_2,
                units=self.n_actions,
                activation=tf.nn.tanh,
                kernel_initializer=tf.random_uniform_initializer(-0.23, 0.23),
                # kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
                # kernel_initializer=tf.truncated_normal_initializer(mean=0, stddev=0.3),
                bias_initializer=tf.constant_initializer(0),
                name='output'
        )

        self.all_act_prob = tf.nn.softmax(all_act, name='act_prob')

        with tf.name_scope('loss'):
            neg_log_prob = tf.reduce_sum(-tf.log(self.all_act_prob) * tf.one_hot(self.tf_acts, self.n_actions), axis=1)
            loss = tf.reduce_mean(neg_log_prob * self.tf_vt)

        with tf.name_scope('optimizer'):
            self.train = tf.train.AdamOptimizer(self.learning_rate).minimize(loss)
Author: sarikayamehmet | Project: RL_Driverless | Lines: 54 | Source: RL.py


Example 12: dense_layer

    def dense_layer(self, input, out_dim, name, func=tf.nn.relu):
        in_dim = input.get_shape().as_list()[-1]
        d = 1.0 / np.sqrt(in_dim)
        with tf.variable_scope(name):
            w_init = tf.random_uniform_initializer(-d, d)
            b_init = tf.random_uniform_initializer(-d, d)
            w = tf.get_variable('w', dtype=tf.float32, shape=[in_dim, out_dim], initializer=w_init)
            b = tf.get_variable('b', shape=[out_dim], initializer=b_init)

            output = tf.matmul(input, w) + b
            if func is not None:
                output = func(output)

        return output
Author: superjax | Project: NNOA | Lines: 14 | Source: NetworkVP.py


Example 13: __init__

    def __init__(self, embedding_dim=100, batch_size=64, n_hidden=100, learning_rate=0.01,
                 n_class=3, max_sentence_len=50, l2_reg=0., display_step=4, n_iter=100, type_=''):
        self.embedding_dim = embedding_dim
        self.batch_size = batch_size
        self.n_hidden = n_hidden
        self.learning_rate = learning_rate
        self.n_class = n_class
        self.max_sentence_len = max_sentence_len
        self.l2_reg = l2_reg
        self.display_step = display_step
        self.n_iter = n_iter
        self.type_ = type_
        self.word_id_mapping, self.w2v = load_w2v(FLAGS.embedding_file_path, self.embedding_dim)
        self.word_embedding = tf.constant(self.w2v, name='word_embedding')
        # self.word_embedding = tf.Variable(self.w2v, name='word_embedding')
        # self.word_id_mapping = load_word_id_mapping(FLAGS.word_id_file_path)
        # self.word_embedding = tf.Variable(
        #     tf.random_uniform([len(self.word_id_mapping), self.embedding_dim], -0.1, 0.1), name='word_embedding')

        self.dropout_keep_prob = tf.placeholder(tf.float32)
        with tf.name_scope('inputs'):
            self.x = tf.placeholder(tf.int32, [None, self.max_sentence_len])
            self.y = tf.placeholder(tf.int32, [None, self.n_class])
            self.sen_len = tf.placeholder(tf.int32, None)

            self.x_bw = tf.placeholder(tf.int32, [None, self.max_sentence_len])
            self.y_bw = tf.placeholder(tf.int32, [None, self.n_class])
            self.sen_len_bw = tf.placeholder(tf.int32, [None])

            self.target_words = tf.placeholder(tf.int32, [None, 1])

        with tf.name_scope('weights'):
            self.weights = {
                'softmax_bi_lstm': tf.get_variable(
                    name='bi_lstm_w',
                    shape=[2 * self.n_hidden, self.n_class],
                    initializer=tf.random_uniform_initializer(-0.003, 0.003),
                    regularizer=tf.contrib.layers.l2_regularizer(self.l2_reg)
                )
            }

        with tf.name_scope('biases'):
            self.biases = {
                'softmax_bi_lstm': tf.get_variable(
                    name='bi_lstm_b',
                    shape=[self.n_class],
                    initializer=tf.random_uniform_initializer(-0.003, 0.003),
                    regularizer=tf.contrib.layers.l2_regularizer(self.l2_reg)
                )
            }
Author: haozijie | Project: TD-LSTM | Lines: 50 | Source: tc_lstm.py


Example 14: testRandomInitializer

  def testRandomInitializer(self):
    # Sanity check that the slices use a different seed when using a random
    # initializer function.
    with self.test_session():
      var0, var1 = tf.create_partitioned_variables(
          [20, 12], [1, 2], tf.random_uniform_initializer())
      tf.global_variables_initializer().run()
      val0, val1 = var0.eval().flatten(), var1.eval().flatten()
      self.assertTrue(np.linalg.norm(val0 - val1) > 1e-6)
    # Negative test that proves that slices have the same values if
    # the random initializer uses a seed.
    with self.test_session():
      var0, var1 = tf.create_partitioned_variables(
          [20, 12], [1, 2], tf.random_uniform_initializer(seed=201))
      tf.global_variables_initializer().run()
      val0, val1 = var0.eval().flatten(), var1.eval().flatten()
      self.assertAllClose(val0, val1)
Author: shakamunyi | Project: tensorflow | Lines: 15 | Source: partitioned_variables_test.py


Example 15: get_params

  def get_params(self):
    n_first_layer = self.n_inputs + self.n_heads * self.mem_ncols
    init_min = -0.1
    init_max = 0.1
    weights = {
      "hidden": tf.get_variable(
        name="hidden_weight",
        shape=[n_first_layer, self.n_hidden],
        initializer=tf.random_uniform_initializer(init_min, init_max),
      ),
      "output": tf.get_variable(
        name="output_weight",
        shape=[self.n_hidden, self.n_outputs],
        initializer=tf.random_uniform_initializer(init_min, init_max),
      ),
    }
    biases = {
      "hidden": tf.get_variable(
        name="hidden_bias",
        shape=[self.n_hidden],
        initializer=tf.constant_initializer(0),
      ),
      "output": tf.get_variable(
        name="output_bias",
        shape=[self.n_outputs],
        initializer=tf.constant_initializer(0),
      ),
    }

    for i in xrange(self.n_heads):
      self.add_head_params(
        weights=weights,
        biases=biases,
        i=i,
        init_min=init_min,
        init_max=init_max,
        is_write=True,
      )
      self.add_head_params(
        weights=weights,
        biases=biases,
        i=i,
        init_min=init_min,
        init_max=init_max,
        is_write=False,
      )

    return weights, biases
Author: yeoedward | Project: Neural-Turing-Machine | Lines: 48 | Source: ntm.py


Example 16: __call__

    def __call__(self, inputs, state, full_state, layer_sizes, scope=None):
        """
        Recurrence functionality here
        In contrast to tensorflow implementation, variables will be more explicit
        :param inputs: 2D Tensor with shape [batch_size x self.input_size]
        :param state: 2D Tensor with shape [batch_size x self.state_size]
        :param full_state: 2D Tensor with shape [batch_size x self.full_state_size]
        :param scope: VariableScope for the created subgraph; defaults to class name
        :return:
            h_t - Output: A 2D Tensor with shape [batch_size x self.output_size]
            h_t - New state: A 2D Tensor with shape [batch_size x self.state_size].
            (the new state is also the output in a GRU cell)
        """
        with tf.variable_scope(scope or type(self).__name__):
            h_t_prev, _ = tf.split(1, 2, state)
            x_t = inputs
            with tf.variable_scope("Update Gate"):
                W_z = tf.get_variable("W_z", [self.input_size, self._num_units],
                                      initializer=tf.random_uniform_initializer(-0.1, 0.1))
                # U_z multiplies h_t_prev (shape [batch, num_units]), so it must be
                # square; initializer is passed by keyword because get_variable's
                # third positional argument is dtype.
                U_z = tf.get_variable("U_z", [self._num_units, self._num_units],
                                      initializer=tf.random_uniform_initializer(-0.1, 0.1))
                b_z = tf.get_variable("b_z", [self._num_units], initializer=tf.constant_initializer(0.0))

                z_t = tf.sigmoid(tf.matmul(x_t, W_z) + tf.matmul(h_t_prev, U_z) + b_z, name="z_t")

            with tf.variable_scope("Reset Gate"):
                W_r = tf.get_variable("W_r", [self.input_size, self._num_units],
                                      initializer=tf.random_uniform_initializer(-0.1, 0.1))
                U_r = tf.get_variable("U_r", [self._num_units, self._num_units],
                                      initializer=tf.random_uniform_initializer(-0.1, 0.1))
                b_r = tf.get_variable("b_r", [self._num_units], initializer=tf.constant_initializer(1.0))

                r_t = tf.sigmoid(tf.matmul(x_t, W_r) + tf.matmul(h_t_prev, U_r) + b_r, name="r_t")

            with tf.variable_scope("Candidate"):
                # New memory content
                W = tf.get_variable("W", [self.input_size, self._num_units],
                                    initializer=tf.random_uniform_initializer(-0.1, 0.1))

                b = tf.get_variable("b", [self._num_units], tf.constant_initializer(0.0))

                summation_term = self.compute_feedback(x_t, full_state, layer_sizes)
                hc_t = tf.tanh(tf.matmul(x_t, W) + tf.mul(r_t, summation_term))

            with tf.Variable("Output"):
                h_t = tf.mul(z_t, hc_t) + tf.mul((1 - z_t), h_t_prev)

        return h_t, h_t
Author: ZhangBanger | Project: recurrent-network-variants | Lines: 48 | Source: gated_recurrent.py


Example 17: testLSTMBasicToBlockPeeping

  def testLSTMBasicToBlockPeeping(self):
    with self.test_session(use_gpu=self._use_gpu) as sess:
      batch_size = 2
      input_size = 3
      cell_size = 4
      sequence_length = 5

      inputs = []
      for _ in range(sequence_length):
        inp = tf.convert_to_tensor(
            np.random.randn(batch_size, input_size),
            dtype=tf.float32)
        inputs.append(inp)

      initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=19890212)
      with tf.variable_scope("basic", initializer=initializer):
        cell = tf.nn.rnn_cell.LSTMCell(cell_size,
                                       use_peepholes=True,
                                       state_is_tuple=True)
        outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)

        sess.run([tf.initialize_all_variables()])
        basic_outputs = sess.run(outputs)
        basic_grads = sess.run(tf.gradients(outputs, inputs))
        basic_wgrads = sess.run(tf.gradients(outputs, tf.trainable_variables()))

      with tf.variable_scope("block", initializer=initializer):
        w = tf.get_variable("w",
                            shape=[input_size + cell_size, cell_size * 4],
                            dtype=tf.float32)
        b = tf.get_variable("b",
                            shape=[cell_size * 4],
                            dtype=tf.float32,
                            initializer=tf.zeros_initializer)

        wci = tf.get_variable("wci", shape=[cell_size], dtype=tf.float32)
        wcf = tf.get_variable("wcf", shape=[cell_size], dtype=tf.float32)
        wco = tf.get_variable("wco", shape=[cell_size], dtype=tf.float32)

        _, _, _, _, _, _, outputs = fused_lstm(
            tf.convert_to_tensor(sequence_length,
                                 dtype=tf.int64),
            inputs,
            w,
            b,
            wci=wci,
            wcf=wcf,
            wco=wco,
            cell_clip=0,
            use_peephole=True)

        sess.run([tf.initialize_all_variables()])
        block_outputs = sess.run(outputs)
        block_grads = sess.run(tf.gradients(outputs, inputs))
        block_wgrads = sess.run(tf.gradients(outputs, [w, b, wci, wcf, wco]))

      self.assertAllClose(basic_outputs, block_outputs)
      self.assertAllClose(basic_grads, block_grads)
      for basic, block in zip(basic_wgrads, block_wgrads):
        self.assertAllClose(basic, block, rtol=1e-2, atol=1e-2)
Author: 10imaging | Project: tensorflow | Lines: 60 | Source: lstm_ops_test.py


Example 18: xavier_init

def xavier_init( n_inputs, n_outputs, uniform=True ):
    if uniform:
        init_range = tf.sqrt( 6.0 / (n_inputs + n_outputs) )
        return tf.random_uniform_initializer( -init_range, init_range )
    else:
        stddev = tf.sqrt( 3.0 / (n_inputs + n_outputs) )
        return tf.truncated_normal_initializer( stddev=stddev )
Author: yeoshim | Project: MLWTF | Lines: 7 | Source: DNNwXavier4MNIST.py
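As a quick usage sketch for Example 18, the returned initializer would typically be handed to tf.get_variable; the layer sizes here are illustrative:

    W1 = tf.get_variable('W1', shape=[784, 256],
                         initializer=xavier_init(784, 256))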


Example 19: modular_layer

def modular_layer(inputs, modules: ModulePool, parallel_count: int, context: ModularContext):
    with tf.variable_scope(None, 'modular_layer'):
        inputs = context.begin_modular(inputs)

        flat_inputs = tf.layers.flatten(inputs)
        logits = tf.layers.dense(flat_inputs, modules.module_count * parallel_count)
        logits = tf.reshape(logits, [-1, parallel_count, modules.module_count])
        ctrl = tfd.Categorical(logits)

        initializer = tf.random_uniform_initializer(maxval=modules.module_count, dtype=tf.int32)
        shape = [context.dataset_size, parallel_count]
        best_selection_persistent = tf.get_variable('best_selection', shape, tf.int32, initializer)

        if context.mode == ModularMode.E_STEP:
            # 1 x batch_size x 1
            best_selection = tf.gather(best_selection_persistent, context.data_indices)[tf.newaxis]
            # sample_size x batch_size x 1
            sampled_selection = tf.reshape(ctrl.sample(), [context.sample_size, -1, parallel_count])
            selection = tf.concat([best_selection, sampled_selection[1:]], axis=0)
            selection = tf.reshape(selection, [-1, parallel_count])
        elif context.mode == ModularMode.M_STEP:
            selection = tf.gather(best_selection_persistent, context.data_indices)
        elif context.mode == ModularMode.EVALUATION:
            selection = ctrl.mode()
        else:
            raise ValueError('Invalid modular mode')

        attrs = ModularLayerAttributes(selection, best_selection_persistent, ctrl)
        context.layers.append(attrs)

        return run_modules(inputs, selection, modules.module_fnc, modules.output_shape)
Author: timediv | Project: libmodular | Lines: 31 | Source: layers.py


Example 20: __call__

    def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope or type(self).__name__):
            initializer = tf.random_uniform_initializer(-0.1, 0.1)

            def get_variable(name, shape):
                return tf.get_variable(name, shape, initializer=initializer, dtype=inputs.dtype)

            c_prev, y_prev = tf.split(1, 2, state)

            W_z = get_variable("W_z", [self.input_size, self._num_blocks])
            W_f = get_variable("W_f", [self.input_size, self._num_blocks])
            W_o = get_variable("W_o", [self.input_size, self._num_blocks])

            R_z = get_variable("R_z", [self._num_blocks, self._num_blocks])
            R_f = get_variable("R_f", [self._num_blocks, self._num_blocks])
            R_o = get_variable("R_o", [self._num_blocks, self._num_blocks])

            b_z = get_variable("b_z", [1, self._num_blocks])
            b_f = get_variable("b_f", [1, self._num_blocks])
            b_o = get_variable("b_o", [1, self._num_blocks])

            p_f = get_variable("p_f", [self._num_blocks])
            p_o = get_variable("p_o", [self._num_blocks])

            g = h = tf.tanh

            z = g(tf.matmul(inputs, W_z) + tf.matmul(y_prev, R_z) + b_z)
            i = 1  # input gate fixed to 1: the "no input gate" (NIG) LSTM variant
            f = tf.sigmoid(tf.matmul(inputs, W_f) + tf.matmul(y_prev, R_f) + tf.mul(c_prev, p_f) + b_f)
            c = tf.mul(i, z) + tf.mul(f, c_prev)
            o = tf.sigmoid(tf.matmul(inputs, W_o) + tf.matmul(y_prev, R_o) + tf.mul(c, p_o) + b_o)
            y = tf.mul(h(c), o)

            return y, tf.concat(1, [c, y])
Author: ZhangBanger | Project: lstm_search | Lines: 34 | Source: nig.py



Note: The tensorflow.random_uniform_initializer examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects and remain the copyright of their original authors; consult each project's License before redistributing or reusing them. Do not reproduce without permission.

