Python array_ops.split Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.split. If you are wondering what exactly split does, how to call it, or where to find real usage examples, the curated code samples below should help.



The following presents 20 code examples of the split function, ordered by popularity. Note that the examples span several TensorFlow versions: projects written before TensorFlow 1.0 use the legacy signature split(axis, num_split, value), while TensorFlow 1.0 and later use split(value, num_or_size_splits, axis).
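As a quick orientation before the examples, here is a minimal, self-contained sketch of the current signature. This sketch is ours rather than taken from any of the projects below, and assumes TensorFlow 1.x or later, where tf.split wraps array_ops.split:

import tensorflow as tf

x = tf.reshape(tf.range(12), [3, 4])

# An integer num_or_size_splits divides the axis into equal pieces.
a, b = tf.split(x, num_or_size_splits=2, axis=1)              # two [3, 2] tensors

# A list gives explicit piece sizes; at most one entry may be -1 (inferred).
c, d, e = tf.split(x, num_or_size_splits=[1, 2, -1], axis=1)  # [3, 1], [3, 2], [3, 1]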

Example 1: cluster_feature_analysis

def cluster_feature_analysis(sess, user_ids):
    # Get trained parameters
    lstm_vars = [v for v in tf.all_variables() if v.name.startswith('lstm')]
    matrix_var = sess.run(lstm_vars[0])
    bias_var = sess.run(lstm_vars[1])
    
    # Split the gates
    matrix_i, matrix_j, matrix_f, matrix_o = sess.run(array_ops.split(1, 4, matrix_var))
    bias_i, bias_j, bias_f, bias_o = sess.run(array_ops.split(0, 4, bias_var))
    
    dict_i, dict_j, dict_f, dict_o = dict(), dict(), dict(), dict()
    for feature in range(len(config.feature_desc)):
        dict_i[feature] = []
        dict_j[feature] = []
        dict_f[feature] = []
        dict_o[feature] = []
    for user_id in user_ids:
        print(user_id)
        gates_i, gates_j, gates_f, gates_o = feature_importance(sess, user_id, matrix_i, 
                                                                matrix_j, matrix_f, matrix_o, 
                                                                bias_i, bias_j, bias_f, bias_o)
        for feature in range(len(config.feature_desc)):
            dict_i[feature].append(gates_i[feature])
            dict_j[feature].append(gates_j[feature])
            dict_f[feature].append(gates_f[feature])
            dict_o[feature].append(gates_o[feature])                        
    return dict_i, dict_j, dict_f, dict_o
Author: minhitbk, Project: data-science, Lines: 27, Source: lstm_analysis.py
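Example 1 uses the pre-1.0 argument order split(split_dim, num_split, value), as do Examples 4, 6, 17, and 20. For reference, a sketch of the equivalent call under the current signature (assuming matrix_var keeps its gate dimension on axis 1):

# Legacy (TF < 1.0): array_ops.split(1, 4, matrix_var)
# Current (TF >= 1.0) equivalent:
matrix_i, matrix_j, matrix_f, matrix_o = array_ops.split(
    matrix_var, num_or_size_splits=4, axis=1)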


Example 2: _ragged_split

def _ragged_split(tensor, pieces):
  """Like split for 1D tensors but allows case where len % pieces != 0.

  Args:
    tensor: T `tf.Tensor` that must be 1D.
    pieces: a positive integer specifying the number of pieces into which
      tensor should be split.

  Returns:
    list of T `tf.Tensor` of length pieces, which hold the values of
      the input tensor, in order.  The final tensor may be longer
      than the others, which will all be of equal length.

  Raises:
    ValueError: input tensor must be 1D.
  """
  shape = tensor.shape
  if 1 != len(shape):
    raise ValueError("input tensor must be 1D")
  tensor_len = shape.dims[0].value
  chunk_size = tensor_len // pieces
  with ops.colocate_with(tensor):
    if tensor_len != (pieces * chunk_size):
      # last piece will be short
      assert pieces > 1
      last_chunk_size = tensor_len - ((pieces - 1) * chunk_size)
      assert last_chunk_size > 0
      piece_lens = [chunk_size for _ in range(pieces - 1)] + [last_chunk_size]
      return array_ops.split(tensor, piece_lens)
    else:
      return array_ops.split(tensor, pieces)
Author: JonathanRaiman, Project: tensorflow, Lines: 31, Source: all_reduce.py
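A quick usage note for _ragged_split (our illustration, not from the source project): the remainder always lands in the final piece, so that piece is the longest.

# Hypothetical usage, assuming a 1-D tensor of length 10:
#   pieces = _ragged_split(tensor, 3)
# chunk_size = 10 // 3 = 3, so the piece lengths are [3, 3, 4].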


Example 3: _split_batch

def _split_batch(features, labels, number_of_shards, device):
  """Split input features and labes into batches."""

  def split_dictionary(dictionary):
    """Split a dictionary into shards."""
    shards = [{} for _ in range(number_of_shards)]
    for name, tensor in six.iteritems(dictionary):
      if isinstance(tensor, sparse_tensor.SparseTensor):
        for i, shard in enumerate(
            sparse_ops.sparse_split(
                sp_input=tensor, num_split=number_of_shards, axis=0)):
          shards[i][name] = shard
      else:
        for i, shard in enumerate(array_ops.split(tensor, number_of_shards)):
          shards[i][name] = shard
    return shards

  with ops_lib.name_scope('split_inputs'):
    with ops_lib.device(device):
      if isinstance(features, dict):
        feature_shards = split_dictionary(features)
      else:
        feature_shards = array_ops.split(features, number_of_shards)

      if labels is None:
        label_shards = None
      elif isinstance(labels, dict):
        label_shards = split_dictionary(labels)
      else:
        label_shards = array_ops.split(labels, number_of_shards)
  return feature_shards, label_shards
Author: AnddyWang, Project: tensorflow, Lines: 31, Source: replicate_model_fn.py


Example 4: __call__

    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            if self._state_is_tuple:
                c, h = state
            else:
                c, h = array_ops.split(1, 2, state)
            concat = _linear([inputs, h], 4 * self._num_units, True, 0.,
                             self.weights_init, self.trainable, self.restore,
                             self.reuse)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = array_ops.split(1, 4, concat)

            new_c = (c * self._inner_activation(f + self._forget_bias) +
                     self._inner_activation(i) *
                     self._activation(j))
            new_h = self._activation(new_c) * self._inner_activation(o)

            if self._state_is_tuple:
                new_state = _rnn_cell.LSTMStateTuple(new_c, new_h)
            else:
                new_state = array_ops.concat(1, [new_c, new_h])

            # Retrieve RNN Variables
            with tf.variable_scope('Linear', reuse=True):
                self.W = tf.get_variable('Matrix')
                self.b = tf.get_variable('Bias')

            return new_h, new_state
Author: mixml, Project: tflearn, Lines: 31, Source: recurrent.py


Example 5: call

    def call(self, inputs, state):
        sigmoid = math_ops.sigmoid
        # Parameters of gates are concatenated into one multiply for efficiency.
        if self._state_is_tuple:
            c, h = state
        else:
            c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)

        # get context from encoder outputs
        context = self._simple_attention(self._encoder_vector,
                                         self._encoder_proj, h)

        if self._linear is None:
            self._linear = _Linear([inputs, context, h], 4 * self._num_units,
                                   True)
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        i, j, f, o = array_ops.split(
            value=self._linear([inputs, context, h]),
            num_or_size_splits=4,
            axis=1)

        new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
                 self._activation(j))
        new_h = self._activation(new_c) * sigmoid(o)

        if self._state_is_tuple:
            new_state = LSTMStateTuple(new_c, new_h)
        else:
            new_state = array_ops.concat([new_c, new_h], 1)
        return new_h, new_state
Author: absorbguo, Project: Paddle, Lines: 30, Source: machine_translation.py


Example 6: get_model_params

def get_model_params(variable_prefix, split_lstm_matrices=True):
  if variable_prefix:
    exclude = [ variable_prefix+"/Variable", variable_prefix+"/Variable_1" ]
    tmp = { v.op.name: v.eval() for v in tf.global_variables() if (v.op.name.startswith(variable_prefix) and v.op.name not in exclude) }
  else:
    exclude = [ "Variable", "Variable_1" ]
    tmp = { v.op.name: v.eval() for v in tf.global_variables() if v.op.name not in exclude }
  # Rename keys
  params = {name.replace("/", "-"): param for name, param in tmp.items()}
  if split_lstm_matrices:
    for name in list(params.keys()):  # copy the keys; entries are deleted below
      if "LSTMCell" in name:
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        if "Matrix" in name:
          i, j, f, o = array_ops.split(1, 4, params[name])
        elif "Bias" in name:
          i, j, f, o = array_ops.split(0, 4, params[name])
        else:
          logging.error("Unknown tensor type..")
          exit(1)
        name_i = name.replace("LSTMCell", "LSTMCell-i")
        name_j = name.replace("LSTMCell", "LSTMCell-j")
        name_f = name.replace("LSTMCell", "LSTMCell-f")
        name_o = name.replace("LSTMCell", "LSTMCell-o")
        params[name_i] = i.eval()
        params[name_j] = j.eval()
        params[name_f] = f.eval()
        params[name_o] = o.eval()
        del params[name]
      elif "AttnV" in name:
        params[name] = array_ops.reshape(params[name], [ params[name].shape[0], 1 ]).eval()
      elif "AttnW" in name:
        # remove dims of size 1
        params[name] = tf.squeeze(params[name]).eval()
  return params
Author: ehasler, Project: tensorflow, Lines: 35, Source: model_utils.py


Example 7: testZerosCacheDoesntLeakAcrossModes

  def testZerosCacheDoesntLeakAcrossModes(self):
    with ops.Graph().as_default():
      t = random_ops.random_normal(shape=[100, 2])
      x = random_ops.random_normal(shape=[100, 4])
      dy = random_ops.random_normal(shape=[100, 4])
      with backprop.GradientTape() as gradient_tape:
        gradient_tape.watch(x)
        x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
        y1 = x1 ** 2.
        y = array_ops.concat([y1, t], axis=1)

      dx = gradient_tape.gradient(y, x, output_gradients=dy)
      with self.test_session() as sess:
        sess.run(variables.global_variables_initializer())
        sess.run(dx)

    t = random_ops.random_normal(shape=[100, 2])
    x = random_ops.random_normal(shape=[100, 4])
    dy = random_ops.random_normal(shape=[100, 4])
    with backprop.GradientTape() as gradient_tape:
      gradient_tape.watch(x)
      x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
      y1 = x1 ** 2.
      y = array_ops.concat([y1, t], axis=1)

    dx = gradient_tape.gradient(y, x, output_gradients=dy)
Author: meteorcloudy, Project: tensorflow, Lines: 26, Source: backprop_test.py


Example 8: testSplit

  def testSplit(self):
    for dtype in self.numeric_types:
      for axis in [0, -3]:
        self._testBinary(
            lambda x, y: array_ops.split(value=y, num_or_size_splits=3, axis=x),
            np.int32(axis),
            np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
                     dtype=dtype),
            expected=[
                np.array([[[1], [2]]], dtype=dtype),
                np.array([[[3], [4]]], dtype=dtype),
                np.array([[[5], [6]]], dtype=dtype),
            ],
            equality_test=self.ListsAreClose)

      for axis in [1, -2]:
        self._testBinary(
            lambda x, y: array_ops.split(value=y, num_or_size_splits=2, axis=x),
            np.int32(axis),
            np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
                     dtype=dtype),
            expected=[
                np.array([[[1]], [[3]], [[5]]], dtype=dtype),
                np.array([[[2]], [[4]], [[6]]], dtype=dtype),
            ],
            equality_test=self.ListsAreClose)
Author: craffel, Project: tensorflow, Lines: 26, Source: binary_ops_test.py


Example 9: call

  def call(self, inputs, states, training=None):
    h_tm1 = states[0]  # previous memory state
    c_tm1 = states[1]  # previous carry state

    # dropout matrices for input units
    dp_mask = self.get_dropout_mask_for_cell(inputs, training, count=4)
    # dropout matrices for recurrent units
    rec_dp_mask = self.get_recurrent_dropout_mask_for_cell(
        h_tm1, training, count=4)

    if 0 < self.dropout < 1.:
      inputs_i = inputs * dp_mask[0]
      inputs_f = inputs * dp_mask[1]
      inputs_c = inputs * dp_mask[2]
      inputs_o = inputs * dp_mask[3]
    else:
      inputs_i = inputs
      inputs_f = inputs
      inputs_c = inputs
      inputs_o = inputs

    if 0 < self.recurrent_dropout < 1.:
      h_tm1_i = h_tm1 * rec_dp_mask[0]
      h_tm1_f = h_tm1 * rec_dp_mask[1]
      h_tm1_c = h_tm1 * rec_dp_mask[2]
      h_tm1_o = h_tm1 * rec_dp_mask[3]
    else:
      h_tm1_i = h_tm1
      h_tm1_f = h_tm1
      h_tm1_c = h_tm1
      h_tm1_o = h_tm1

    (kernel_i, kernel_f,
     kernel_c, kernel_o) = array_ops.split(self.kernel, 4, axis=3)
    (recurrent_kernel_i,
     recurrent_kernel_f,
     recurrent_kernel_c,
     recurrent_kernel_o) = array_ops.split(self.recurrent_kernel, 4, axis=3)

    if self.use_bias:
      bias_i, bias_f, bias_c, bias_o = array_ops.split(self.bias, 4)
    else:
      bias_i, bias_f, bias_c, bias_o = None, None, None, None

    x_i = self.input_conv(inputs_i, kernel_i, bias_i, padding=self.padding)
    x_f = self.input_conv(inputs_f, kernel_f, bias_f, padding=self.padding)
    x_c = self.input_conv(inputs_c, kernel_c, bias_c, padding=self.padding)
    x_o = self.input_conv(inputs_o, kernel_o, bias_o, padding=self.padding)
    h_i = self.recurrent_conv(h_tm1_i, recurrent_kernel_i)
    h_f = self.recurrent_conv(h_tm1_f, recurrent_kernel_f)
    h_c = self.recurrent_conv(h_tm1_c, recurrent_kernel_c)
    h_o = self.recurrent_conv(h_tm1_o, recurrent_kernel_o)

    i = self.recurrent_activation(x_i + h_i)
    f = self.recurrent_activation(x_f + h_f)
    c = f * c_tm1 + i * self.activation(x_c + h_c)
    o = self.recurrent_activation(x_o + h_o)
    h = o * self.activation(c)
    return h, [h, c]
Author: adit-chandra, Project: tensorflow, Lines: 59, Source: convolutional_recurrent.py


Example 10: _tf_to_cudnn_biases

  def _tf_to_cudnn_biases(self, *tf_biases):
    r"""Reverse the operations in StitchBiases()."""
    # b_ir is the summed bias of reset and update gate.
    b_ir, b_wh, b_rh = tf_biases
    bi, br = b_ir * 0.5, b_ir * 0.5
    b_wi, b_wr = array_ops.split(bi, 2, axis=0)
    b_ri, b_rr = array_ops.split(br, 2, axis=0)
    return b_wi, b_wr, b_wh, b_ri, b_rr, b_rh
Author: keveman, Project: tensorflow, Lines: 8, Source: cudnn_rnn_ops.py


Example 11: testVariableShapeFunction

  def testVariableShapeFunction(self):
    # size_splits too big
    with self.assertRaises(ValueError):
      array_ops.split([0, 1], [3, -1], axis=0)

    # Correct inference of variable dimension
    s0, s1 = array_ops.split([0, 1, 2], [2, -1], axis=0)
    assert s0.shape.as_list() == [2]
    assert s1.shape.as_list() == [1]
Author: abhinav-upadhyay, Project: tensorflow, Lines: 9, Source: split_op_test.py


Example 12: _testSpecialCasesVariable

  def _testSpecialCasesVariable(self):
    inp = np.random.rand(4, 4).astype("f")

    with test_util.device(use_gpu=True):
      result = self.evaluate(array_ops.split(inp, [4], 0))
      self.assertAllEqual(result[0], inp)

      result = self.evaluate(array_ops.split(inp, [-1, 3], 0))
      self.assertAllEqual(result[0], inp[0:1, :])
      self.assertAllEqual(result[1], inp[1:4, :])
Author: AbhinavJain13, Project: tensorflow, Lines: 10, Source: split_op_test.py


Example 13: _testSpecialCasesVariable

  def _testSpecialCasesVariable(self, use_gpu):
    inp = np.random.rand(4, 4).astype("f")

    with self.test_session(use_gpu=use_gpu) as sess:
      result = sess.run(array_ops.split(inp, [4], 0))
      self.assertAllEqual(result[0], inp)

      result = sess.run(array_ops.split(inp, [-1, 3], 0))
      self.assertAllEqual(result[0], inp[0:1, :])
      self.assertAllEqual(result[1], inp[1:4, :])
Author: AliMiraftab, Project: tensorflow, Lines: 10, Source: split_op_test.py


Example 14: testInvalidNumOutputs

  def testInvalidNumOutputs(self):
    with self.assertRaisesRegexp(
        Exception,
        "Value for attr 'num_split' of -1 must be at least minimum 1"):
      array_ops.split(value=[1, 2, 3], num_or_size_splits=-1)

    with self.assertRaisesRegexp(
        Exception,
        "Value for attr 'num_split' of 0 must be at least minimum 1"):
      array_ops.split(value=[1, 2, 3], num_or_size_splits=0)
Author: gautam1858, Project: tensorflow, Lines: 10, Source: pywrap_tfe_test.py


Example 15: _untransform_gru_canonical

  def _untransform_gru_canonical(self, transformed_weights, transformed_biases):
    """The reverse procedure of _fuse_gru_canonical().

    Args:
      transformed_weights: a list of tensors, 3 for each layer. The 1st for
        reset and update gates; the 2nd and 3rd for the new memory gate.
      transformed_biases: 5 tensors each layer. The first for reset_and_update
        gate; the next two in line for candidate gate. The last 2 are original
        tensors for reset_and_update gates, retained since cuDNN biases are not
        restorable from the fused version.

    Returns:
      Two lists of tensors for weights and biases respectively.
      There are 6 tensors per weight and per bias for each layer:
      tensor 0-2 are applied to the input from the previous layer and
      tensor 3-5 to the recurrent input. Tensor 0 and 3 are for the reset gate;
      tensor 1 and 4 the update gate; tensor 2 and 5 the new memory gate.
    """
    weights, biases = [], []
    assert 5 * len(transformed_weights) == len(transformed_biases) * 3
    for i in range(len(transformed_weights) // 3):
      base_idx = 3 * i
      num_units = self._cudnn_rnn.num_units
      input_size = self._cudnn_rnn.input_size if i == 0 else num_units
      # reset and update gate weights applied on layer inputs.
      w_i = array_ops.slice(transformed_weights[base_idx], [0, 0],
                            [input_size, 2 * num_units])
      # reset and update gate weights applied on recurrent inputs.
      w_r = array_ops.slice(transformed_weights[base_idx], [input_size, 0],
                            [num_units, 2 * num_units])
      wi_list = array_ops.split(w_i, 2, axis=1)
      wr_list = array_ops.split(w_r, 2, axis=1)

      wi_list = [_flatten_transpose(w) for w in wi_list]
      wr_list = [_flatten_transpose(w) for w in wr_list]

      # candidate gate weights
      ih, hh = [
          _flatten_transpose(w)
          for w in transformed_weights[base_idx + 1:base_idx + 3]
      ]
      weights.extend(wi_list)
      weights.append(ih)
      weights.extend(wr_list)
      weights.append(hh)

      base_idx = 5 * i
      # Recover biases for reset and update gates.
      bi_list = array_ops.split(transformed_biases[base_idx + 3], 2, axis=0)
      br_list = array_ops.split(transformed_biases[base_idx + 4], 2, axis=0)
      biases.extend(bi_list)
      biases.append(transformed_biases[base_idx + 1])
      biases.extend(br_list)
      biases.append(transformed_biases[base_idx + 2])
    return weights, biases
Author: Dr4KK, Project: tensorflow, Lines: 55, Source: cudnn_rnn_ops.py


Example 16: testExplicitNum

  def testExplicitNum(self):
    size_splits = array_ops.constant([2, 2, 6], dtype=dtypes.int32)
    value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    # Eager and Graph modes raise different exceptions
    with self.assertRaises((errors_impl.InvalidArgumentError, ValueError)):
      array_ops.split(value, size_splits, num=4)

    r = self.evaluate(array_ops.split(value, size_splits, num=3))
    self.assertAllEqual(r[0], value[0:2])
    self.assertAllEqual(r[1], value[2:4])
    self.assertAllEqual(r[2], value[4:])
Author: AbhinavJain13, Project: tensorflow, Lines: 12, Source: split_op_test.py


Example 17: __call__

  def __call__(self, inputs, state, scope=None):
    """Recurrent Highway Network cell (RHN)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicRHNCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        y = state
      else:
        y = array_ops.split(1, 1, state)[0]  # legacy split returns a list; take the single piece
      assert self._recurrence_depth > 0 and type(self._recurrence_depth) is int
      # h_transform = [None] * self._recurrence_depth
      # t = [None] * self._recurrence_depth
      # s = [None] * self._recurrence_depth
      # concat = [None] * self._recurrence_depth
      # for i in range(self._recurrence_depth):
      #   if i == 0:
      #     concat[i] = _linear([inputs, h], 2 * self._num_units, True)
      #     # h = nonlinear transform, t = transfer gate
      #     h_transform[i], t[i] = array_ops.split(1, 2, concat[i])
      #     t[i] = sigmoid(t[i] + self._transfer_bias)
      #     s[i] = self._activation(h_transform[i]) * t[i] + \
      #         (1.0 - t[i]) * _linear([inputs], 1 * self._num_units, False)
      #   if i > 0:
      #     concat[i] = _linear([h], 2 * self._num_units, True)
      #     # h = nonlinear transform, t = transfer gate
      #     h_transform[i], t[i] = array_ops.split(1, 2, concat[i])
      #     t[i] = sigmoid(t[i] + self._transfer_bias)
      #     s[i] = self._activation(h_transform[i]) * t[i] + \
      #         (1.0 - t[i]) * s[i-1]

      # ALTERNATIVE IMPLEMENTATION:
      for i in range(self._recurrence_depth):
        if i == 0:
          concat = _linear([inputs, y], 2 * self._num_units, True)
          # h = nonlinear transform, t = transfer gate
          h, t = array_ops.split(1, 2, concat)
          t = sigmoid(t + self._transfer_bias)
          s = self._activation(h) * t + \
              (1.0 - t) * _linear([inputs], 1 * self._num_units, False)
        if i > 0:
          concat = _linear([s], 2 * self._num_units, True)
          # h = nonlinear transform, t = transfer gate
          h, t = array_ops.split(1, 2, concat)
          t = sigmoid(t + self._transfer_bias)
          s = self._activation(h) * t + \
              (1.0 - t) * s
      new_y = s

      if self._state_is_tuple:
        new_state = RHNStateTuple(new_y)
      else:
        new_state = new_y  # single-tensor state; nothing to concatenate
      return new_y, new_state
Author: julian121266, Project: tensorflow, Lines: 52, Source: rnn_cell.py


Example 18: testNonexistentDimTensor

  def testNonexistentDimTensor(self):
    x = array_ops.placeholder(dtypes.int32)
    values = np.zeros([5, 30])
    splits = array_ops.placeholder(dtypes.int32)
    with self.assertRaisesRegexp(ValueError, "Cannot infer"):
      y = array_ops.split(values, splits, axis=x)

    splits = array_ops.placeholder(dtypes.int32, [3])
    y = array_ops.split(values, splits, axis=x)
    with self.test_session(use_gpu=True) as sess:
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "must have exactly one element"):
        sess.run(y, {x: np.array([], dtype=np.int32), splits: [4, 11, 15]})
Author: Huoxubeiyin, Project: tensorflow, Lines: 13, Source: split_op_test.py


Example 19: _padded_split

def _padded_split(tensor, pieces):
  """Like split for 1D tensors but pads-out case where len % pieces != 0.

  Args:
    tensor: T `tf.Tensor` that must be 1D.
    pieces: a positive integer specifying the number of pieces into which
      tensor should be split.

  Returns:
    list of T `tf.Tensor` of length pieces, which hold the values of
      the input tensor, in order.  The final tensor may
      be zero-padded on the end to make its size equal to those of all
      of the other tensors.

  Raises:
    ValueError: The input tensor is not 1D.
  """
  shape = tensor.shape
  if 1 != len(shape):
    raise ValueError("input tensor must be 1D")
  tensor_len = shape.dims[0].value
  with ops.colocate_with(tensor):
    if tensor_len % pieces != 0:
      # pad to an even length
      chunk_size = 1 + tensor_len // pieces
      if pieces > tensor_len:
        # This is an edge case that should not come up in practice,
        # i.e. a different reduction algorithm would be better,
        # but we'll make it work just for completeness.
        pad_len = pieces - tensor_len
        extended_whole = array_ops.concat(
            [tensor, array_ops.zeros([pad_len], dtype=tensor.dtype)], 0)
        parts = array_ops.split(extended_whole, pieces)
        return parts, pad_len
      elif (pieces - 1) * chunk_size >= tensor_len:
        # Another edge case of limited real interest.
        pad_len = (pieces * chunk_size) % tensor_len
        extended_whole = array_ops.concat(
            [tensor, array_ops.zeros([pad_len], dtype=tensor.dtype)], 0)
        parts = array_ops.split(extended_whole, pieces)
        return parts, pad_len
      else:
        last_chunk_size = tensor_len - (pieces - 1) * chunk_size
        pad_len = chunk_size - last_chunk_size
        piece_lens = [chunk_size for _ in range(pieces - 1)] + [last_chunk_size]
        parts = array_ops.split(tensor, piece_lens)
        parts[-1] = array_ops.concat(
            [parts[-1], array_ops.zeros([pad_len], dtype=tensor.dtype)], 0)
        return parts, pad_len
    else:
      return array_ops.split(tensor, pieces), 0
Author: JonathanRaiman, Project: tensorflow, Lines: 51, Source: all_reduce.py
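For contrast with _ragged_split above (again our illustration, not from the source project): here every returned piece ends up the same length, and the caller also receives the pad length so the padding can be stripped later.

# Hypothetical usage, assuming a 1-D tensor of length 10:
#   parts, pad_len = _padded_split(tensor, 3)
# chunk_size = 1 + 10 // 3 = 4; piece lengths [4, 4, 2], the last piece is
# zero-padded to length 4, and pad_len == 2.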


Example 20: __call__

    def __call__(self, inputs, state, scope):
        # Parameters of gates are concatenated into one multiply for efficiency.
        c, h = array_ops.split(1, 2, state)
        self.W, self.b, concat = _linear([inputs, h], 4 * self._num_units,
                                        self.bias, self.W, self.b, self.W_init,
                                        trainable=self.trainable, scope=scope)

        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        i, j, f, o = array_ops.split(1, 4, concat)

        new_c = c * self.activation(f + self._forget_bias) + self.activation(
            i) * self.inner_activation(j)
        new_h = self.inner_activation(new_c) * self.activation(o)
        return new_h, array_ops.concat(1, [new_c, new_h])
Author: XuedongLiu, Project: tflearn, Lines: 14, Source: recurrent.py



Note: The tensorflow.python.ops.array_ops.split examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets come from open-source projects by various contributors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish without permission.

