
Python tensorflow.neg Function Code Examples


This article collects and summarizes typical usage examples of the tensorflow.neg function in Python. If you have been wondering what exactly tf.neg does, how to call it, or what its usage looks like in real projects, the curated code examples below should help.



A total of 20 code examples of the neg function are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
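Before diving into the examples, here is a minimal, self-contained sketch of what tf.neg does: it returns the element-wise negation of a tensor. Note that tf.neg belongs to the pre-1.0 TensorFlow API; in TensorFlow 1.0 and later the op was renamed tf.negative. The snippet below is illustrative only and assumes a TensorFlow 0.x environment:

    import tensorflow as tf

    # Element-wise negation of a constant tensor.
    x = tf.constant([1.0, -2.0, 3.0])
    y = tf.neg(x, name='negated')  # on TensorFlow >= 1.0, use tf.negative(x)

    with tf.Session() as sess:
        print(sess.run(y))  # prints [-1.  2. -3.]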

Example 1: _compile

    def _compile(self):
        """
        compile the tensorflow function "self._objective"
        """
        self.make_tf_array(self._free_vars)
        with self.tf_mode():
            f = self.build_likelihood() + self.build_prior()
            g, = tf.gradients(f, self._free_vars)

        minusF = tf.neg( f, name = 'objective' )
        minusG = tf.neg( g, name = 'grad_objective' )

        # initialize variables. I confess I don't understand what this does - JH
        init = tf.initialize_all_variables()
        self._session.run(init)

        #build tensorflow functions for computing the likelihood and predictions
        print("compiling tensorflow function...")
        sys.stdout.flush()
        def obj(x):
            return self._session.run([minusF,minusG], feed_dict={self._free_vars: x})
        self._objective = obj
        print("done")
        sys.stdout.flush()
        self._needs_recompile = False
Author: beckdaniel, Project: GPflow, Lines of code: 25, Source file: model.py


Example 2: build_energy_op

 def build_energy_op(self):
     with self.graph.as_default(), tf.device(self.device):
         # [1, nbatch]
         e_x_0 = tf.neg((self.state_pl[0, :] ** 2) / (self.scale ** 2), name='E_x_0')
         # [ndims - 1, nbatch]
         e_x_k = tf.neg((self.state_pl[1:, :] ** 2) / tf.exp(self.state_pl[0, :]), name='E_x_k')
         # [nbatch]
         self.energy_op = tf.reduce_sum(tf.add(e_x_0, e_x_k), 0, name='energy_op')
Author: rueberger, Project: MJHMC, Lines of code: 8, Source file: tf_distributions.py


Example 3: test_cwise_unary_grad

    def test_cwise_unary_grad(self):
        """
        Ensure that all component-wise unary functions in the math op library yield an identical gradient to tensorflow
        """
        test_config = tf.ConfigProto(allow_soft_placement=False)
        test_config.graph_options.optimizer_options.opt_level = -1
        with tf.Session(config=test_config) as s:
            arg_np = np.random.random(100)
            grad_above = tf.constant(np.random.random(100))

            arg = tf.constant(arg_np)

            def test_grad(fcn, tf_fcn):
                ovl_out = as_tensorflow(fcn(arg))
                tf_out = tf_fcn(arg)

                ovl_grad = tf.gradients(ovl_out, arg, grad_above)[0]
                tf_grad = tf.gradients(tf_out, arg, grad_above)[0]
                ovl_out, tf_out, ovl_grad, tf_grad = s.run([ovl_out, tf_out, ovl_grad, tf_grad])

                assert np.allclose(ovl_out, tf_out)
                assert np.allclose(ovl_grad, tf_grad)

            test_grad(lambda x: neg(x), lambda x: tf.neg(x))
            test_grad(lambda x: tanh(x), lambda x: tf.tanh(x))
            test_grad(lambda x: sin(x), lambda x: tf.sin(x))
            test_grad(lambda x: cos(x), lambda x: tf.cos(x))
            test_grad(lambda x: tan(x), lambda x: tf.tan(x))
            test_grad(lambda x: sigmoid(x), lambda x: tf.sigmoid(x))
Author: hewlettpackardlabs, Project: opveclib, Lines of code: 29, Source file: test_math.py


Example 4: testSideEffect

  def testSideEffect(self):
    a = tf.constant(1)
    b = tf.constant(1)
    c = tf.add(a, b)
    with tf.control_dependencies([c]):
      d = tf.constant(42)
    n = tf.neg(c)

    shared = []

    def sub(t):
      shared.append(t)
      return t

    c = subscribe.subscribe(c, lambda t: tf.py_func(sub, [t], [t.dtype]))

    with self.test_session() as sess:
      c_out = sess.run([c])
      n_out = sess.run([n])
      d_out = sess.run([d])

    self.assertEquals(n_out, [-2])
    self.assertEquals(c_out, [2])
    self.assertEquals(d_out, [42])
    self.assertEquals(shared, [2, 2, 2])
Author: ComeOnGetMe, Project: tensorflow, Lines of code: 25, Source file: subscribe_test.py


Example 5: w

def w(input_data, cu, kappas_t_1, config):
	
	batch_size = config.batch_size
	mixture_size = config.mixture_size
	vocab_length = config.vocab_length

	# split along dim of mixture size * 3
	hat_alphas_t, hat_betas_t, hat_kappas_t = tf.split(1, 3, input_data)

	alphas_t = tf.exp(hat_alphas_t)
	betas_t = tf.exp(hat_betas_t)
	kappas_t = tf.add(kappas_t_1, tf.exp(hat_kappas_t))

	speech_length = tf.shape(cu)[1]

	u = tf.linspace(1.0, tf.cast(speech_length,tf.float32) , speech_length)
	u = tf.expand_dims(u, 0)
	u = tf.expand_dims(u, 0)
	u = tf.tile(u, [batch_size, mixture_size, 1])

	alphas_t_expanded = tf.tile(tf.expand_dims(alphas_t, -1), [1, 1, speech_length])
	betas_t_expanded = tf.tile(tf.expand_dims(betas_t, -1), [1, 1, speech_length])
	kappas_t_expanded = tf.tile(tf.expand_dims(kappas_t, -1), [1, 1, speech_length])

	calc = tf.square(tf.sub(kappas_t_expanded, u))
	calc = tf.mul(calc, tf.neg(betas_t_expanded))
	calc = tf.exp(calc)
	calc = tf.mul(calc, alphas_t_expanded)

	phi_t = tf.expand_dims(tf.reduce_sum(calc, 1), 1)

	output = tf.squeeze(tf.batch_matmul(phi_t, cu), [1])

	return output, kappas_t, phi_t
Author: jarmstrong2, Project: adversarial_TED, Lines of code: 34, Source file: windowlayer.py


Example 6: get_layers

    def get_layers(self):
        input_vars, predict_op = self.get_predict_op()

        action = tf.placeholder(tf.int32, shape=(None,), name='action')
        reward = tf.placeholder(tf.float32, shape=(None,), name='reward')
        credit = tf.placeholder(tf.float32, shape=(None,), name='credit')
        label_vars = [action, reward, credit]

        if self.options.pg_normalize:
            reward_mean, reward_variance = tfutils.moments(reward)
            normalized = tf.nn.batch_normalization(reward, reward_mean, reward_variance,
                                                   scale=1.0, offset=0.0, variance_epsilon=1e-4)
        else:
            normalized = reward
        opt = tf.train.RMSPropOptimizer(learning_rate=self.options.learning_rate)
        logp = tf.neg(tf.nn.sparse_softmax_cross_entropy_with_logits(predict_op, action),
                      name='action_log_prob')
        signal = tf.mul(logp, normalized * credit, name='signal')
        signal_down = tf.reduce_sum(tf.slice(tf.reshape(signal, [-1, 10]),
                                             [0, 4], [-1, 1]),
                                    [0], name='signal_down')
        if self.options.verbosity >= 5:
            print_node = tf.Print(signal, [signal_down], message='signal_down: ', summarize=10)
            with tf.control_dependencies([print_node]):
                signal = tf.identity(signal)
        loss = tf.reduce_mean(-signal, name='loss')
        var_list = tf.trainable_variables()
        print('Trainable variables:')
        for var in var_list:
            print(var.name)
        train_op = minimize_with_grad_clip(opt, self.options.pg_grad_clip,
                                           loss, var_list=var_list)

        return input_vars, label_vars, train_op, predict_op
Author: futurulus, Project: rl-cards, Lines of code: 34, Source file: rl_learner.py


Example 7: testInitializerFunction

  def testInitializerFunction(self):
    value = [[-42], [133.7]]
    shape = [2, 1]
    with self.test_session():
      initializer = lambda: tf.constant(value)
      with self.assertRaises(ValueError):
        # Checks that dtype must be specified.
        tf.Variable(initializer)

      v1 = tf.Variable(initializer, dtype=tf.float32)
      self.assertEqual(shape, v1.get_shape())
      self.assertAllClose(value, v1.initial_value.eval())
      with self.assertRaises(tf.errors.FailedPreconditionError):
        v1.eval()

      v2 = tf.Variable(tf.neg(v1.initialized_value()), dtype=tf.float32)
      self.assertEqual(v1.get_shape(), v2.get_shape())
      self.assertAllClose(np.negative(value), v2.initial_value.eval())

      # Once v2.initial_value.eval() has been called, v1 has effectively been
      # initialized.
      self.assertAllClose(value, v1.eval())

      with self.assertRaises(tf.errors.FailedPreconditionError):
        v2.eval()
      tf.initialize_all_variables().run()
      self.assertAllClose(np.negative(value), v2.eval())
Author: 0ruben, Project: tensorflow, Lines of code: 27, Source file: variables_test.py


Example 8: cross_entropy_loss

def cross_entropy_loss(y, yhat):
  """
  Compute the cross entropy loss in tensorflow.

  y is a one-hot tensor of shape (n_samples, n_classes) and yhat is a tensor
  of shape (n_samples, n_classes). y should be of dtype tf.int32, and yhat should
  be of dtype tf.float32.

  The functions tf.to_float, tf.reduce_sum, and tf.log might prove useful. (Many
  solutions are possible, so you may not need to use all of these functions).

  Note: You are NOT allowed to use the tensorflow built-in cross-entropy
        functions.

  Args:
    y:    tf.Tensor with shape (n_samples, n_classes). One-hot encoded.
    yhat: tf.Tensor with shape (n_samples, n_classes). Each row encodes a
          probability distribution and should sum to 1.
  Returns:
    out:  tf.Tensor with shape (1,) (Scalar output). You need to construct this
          tensor in the problem.
  """
  ### YOUR CODE HERE
  score = tf.neg(tf.mul(tf.to_float(y), tf.log(yhat))) 
  out = tf.reduce_sum(score) 
  ### END YOUR CODE
  return out
Author: ccclyu, Project: Lesson-Assignment-Code, Lines of code: 27, Source file: q1_softmax.py


Example 9: CreateRegressionNetwork

def CreateRegressionNetwork(input_d, output_d, num_hidden=20, 
              learning_rate=0.01, correlation_loss=False):
  g = tf.Graph()
  with g.as_default():
    x1 = tf.placeholder(tf.float32, shape=(None, input_d), name='x1') # Will be batch_size x input_d
    W1 = tf.Variable(tf.random_uniform([input_d,num_hidden], -1.0, 1.0), name='W1')  # input_d x num_hidden
    b1 = tf.Variable(tf.zeros([num_hidden]), name='bias1')
    y1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(x1,W1), b1), name='y1') # batch_size x num_hidden
  
    W2 = tf.Variable(tf.random_uniform([num_hidden,output_d], -1.0, 1.0), name='W2')
    b2 = tf.Variable(tf.zeros([output_d]), name='b2')
    y2 = tf.nn.bias_add(tf.matmul(y1,W2), b2, name='y2') # Will be batch_size x output_d
    ytrue = tf.placeholder(tf.float32, shape=(None, output_d), name='ytrue') # num_batch x output_d
  
    if correlation_loss:
      # Compute the correlation
      r = PearsonCorrelationTF(ytrue, y2)
      tf.scalar_summary('correlation', r)
      loss = tf.neg(r, name='loss_pearson')
    else:
      # Minimize the mean squared errors.
      loss = tf.reduce_mean(tf.square(y2 - ytrue), name='loss_euclidean')
      tf.scalar_summary('loss', loss)
  
    # https://www.quora.com/Which-optimizer-in-TensorFlow-is-best-suited-for-learning-regression
    # optimizer = tf.train.AdadeltaOptimizer(learning_rate)
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    train = optimizer.minimize(loss)
  
    # Before starting, initialize the variables.  We will 'run' this first.
    init = tf.initialize_all_variables()
    saver = tf.train.Saver()
    merged_summaries = tf.merge_all_summaries()
  return g, train, loss, init, x1, y2, ytrue, merged_summaries, saver
Author: Neuromorphs, Project: telluride-decoding-toolbox, Lines of code: 35, Source file: TFRegression.py


Example 10: build_graph_tf

def build_graph_tf():
	a=tf.constant(2)
	b=tf.constant(3)
	c=tf.add(a,b)
	c2=tf.mul(a,b)
	d=tf.neg(c)
	return a,b,c,c2,d
Author: 21hub, Project: tdb, Lines of code: 7, Source file: test_pure_tf.py


Example 11: initializeKnn

 def initializeKnn(self):        
     if self.qualitative_outputs:            
         n_input = self.input_end_column - self.input_start_column + 1            
         self.tf_in = tf.placeholder("float", [None, n_input])
         self.tf_testing = tf.placeholder("float", [n_input])
         
         # Calculate L1 Distance
         self.distance = tf.reduce_sum(tf.abs(tf.add(self.tf_in, tf.neg(self.tf_testing))), reduction_indices=1)
         # Predict: Get min distance index (Nearest neighbor)
         self.prediction = tf.arg_min(self.distance, 0)
         
         init = tf.initialize_all_variables()
         self.sess = tf.Session()
         self.sess.run(init)
         accuracy = 0
         #output part
         for i in range(len(self.testing_data)):
             # Get nearest neighbor
             nn_index = self.sess.run(self.prediction, feed_dict={self.tf_in: self.training_data, self.tf_testing: self.testing_data[i,:]})
             # Calculate accuracy
             if np.argmax(self.training_outputs[nn_index]) == np.argmax(self.testing_outputs[i]):
                 accuracy += 1./len(self.testing_data)
         self.accuracy = accuracy
         self.epochs_for_accuracy = "N/A"
         self.best_accuracy = "N/A"
         self.epochs_for_best_accuracy = "N/A"
         self.trained = True
     else:
         raise ValueError("NOT IMPLEMENTED")
Author: maximegregoire, Project: genius, Lines of code: 29, Source file: network.py


Example 12: _compile

    def _compile(self, optimizer=None):
        """
        compile the tensorflow function "self._objective"
        """
        # Make float32 hack
        float32_hack = False
        if optimizer is not None:
            if tf.float64 not in optimizer._valid_dtypes() and tf.float32 in optimizer._valid_dtypes():
                print("Using float32 hack for Tensorflow optimizers...")
                float32_hack = True

        self._free_vars = tf.Variable(self.get_free_state())
        if float32_hack:
            self._free_vars32 = tf.Variable(self.get_free_state().astype(np.float32))
            self._free_vars = tf.cast(self._free_vars32, tf.float64)

        self.make_tf_array(self._free_vars)
        with self.tf_mode():
            f = self.build_likelihood() + self.build_prior()
            g, = tf.gradients(f, self._free_vars)

        self._minusF = tf.neg( f, name = 'objective' )
        self._minusG = tf.neg( g, name = 'grad_objective' )

        # The optimiser needs to be part of the computational graph, and needs
        # to be initialised before tf.initialize_all_variables() is called.
        if optimizer is None:
            opt_step = None
        else:
            if float32_hack:
                opt_step = optimizer.minimize(tf.cast(self._minusF, tf.float32), var_list=[self._free_vars32])
            else:
                opt_step = optimizer.minimize(self._minusF, var_list=[self._free_vars])
        init = tf.initialize_all_variables()
        self._session.run(init)

        #build tensorflow functions for computing the likelihood and predictions
        print("compiling tensorflow function...")
        sys.stdout.flush()
        def obj(x):
            return self._session.run([self._minusF, self._minusG], feed_dict={self._free_vars: x})
        self._objective = obj
        print("done")
        sys.stdout.flush()
        self._needs_recompile = False

        return opt_step
Author: agarbuno, Project: GPflow, Lines of code: 47, Source file: model.py


Example 13: _compile

    def _compile(self, optimizer=None):
        """
        compile the tensorflow function "self._objective"
        """
        self._graph = tf.Graph()
        self._session = tf.Session(graph=self._graph)
        with self._graph.as_default():
            self._free_vars = tf.Variable(self.get_free_state())

            self.make_tf_array(self._free_vars)
            with self.tf_mode():
                f = self.build_likelihood() + self.build_prior()
                g, = tf.gradients(f, self._free_vars)

            self._minusF = tf.neg(f, name='objective')
            self._minusG = tf.neg(g, name='grad_objective')

            # The optimiser needs to be part of the computational graph, and needs
            # to be initialised before tf.initialize_all_variables() is called.
            if optimizer is None:
                opt_step = None
            else:
                opt_step = optimizer.minimize(self._minusF,
                                              var_list=[self._free_vars])
            init = tf.initialize_all_variables()
        self._session.run(init)

        # build tensorflow functions for computing the likelihood
        if settings.verbosity.tf_compile_verb:
            print("compiling tensorflow function...")
        sys.stdout.flush()

        self._feed_dict_keys = self.get_feed_dict_keys()
        def obj(x):
            feed_dict = {self._free_vars: x}
            self.update_feed_dict(self._feed_dict_keys, feed_dict)
            f, g = self._session.run([self._minusF, self._minusG],
                                     feed_dict=feed_dict)
            return f.astype(np.float64), g.astype(np.float64)

        self._objective = obj
        if settings.verbosity.tf_compile_verb:
            print("done")
        sys.stdout.flush()
        self._needs_recompile = False

        return opt_step
Author: GPflow, Project: GPflow, Lines of code: 47, Source file: model.py


Example 14: __build_graph

    def __build_graph(self):
        self.__graph = tf.Graph()
        with self.__graph.as_default(), self.__graph.device(_device_for_node):
            count_max = tf.constant([self.cooccurrence_cap], dtype=tf.float32,
                                    name='max_cooccurrence_count')
            scaling_factor = tf.constant([self.scaling_factor], dtype=tf.float32,
                                         name="scaling_factor")

            self.__focal_input = tf.placeholder(tf.int32, shape=[self.batch_size],
                                                name="focal_words")
            self.__context_input = tf.placeholder(tf.int32, shape=[self.batch_size],
                                                  name="context_words")
            self.__cooccurrence_count = tf.placeholder(tf.float32, shape=[self.batch_size],
                                                       name="cooccurrence_count")

            focal_embeddings = tf.Variable(
                tf.random_uniform([self.vocab_size, self.embedding_size], 1.0, -1.0),
                name="focal_embeddings")
            context_embeddings = tf.Variable(
                tf.random_uniform([self.vocab_size, self.embedding_size], 1.0, -1.0),
                name="context_embeddings")

            focal_biases = tf.Variable(tf.random_uniform([self.vocab_size], 1.0, -1.0),
                                       name='focal_biases')
            context_biases = tf.Variable(tf.random_uniform([self.vocab_size], 1.0, -1.0),
                                         name="context_biases")

            focal_embedding = tf.nn.embedding_lookup([focal_embeddings], self.__focal_input)
            context_embedding = tf.nn.embedding_lookup([context_embeddings], self.__context_input)
            focal_bias = tf.nn.embedding_lookup([focal_biases], self.__focal_input)
            context_bias = tf.nn.embedding_lookup([context_biases], self.__context_input)

            weighting_factor = tf.minimum(
                1.0,
                tf.pow(
                    tf.div(self.__cooccurrence_count, count_max),
                    scaling_factor))

            embedding_product = tf.reduce_sum(tf.mul(focal_embedding, context_embedding), 1)

            log_cooccurrences = tf.log(tf.to_float(self.__cooccurrence_count))

            distance_expr = tf.square(tf.add_n([
                embedding_product,
                focal_bias,
                context_bias,
                tf.neg(log_cooccurrences)]))

            single_losses = tf.mul(weighting_factor, distance_expr)
            self.__total_loss = tf.reduce_sum(single_losses)
            tf.scalar_summary("GloVe loss", self.__total_loss)
            self.__optimizer = tf.train.AdagradOptimizer(self.learning_rate).minimize(
                self.__total_loss)
            self.__summary = tf.merge_all_summaries()

            self.__combined_embeddings = tf.add(focal_embeddings, context_embeddings,
                                                name="combined_embeddings")
Author: bmckown, Project: tensorflow-glove, Lines of code: 57, Source file: tf_glove.py


Example 15: activation

def activation(type, synapse):
    """Chooses the activation function to use."""
    if type == "sigmoid":
        return tf.sigmoid(synapse)
    elif type == "linear":
        return synapse
    elif type == "tanh":
        return tf.tanh(synapse)
    elif type == "radial":
        return tf.sqrt(tf.exp(tf.neg(tf.square(synapse))))
Author: AidanGG, Project: tensorflow_tmva, Lines of code: 10, Source file: model.py


Example 16: gaussian_kernel

def gaussian_kernel(tensor_a, a_inputs, tensor_b, b_inputs, gamma):
    """Returns the Gaussian kernel matrix of two matrices of vectors
    element-wise."""
    cross = cross_matrices(tensor_a, a_inputs, tensor_b, b_inputs)

    kernel = tf.exp(tf.mul(tf.reduce_sum(tf.square(
        tf.sub(cross[0], cross[1])), reduction_indices=2),
        tf.neg(tf.constant(gamma, dtype=tf.float32))))

    return kernel
Author: AidanGG, Project: tensorflow_tmva, Lines of code: 10, Source file: svm.py


Example 17: binary_cross_entropy

def binary_cross_entropy(prediction, target):
    """
    let o=prediction, t=target
    -(t*log(o) + (1-t)*log(1-o))
    
    Adds a small (1e-12) value to the logarithms to avoid log(0)
    """
    op1 = tf.mul(target, tf.log(prediction + 1e-12))
    op2 = tf.mul(tf.sub(1., target), tf.log(tf.sub(1., prediction) + 1e-12))
    return tf.neg(tf.add(op1, op2))
Author: CellProfiling, Project: AutomaticProteinLocalization, Lines of code: 10, Source file: tensordnn.py


Example 18: gabor

def gabor(n_values=32, sigma=1.0, mean=0.0):
    x = tf.linspace(-3.0, 3.0, n_values)
    z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
                       (2.0 * tf.pow(sigma, 2.0)))) *
         (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
    gauss_kernel = tf.matmul(
        tf.reshape(z, [n_values, 1]), tf.reshape(z, [1, n_values]))
    x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
    y = tf.reshape(tf.ones_like(x), [1, n_values])
    gabor_kernel = tf.mul(tf.matmul(x, y), gauss_kernel)
    return gabor_kernel
Author: ondrej-tucek, Project: tensorflow_tutorials, Lines of code: 11, Source file: 1_basics.py


Example 19: test_1

	def test_1(self):
		"""
		ht->tf
		"""
		a=tf.constant(2)
		b=tf.constant(3)
		c=tdb.python_op(myadd,inputs=[a,b],outputs=[tf.placeholder(tf.int32)]) # a+b
		d=tf.neg(c)
		status,result=tdb.debug([d], feed_dict=None, breakpoints=None, break_immediately=False)	
		self.assertEqual(status, tdb.FINISHED)
		self.assertEqual(result[0],-5)
Author: 21hub, Project: tdb, Lines of code: 11, Source file: test_mixed.py


Example 20: loss

def loss(output,target):
    output = tf.squeeze(output,squeeze_dims=[3])
    all_true_probability = output
    all_false_probability = tf.sub(tf.constant(1,dtype=tf.float32),output)
    target = tf.squeeze(target, squeeze_dims=[0])  # drop the leading singleton dimension from target
    actual_probability = tf.select(target,all_true_probability,all_false_probability)
    log_probability = tf.log(actual_probability)
    total_log_prob = tf.reduce_sum(log_probability,name='log_loss')
    total_log_loss = tf.neg(total_log_prob)
    
    return total_log_loss
Author: kzeznznzeztzhz, Project: Smoke-Recognition, Lines of code: 11, Source file: conv_net_test_.py
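
A final practical note: all of the examples above target the pre-1.0 TensorFlow API, so several of the ops they use were renamed in TensorFlow 1.0. The mapping below is a rough reference for porting these snippets to TensorFlow 1.x; the dictionary name TF1_RENAMES is just an illustrative choice, not part of any project above:

    # Deprecated 0.x op names used in the examples above, mapped to their
    # TensorFlow 1.x replacements (renames introduced with the 1.0 API).
    TF1_RENAMES = {
        'tf.neg': 'tf.negative',
        'tf.mul': 'tf.multiply',
        'tf.sub': 'tf.subtract',
        'tf.select': 'tf.where',
        'tf.batch_matmul': 'tf.matmul',
        'tf.initialize_all_variables': 'tf.global_variables_initializer',
        'tf.scalar_summary': 'tf.summary.scalar',
        'tf.merge_all_summaries': 'tf.summary.merge_all',
    }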



Note: The tensorflow.neg function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult each project's License before redistributing or using the code. Do not reproduce this article without permission.

