Python tensorflow.pow Function Code Examples


This article compiles typical usage examples of the Python function tensorflow.pow. If you are wondering exactly how to use tf.pow, or are looking for concrete examples of it, the curated code samples below may help.



The following presents 20 code examples of the pow function, ordered by popularity.
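Before diving into the examples, here is a minimal sketch of what tf.pow itself does (a standalone illustration assuming TensorFlow 2.x eager execution; the values in comments are the computed results):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y = tf.pow(x, 2.0)   # element-wise power: [1.0, 4.0, 9.0]
z = tf.pow(2.0, x)   # the base broadcasts as well: [2.0, 4.0, 8.0]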

Example 1: adam2_old

def adam2_old(params, cost_or_grads, lr=3e-4, mom1=0.9, mom2=0.999, epsilon=1e-8):
    updates = []
    if type(cost_or_grads) is not list:
        gs = tf.gradients(cost_or_grads, params)
    else:
        gs = cost_or_grads

    # all-reduce
    grads1 = [Z.allreduce_mean(g) for g in gs]
    grads2 = [Z.allreduce_mean(tf.square(g)) for g in gs]
    mom2 = tf.maximum(0., 1. - (hvd.size() * (1 - mom2)))

    t = tf.Variable(1., 'adam_t')
    lr_t = lr * tf.sqrt((1. - tf.pow(mom2, t))) / (1. - tf.pow(mom1, t))
    updates.append(t.assign_add(1))

    for p, g1, g2 in zip(params, grads1, grads2):
        mg = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adam_mg')
        if mom1 > 0:
            v = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adam_v')
            v_t = mom1 * v + (1. - mom1) * g1
            updates.append(v.assign(v_t))
        else:
            v_t = g1
        mg_t = mom2 * mg + (1. - mom2) * g2
        delta_t = v_t / (tf.sqrt(mg_t) + epsilon)
        p_t = p - lr_t * delta_t
        updates.append(mg.assign(mg_t))
        updates.append(p.assign(p_t))
    return tf.group(*updates)
Developer: chinatian, Project: glow, Lines of code: 30, Source file: optim.py
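The two tf.pow calls in lr_t implement Adam's bias correction: the effective step size is lr * sqrt(1 - mom2^t) / (1 - mom1^t), which sits well below lr for small t and approaches lr as t grows. A standalone numeric check of this factor (assuming TensorFlow 2.x eager execution; not part of the glow code):

import tensorflow as tf

lr, mom1, mom2 = 3e-4, 0.9, 0.999
for t in [1.0, 10.0, 1000.0]:
    lr_t = lr * tf.sqrt(1. - tf.pow(mom2, t)) / (1. - tf.pow(mom1, t))
    print(float(lr_t))  # ~9.5e-05, ~4.6e-05, ~2.4e-04: approaches 3e-4 as t grows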


Example 2: update_op

  def update_op(self, has_nan, amax):
    is_nonfinite = tf.logical_or(has_nan, tf.is_inf(amax))
    x = tf.cond(is_nonfinite,
                lambda: tf.pow(2., self.log_max),
                lambda: tf.log(amax) / tf.log(tf.constant(2.)))

    x_hat_assn = tf.assign(self.x_hat, self.beta1 * self.x_hat +
                           (1 - self.beta1) * x)
    b1_corr_assn = tf.assign(self.b1_correction,
                             self.b1_correction * self.beta1)
    with tf.control_dependencies([x_hat_assn, b1_corr_assn]):
      mu = self.x_hat.read_value() / (1 - self.b1_correction.read_value())

    slow_x_hat_assn = tf.assign(self.slow_x_hat, self.beta2 * self.slow_x_hat +
                                (1 - self.beta2) * x)
    xsquared_hat_assn = tf.assign(self.xsquared_hat, self.beta2 * self.xsquared_hat +
                                  (1 - self.beta2) * (x * x))
    b2_corr_assn = tf.assign(self.b2_correction,
                             self.b2_correction * self.beta2)
    with tf.control_dependencies([slow_x_hat_assn, xsquared_hat_assn, b2_corr_assn]):
      e_xsquared = self.xsquared_hat.read_value() / (1 - self.b2_correction.read_value())
      slow_mu = self.slow_x_hat.read_value() / (1 - self.b2_correction.read_value())

    sigma2 = e_xsquared - (slow_mu * slow_mu)
    sigma = tf.sqrt(tf.maximum(sigma2, tf.constant(0.)))

    log_cutoff = sigma * self.overflow_std_dev + mu
    log_difference = 16 - log_cutoff
    proposed_scale = tf.pow(2., log_difference)
    scale_update = tf.assign(self.scale, tf.clip_by_value(proposed_scale, self.scale_min,
                                                          self.scale_max))
    iter_update = tf.assign_add(self.iteration, 1)

    with tf.control_dependencies([scale_update]):
      return tf.identity(iter_update)
Developer: fotwo, Project: OpenSeq2Seq, Lines of code: 35, Source file: automatic_loss_scaler.py
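tf.pow(2., ...) appears twice here because the scaler works in log2 space: x estimates log2 of the current gradient maximum, and the proposed scale 2^(16 - log_cutoff) keeps the tracked mean plus overflow_std_dev standard deviations below 2^16, near float16's overflow point. A standalone sketch with made-up statistics (the numbers are hypothetical, not from OpenSeq2Seq):

import tensorflow as tf

mu, sigma, overflow_std_dev = 10.0, 1.5, 4.0    # hypothetical running stats
log_cutoff = sigma * overflow_std_dev + mu      # 16.0
proposed_scale = tf.pow(2.0, 16.0 - log_cutoff) # 2^0 = 1.0: no extra scaling needed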


Example 3: step_loss

    def step_loss(self, state, action, time):
        # cost:
        x_h = tf.slice(state, [0, self.x_h_field[0]], [-1, 1])
        x_t = tf.slice(state, [0, self.x_t_field[0]], [-1, self.n_t])

        # 0. smooth acceleration policy
        cost_accel = tf.square(action)
        cost_accel_d = tf.multiply(tf.pow(self.gamma, time), cost_accel)

        # 1. forcing the host to move forward (until the right point of the roundabout)
        cost_prog = tf.square(self.x_goal - x_h)
        cost_prog_d = tf.multiply(tf.pow(self.gamma, time), cost_prog)
        cost_prog_d = tf.squeeze(cost_prog_d, axis=[1])

        # 2. keeping distance from vehicles ahead
        # distance to other vehicles
        x_abs_diffs = tf.abs(x_h - x_t)

        # punish only vehicles closer than "require distance"
        cost_acci = tf.nn.relu(self.require_distance - x_abs_diffs)

        # punish only w.r.t vehicles ahead
        cost_acci = tf.multiply(cost_acci, tf.to_float(x_h < x_t))

        # sum over all vehicles
        cost_acci = tf.reduce_sum(cost_acci)

        # punish only when host is inside the roundabout (or very close to enter)
        cost_acci = tf.multiply(cost_acci, tf.to_float(x_h > -0.5 * self.host_length))

        cost_acci_d = tf.multiply(tf.pow(self.gamma, time), cost_acci)
        cost_acci_d = tf.squeeze(cost_acci_d, axis=[1])

        return tf.transpose(tf.stack(values=[cost_accel_d, cost_prog_d, cost_acci_d], name='scan_return'))
Developer: bentzinir, Project: Buffe, Lines of code: 34, Source file: roundabout.py
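tf.pow(self.gamma, time) is the standard discount factor gamma^t applied to each per-step cost above. A standalone sketch:

import tensorflow as tf

gamma = tf.constant(0.99)
time = tf.constant([0.0, 1.0, 2.0, 3.0])
discount = tf.pow(gamma, time)   # [1.0, 0.99, 0.9801, 0.970299]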


Example 4: lppool

def lppool(inpOp, pnorm, kH, kW, dH, dW, padding):
    global pool_counter
    global parameters
    name = 'pool' + str(pool_counter)
    pool_counter += 1
    
    with tf.name_scope('lppool'):
        if pnorm == 2:
            pwr = tf.square(inpOp)
        else:
            pwr = tf.pow(inpOp, pnorm)
          
        subsamp = tf.nn.avg_pool(pwr,
                              ksize=[1, kH, kW, 1],
                              strides=[1, dH, dW, 1],
                              padding=padding,
                              name=name)
        subsamp_sum = tf.multiply(subsamp, kH*kW)
        
        if pnorm == 2:
            out = tf.sqrt(subsamp_sum)
        else:
            out = tf.pow(subsamp_sum, 1.0/pnorm)
    
    return out
Developer: minsuu, Project: facenet, Lines of code: 25, Source file: facenet.py
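lppool computes Lp pooling: raise activations to the p-th power, sum over each window (here via avg_pool times the window size), and take the p-th root. For a single window this reduces to the p-norm, as in this standalone sketch:

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])   # one 2x2 window
p = 2.0
lp = tf.pow(tf.reduce_sum(tf.pow(tf.abs(x), p)), 1.0 / p)   # sqrt(30) ≈ 5.477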


Example 5: run_tf_simulation

    def run_tf_simulation(self, c_in, h_in, timesteps=100, dt=0.005):
        r_e = tf.Variable( tf.zeros([self.N_pairs, self.N_pairs]) )
        r_i = tf.Variable( tf.zeros([self.N_pairs, self.N_pairs]) )
        
        W_EE = tf.placeholder(tf.float32)
        W_EI = tf.placeholder(tf.float32)
        W_IE = tf.placeholder(tf.float32)
        W_II = tf.placeholder(tf.float32)
        k = tf.placeholder(tf.float32)
        n_E = tf.placeholder(tf.float32)
        n_I = tf.placeholder(tf.float32) 
        tau_E = tf.placeholder(tf.float32)
        tau_I = tf.placeholder(tf.float32)
        
        c0 = tf.constant(c_in)
        h0 = tf.constant(h_in)
                
        # Compile functions:
        I_E = c0*h0 + tf.transpose(tf.reshape(tf.reduce_sum(W_EE * r_e, [1,2]), [75,75])) \
            - tf.transpose(tf.reshape(tf.reduce_sum(W_EI * r_i, [1,2]), [75,75]))
        I_I = c0*h0 + tf.transpose(tf.reshape(tf.reduce_sum(W_IE * r_e, [1,2]), [75,75])) \
            - tf.transpose(tf.reshape(tf.reduce_sum(W_II * r_i, [1,2]), [75,75]))

        I_thresh_E = tf.maximum(0., I_E)
        I_thresh_I = tf.maximum(0., I_I)

        r_SS_E = k * tf.pow(I_thresh_E, n_E)
        r_SS_I = k * tf.pow(I_thresh_I, n_I)

        rE_out = r_e + dt*(-r_e+r_SS_E)/tau_E
        rI_out = r_i + dt*(-r_i+r_SS_I)/tau_I
        
        update_rE = tf.assign(r_e, rE_out)
        update_rI = tf.assign(r_i, rI_out)
        
        init = tf.global_variables_initializer()
        
        rE = 0
        rI = 0
        
        fd = {W_EE:self.W_EE.astype(np.float32), 
                  W_EI:self.W_EI.astype(np.float32), 
                  W_IE:self.W_IE.astype(np.float32), 
                  W_II:self.W_II.astype(np.float32),
                  k:self.k.astype(np.float32),
                  n_E:self.n_E.astype(np.float32),
                  n_I:self.n_I.astype(np.float32),
                  tau_E:self.tau_E.astype(np.float32),
                  tau_I:self.tau_I.astype(np.float32)}
        
        with tf.Session() as sess:
            sess.run(init, feed_dict=fd)
            for t in range(timesteps):
                # run the simulation
                sess.run([update_rE, update_rI], feed_dict=fd)
            # fetch the rates
            rE = sess.run([r_e], feed_dict=fd)
            rI = sess.run([r_i], feed_dict=fd)
            
        return rE, rI
Developer: benselby, Project: v1_modelling, Lines of code: 60, Source file: ssn_subpop_tf.py
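The r_SS lines are a rectified power-law nonlinearity, r = k * max(0, I)^n, the supralinear input-output function of this model. A standalone sketch with hypothetical values for k and n:

import tensorflow as tf

k, n = 0.04, 2.0                        # hypothetical gain and exponent
I = tf.constant([-1.0, 0.5, 2.0])
r = k * tf.pow(tf.maximum(0.0, I), n)   # [0.0, 0.01, 0.16]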


Example 6: init_main_block

    def init_main_block(self):
        self.x_pow_cache = {}
        self.matmul_cache = {}

        self.outputs = self.b

        with tf.name_scope('linear_part') as scope:
            contribution = matmul_wrapper(self.train_x, self.w[0], self.input_type)
        self.outputs += contribution

        for i in range(2, self.order + 1):
            with tf.name_scope('order_{}'.format(i)) as scope:
                raw_dot = matmul_wrapper(self.train_x, self.w[i - 1], self.input_type)
                dot = tf.pow(raw_dot, i)
                initialization_shape = tf.shape(dot)
                for in_pows, out_pows, coef in utils.powers_and_coefs(i):
                    product_of_pows = tf.ones(initialization_shape)
                    for pow_idx in range(len(in_pows)):
                        product_of_pows *= tf.pow(
                            self.pow_matmul(i, in_pows[pow_idx]),
                            out_pows[pow_idx]
                        )
                    dot -= coef * product_of_pows
                contribution = tf.reshape(tf.reduce_sum(dot, [1]), [-1, 1])
                contribution /= float(math.factorial(i))
            self.outputs += contribution

        with tf.name_scope('loss') as scope:
            self.init_loss()

        with tf.name_scope('regularization') as scope:
            self.init_regularization()
Developer: lijiankou, Project: tffm, Lines of code: 32, Source file: core.py


Example 7: adam_updates

def adam_updates(params, cost_or_grads, lr=0.001, mom1=0.9, mom2=0.999):
    ''' Adam optimizer '''
    updates = []
    if type(cost_or_grads) is not list:
        grads = tf.gradients(cost_or_grads, params)
    else:
        grads = cost_or_grads
    t = tf.Variable(1., 'adam_t')
    for p, g in zip(params, grads):
        mg = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adam_mg')
        if mom1>0:
            v = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adam_v')
            v_t = mom1*v + (1. - mom1)*g
            v_hat = v_t / (1. - tf.pow(mom1,t))
            updates.append(v.assign(v_t))
        else:
            v_hat = g
        mg_t = mom2*mg + (1. - mom2)*tf.square(g)
        mg_hat = mg_t / (1. - tf.pow(mom2,t))
        g_t = v_hat / tf.sqrt(mg_hat + 1e-8)
        p_t = p - lr * g_t
        updates.append(mg.assign(mg_t))
        updates.append(p.assign(p_t))
    updates.append(t.assign_add(1))
    return tf.group(*updates)
Developer: bruno-31, Project: ImprovedGAN-Tensorflow, Lines of code: 25, Source file: nn.py


Example 8: loglik_discrete

def loglik_discrete(a, b, y_, u_, output_collection=(), name=None):
    """Returns element-wise Weibull censored discrete log-likelihood.

    Unit-discretized weibull log-likelihood. loss=-loglikelihood.

    .. note::
        All input values must be of same type and shape.

    :param a: alpha. Positive nonzero `Tensor`.
    :type a: `float32` or `float64`.
    :param b: beta. Positive nonzero `Tensor`.
    :type b: `float32` or `float64`.
    :param y_: time to event. Positive nonzero `Tensor`.
    :type y_: `float32` or `float64`.
    :param u_: indicator. 0.0 if right censored, 1.0 if uncensored `Tensor`.
    :type u_: `float32` or `float64`.
    :param output_collection: name of the collection to collect result of this op.
    :type output_collection: Tuple of Strings.
    :param String name: name of the operation.
    :return: A `Tensor` of log-likelihoods of same shape as a, b, y_, u_.
    """

    with tf.name_scope(name, "weibull_loglik_discrete", [a, b, y_, u_]):
        hazard0 = tf.pow(tf.div(y_ + 1e-35, a), b)  # 1e-9 safe, really
        hazard1 = tf.pow(tf.div(y_ + 1.0, a), b)
        loglik = tf.multiply(u_, tf.log(
            tf.exp(hazard1 - hazard0) - 1.0)) - hazard1

        tf.add_to_collection(output_collection, loglik)
    return(loglik)
Developer: g6t, Project: wtte-rnn, Lines of code: 30, Source file: tensorflow.py
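Restated as a formula (a reading of the code above, not quoted from wtte-rnn's docs): with cumulative hazard $\Lambda(t) = (t/a)^b$ computed by tf.pow, the returned value is

$$\log L = u \cdot \log\left(e^{\Lambda(y+1) - \Lambda(y)} - 1\right) - \Lambda(y+1),$$

where hazard0 and hazard1 are $\Lambda(y)$ and $\Lambda(y+1)$, and the 1e-35 keeps the base of tf.pow strictly positive.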


Example 9: _meshgrid

    def _meshgrid(height, width, fp):
        x_t = tf.matmul(
            tf.ones(shape=tf.stack([height, 1])),
            tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        y_t = tf.matmul(
            tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
            tf.ones(shape=tf.stack([1, width])))

        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))

        x_t_flat_b = tf.expand_dims(x_t_flat, 0) # [1, 1, h*w]
        y_t_flat_b = tf.expand_dims(y_t_flat, 0) # [1, 1, h*w]

        num_batch = tf.shape(fp)[0]
        px = tf.expand_dims(fp[:,:,0], 2) # [n, nx*ny, 1]
        py = tf.expand_dims(fp[:,:,1], 2) # [n, nx*ny, 1]
        d = tf.sqrt(tf.pow(x_t_flat_b - px, 2.) + tf.pow(y_t_flat_b - py, 2.))
        r = tf.pow(d, 2) * tf.log(d + 1e-6) # [n, nx*ny, h*w]
        x_t_flat_g = tf.tile(x_t_flat_b, tf.stack([num_batch, 1, 1])) # [n, 1, h*w]
        y_t_flat_g = tf.tile(y_t_flat_b, tf.stack([num_batch, 1, 1])) # [n, 1, h*w]
        ones = tf.ones_like(x_t_flat_g) # [n, 1, h*w]

        grid = tf.concat([ones, x_t_flat_g, y_t_flat_g, r], 1) # [n, nx*ny+3, h*w]
        return grid
Developer: sarathknv, Project: TPS_STN-tensorflow, Lines of code: 25, Source file: TPS_STN.py
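The distance and radial-basis lines build the thin-plate-spline kernel U(d) = d^2 * log(d), with a small epsilon inside the log so U(0) stays finite. A standalone sketch (assuming tf.math.log is available):

import tensorflow as tf

d = tf.constant([0.0, 0.5, 1.0, 2.0])
u = tf.pow(d, 2) * tf.math.log(d + 1e-6)   # [0.0, ~-0.173, ~0.0, ~2.773]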


Example 10: rbf

def rbf(x, y=0.0, sigma=1.0, l=1.0):
    """
    Squared-exponential kernel element-wise
    k(x, y) = sigma^2 exp{ -1/(2l^2) (x_i - y_i)^2 }
    """
    return tf.pow(sigma, 2.0) * \
           tf.exp(-1.0/(2.0*tf.pow(l, 2.0)) * tf.pow(x - y , 2.0))
Developer: Hulalazz, Project: edward, Lines of code: 7, Source file: util.py
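A usage sketch for this kernel (standalone, assuming eager execution):

import tensorflow as tf

x = tf.constant([0.0, 1.0, 3.0])
k = rbf(x, y=0.0, sigma=2.0, l=1.0)
# sigma^2 * exp(-(x-y)^2 / (2 l^2)) = [4.0, ~2.43, ~0.044]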


Example 11: disjunction_of_literals

def disjunction_of_literals(literals, label="no_label"):
    list_of_literal_tensors = [lit.tensor for lit in literals]
    literals_tensor = tf.concat(list_of_literal_tensors, 1)
    if default_tnorm == "product":
        result = 1.0-tf.reduce_prod(1.0-literals_tensor, 1, keep_dims=True)
    if default_tnorm == "yager2":
        result = tf.minimum(1.0, tf.sqrt(tf.reduce_sum(tf.square(literals_tensor), 1, keep_dims=True)))
    if default_tnorm == "luk":
        print("data aggregator is lukas")
        result = tf.minimum(1.0, tf.reduce_sum(literals_tensor, 1, keep_dims=True))
        PR(result)
    if default_tnorm == "goedel":
        result = tf.reduce_max(literals_tensor, 1, keep_dims=True, name=label)
    if default_aggregator == "product":
        return tf.reduce_prod(result, keep_dims=True)
    if default_aggregator == "mean":
        print("data aggregator is mean")
        return tf.reduce_mean(result, keep_dims=True, name=label)
    if default_aggregator == "gmean":
        return tf.exp(tf.multiply(tf.reduce_sum(tf.log(result), keep_dims=True),
                                  tf.reciprocal(tf.to_float(tf.size(result)))), name=label)
    if default_aggregator == "hmean":
        print("data aggregator is hmean")
        return tf.div(tf.to_float(tf.size(result)), tf.reduce_sum(tf.reciprocal(result), keep_dims=True))
    if default_aggregator == "min":
        print("data aggregator is min")
        return tf.reduce_min(result, keep_dims=True, name=label)
    if default_aggregator == "qmean":
        print("data aggregator is qmean")
        return tf.sqrt(tf.reduce_mean(tf.square(result), keep_dims=True), name=label)
    if default_aggregator == "cmean":
        print("data aggregator is cmean")
        return tf.pow(tf.reduce_mean(tf.pow(result, 3), keep_dims=True), tf.reciprocal(tf.to_float(3)), name=label)
Developer: ivanDonadello, Project: knowPic, Lines of code: 33, Source file: logictensornetworks.py
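Several of these aggregators are generalized (power) means, with tf.pow doing the exponentiation; cmean is the $p = 3$ case of

$$M_p(x) = \left(\frac{1}{n}\sum_{i=1}^{n} x_i^{p}\right)^{1/p},$$

qmean is $p = 2$, hmean is $p = -1$, and gmean is the $p \to 0$ limit.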


Example 12: _logloss

	def _logloss(self):
		'''
		Gaussian Log loss
		'''

		alpha = self.alpha

		fx = tf.matmul(self.design_, self.weights) - self.offset
		#fx = tf.reshape(fx, [-1, self.num_features, self.num_neurons])
		#fx = tf.reduce_sum(fx, reduction_indices = [1])- self.offset
		
		lam = self.non_lin(fx) 
		lam_ = tf.multiply(self.scale, lam) + self.eps
		
		#returns a separate loss for each neuron
		self.loss = tf.reduce_sum(tf.pow(tf.log(self.obs_) - lam_, 2), reduction_indices = [0])

		if self.reg == 'l2':	
			self.loss += alpha*tf.reduce_sum(tf.matmul(self.weights, self.weights, transpose_a = True))
			self.loss += alpha*tf.reduce_sum(tf.pow(self.scale, 2))
			self.loss += alpha*tf.reduce_sum(tf.pow(self.offset, 2))

		if self.reg == 'l1': 
			self.loss += alpha*tf.reduce_sum(self.weights + self.offset + self.scale )
		
		return self.loss
Developer: achristensen56, Project: PyGLM, Lines of code: 26, Source file: group_glm.py


Example 13: adam

def adam(params, cost_or_grads, alpha=3e-4, hps=None, epsilon=1e-8):
    updates = []
    if type(cost_or_grads) is not list:
        gs = tf.gradients(cost_or_grads, params)
    else:
        gs = cost_or_grads

    beta2 = 1-1./(hps.train_its*hps.polyak_epochs)

    # all-reduce
    grads = [Z.allreduce_mean(g) for g in gs]

    t = tf.Variable(1., 'adam_t')
    alpha_t = alpha * tf.sqrt((1. - tf.pow(beta2, t))) / \
        (1. - tf.pow(hps.beta1, t))
    updates.append(t.assign_add(1))

    for w, g in zip(params, grads):
        mom2 = tf.Variable(tf.zeros(w.get_shape()), w.name + '_adam_m2')
        if hps.beta1 > 0:
            mom1 = tf.Variable(tf.zeros(w.get_shape()), w.name + '_adam_m1')
            mom1_new = hps.beta1 * mom1 + (1. - hps.beta1) * g
            updates.append(mom1.assign(mom1_new))
        else:
            mom1_new = g
        m2_new = beta2 * mom2 + (1. - beta2) * tf.square(g)
        delta_t = mom1_new / (tf.sqrt(m2_new) + epsilon)
        w_new = hps.weight_decay * w - alpha_t * delta_t
        updates.append(mom2.assign(m2_new))
        updates.append(w.assign(w_new))

    # Polyak averaging
    polyak_avg_op, polyak_swap_op, ema = polyak(params, beta2)
    train_op = tf.group(polyak_avg_op, *updates)
    return train_op, polyak_swap_op, ema
Developer: chinatian, Project: glow, Lines of code: 35, Source file: optim.py


Example 14: to_tf

 def to_tf(self, vecs):
     prefix = join_name(self.name_prefix, self.name)
     a = tf.get_variable(join_name(prefix, 'a'), initializer=self.a)
     k = tf.get_variable(join_name(prefix, 'k'), initializer=self.k)
     x = vecs[prefix]
     pow_x_a = tf.pow(x, a)
     return pow_x_a / (tf.pow(k, a) + pow_x_a)
Developer: wikimedia, Project: wikimedia-discovery-relevanceForge, Lines of code: 7, Source file: function_score.py
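This is a Hill-type saturation curve, x^a / (k^a + x^a): it is 0 at x = 0, exactly 0.5 at x = k, and approaches 1 as x grows. A standalone sketch:

import tensorflow as tf

a, k = 2.0, 10.0
x = tf.constant([0.0, 10.0, 100.0])
sat = tf.pow(x, a) / (tf.pow(k, a) + tf.pow(x, a))   # [0.0, 0.5, ~0.990]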


Example 15: _build_fm

 def _build_fm(self):
     """Construct the factorization machine part for the model.
     This is a traditional 2-order FM module.
     
     Returns:
         obj: prediction score made by factorization machine.
     """
     with tf.variable_scope("fm_part") as scope:
         x = tf.SparseTensor(
             self.iterator.fm_feat_indices,
             self.iterator.fm_feat_values,
             self.iterator.fm_feat_shape,
         )
         xx = tf.SparseTensor(
             self.iterator.fm_feat_indices,
             tf.pow(self.iterator.fm_feat_values, 2),
             self.iterator.fm_feat_shape,
         )
         fm_output = 0.5 * tf.reduce_sum(
             tf.pow(tf.sparse_tensor_dense_matmul(x, self.embedding), 2)
             - tf.sparse_tensor_dense_matmul(xx, tf.pow(self.embedding, 2)),
             1,
             keep_dims=True,
         )
         return fm_output
Developer: David-Li-L, Project: recommenders, Lines of code: 25, Source file: xDeepFM.py
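The square-of-sum minus sum-of-squares expression built from tf.pow(..., 2) is the standard O(kn) rewriting of the FM pairwise interaction term:

$$\sum_{i<j} \langle v_i, v_j \rangle x_i x_j = \frac{1}{2} \sum_{f=1}^{k} \left[ \left( \sum_i v_{i,f}\, x_i \right)^{2} - \sum_i v_{i,f}^{2}\, x_i^{2} \right]$$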


Example 16: hnet_transformation

def hnet_transformation(gt_pts, transformation_coeffcient, name):
    """

    :param gt_pts:
    :param transformation_coeffcient:
    :param name:
    :return:
    """
    with tf.variable_scope(name):
        # First, project the original ground-truth point pairs
        transformation_coeffcient = tf.concat([transformation_coeffcient, [1.0]], axis=-1)
        H_indices = tf.constant([[0], [1], [2], [4], [5], [7], [8]])
        H_shape = tf.constant([9])
        H = tf.scatter_nd(H_indices, transformation_coeffcient, H_shape)
        H = tf.reshape(H, shape=[3, 3])

        gt_pts = tf.transpose(gt_pts)
        pts_projects = tf.matmul(H, gt_pts)

        # Solve for the least-squares polynomial fitting coefficients
        Y = tf.transpose(pts_projects[1, :])
        X = tf.transpose(pts_projects[0, :])
        Y_One = tf.add(tf.subtract(Y, Y), tf.constant(1.0, tf.float32))
        Y_stack = tf.stack([tf.pow(Y, 3), tf.pow(Y, 2), Y, Y_One], axis=1)
        w = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(Y_stack), Y_stack)),
                                tf.transpose(Y_stack)),
                      tf.expand_dims(X, -1))

        # Use the fitted polynomial coefficients to compute the fitted positions
        x_preds = tf.matmul(Y_stack, w)
        preds = tf.transpose(tf.stack([tf.squeeze(x_preds, -1), Y, Y_One], axis=1))
        preds_fit = tf.stack([tf.squeeze(x_preds, -1), Y], axis=1)
        x_transformation_back = tf.matmul(tf.matrix_inverse(H), preds)

    return x_transformation_back
Developer: dandancat123, Project: bilibli_notes2, Lines of code: 35, Source file: lanenet_hnet_loss.py


Example 17: hnet_loss

def hnet_loss(gt_pts, transformation_coeffcient, name):
    """
    
    :param gt_pts: 原始的标签点对 [x, y, 1] 
    :param transformation_coeffcient: 映射矩阵参数(6参数矩阵) [[a, b, c], [0, d, e], [0, f, 1]]
    :param name:
    :return: 
    """
    with tf.variable_scope(name):
        # First, project the original ground-truth point pairs
        transformation_coeffcient = tf.concat([transformation_coeffcient, [1.0]], axis=-1)
        H_indices = tf.constant([[0], [1], [2], [4], [5], [7], [8]])
        H_shape = tf.constant([9])
        H = tf.scatter_nd(H_indices, transformation_coeffcient, H_shape)
        H = tf.reshape(H, shape=[3, 3])

        gt_pts = tf.transpose(gt_pts)
        pts_projects = tf.matmul(H, gt_pts)

        # Solve for the least-squares polynomial fitting coefficients
        Y = tf.transpose(pts_projects[1, :])
        X = tf.transpose(pts_projects[0, :])
        Y_One = tf.add(tf.subtract(Y, Y), tf.constant(1.0, tf.float32))
        Y_stack = tf.stack([tf.pow(Y, 3), tf.pow(Y, 2), Y, Y_One], axis=1)
        w = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(Y_stack), Y_stack)),
                                tf.transpose(Y_stack)),
                      tf.expand_dims(X, -1))
        # Compute fitted positions from the polynomial coefficients, map them back to the original space, and compute the loss
        x_preds = tf.matmul(Y_stack, w)
        preds = tf.transpose(tf.stack([tf.squeeze(x_preds, -1), Y, Y_One], axis=1))
        x_transformation_back = tf.matmul(tf.matrix_inverse(H), preds)

        loss = tf.reduce_mean(tf.pow(gt_pts[0, :] - x_transformation_back[0, :], 2))

    return loss
Developer: dandancat123, Project: bilibli_notes2, Lines of code: 35, Source file: lanenet_hnet_loss.py
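The w line is the closed-form least-squares solution for the polynomial coefficients, with the tf.pow calls building the cubic design matrix Y_stack:

$$w = (Y^{\top} Y)^{-1} Y^{\top} X, \qquad Y = \begin{bmatrix} y^3 & y^2 & y & 1 \end{bmatrix}$$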


Example 18: normalized_loss

 def normalized_loss(self, expected, predicted):
     predicted = np.minimum(predicted, 1-10**-15)
     predicted = np.maximum(predicted, 10**-15)
     w2 = tf.reduce_sum(tf.pow(self.w2, 2))
     w3 = tf.reduce_sum(tf.pow(self.w3, 2))
     l2 = self.params.normalization*(w2*w3)/self.params.hidden_units
     return -tf.reduce_sum(expected*tf.log(predicted)) + l2
Developer: klangner, Project: telstra, Lines of code: 7, Source file: nn.py


Example 19: test_0d

def test_0d():
    x1 = tf.Variable(tf.random_normal([1], dtype=tf.float32))
    x2 = tf.Variable(tf.random_normal([1], dtype=tf.float32))
    y = tf.pow(x1, tf.constant(2.0)) + tf.constant(2.0) * x1 * x2 + \
        tf.constant(3.0) * tf.pow(x2, tf.constant(2.0)) + \
        tf.constant(4.0) * x1 + tf.constant(5.0) * x2 + tf.constant(6.0)
    _test(y, [x1], val_true=np.array([[2.0]]))
    _test(y, [x2], val_true=np.array([[6.0]]))
Developer: appcoreopc, Project: edward, Lines of code: 8, Source file: test_hessian.py
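For reference, differentiating $y = x_1^2 + 2 x_1 x_2 + 3 x_2^2 + 4 x_1 + 5 x_2 + 6$ twice gives $\partial^2 y / \partial x_1^2 = 2$ and $\partial^2 y / \partial x_2^2 = 6$, which is where the expected values [[2.0]] and [[6.0]] come from.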


Example 20: gabor

def gabor(n_values=32, sigma=1.0, mean=0.0):
	x = tf.linspace(-3.0, 3.0, n_values)
	z = (tf.exp(tf.negative(tf.pow(x - mean, 2.0) / (2.0 * tf.pow(sigma, 2.0)))) * (1.0 / (sigma * tf.sqrt(2.0 * 3.14159))))
	gauss_kernel = tf.matmul(tf.reshape(z, [n_values, 1]), tf.reshape(z,[1, n_values]))
	x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
	y = tf.reshape(tf.ones_like(x), [1, n_values])
	gabor_kernel = tf.multiply(tf.matmul(x ,y), gauss_kernel)
	return gabor_kernel
Developer: stonecoder19, Project: machine_learning, Lines of code: 8, Source file: basics_tensor.py
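A usage sketch for the kernel above (the tf.pow terms build the Gaussian envelope, which is then modulated by a sinusoid):

kernel = gabor(n_values=32, sigma=1.0, mean=0.0)
print(kernel.shape)   # (32, 32)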



Note: The tensorflow.pow function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.

