
Python tensorflow.real Function Code Examples


This article collects typical usage examples of the tensorflow.real function in Python. If you are wondering what the real function does, how to call it, or what real-world usage looks like, the selected code examples below may help.



A total of 20 code examples of the real function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
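Before the project examples, here is a minimal, self-contained sketch of what tf.real does. It is not taken from any of the projects below and assumes the TensorFlow 1.x API used throughout this page (in TensorFlow 2.x the same operation is exposed as tf.math.real); the tensor values are illustrative only.

import tensorflow as tf

# Build a complex64 tensor from separate real and imaginary parts.
z = tf.complex([1.0, 2.0], [3.0, -4.0])   # [1+3j, 2-4j]

# tf.real / tf.imag return float tensors with the same shape as z.
re = tf.real(z)   # [1.0, 2.0]
im = tf.imag(z)   # [3.0, -4.0]

with tf.Session() as sess:   # TF 1.x graph execution
    print(sess.run([re, im]))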

Example 1: antenna_jones

    def antenna_jones(lm, stokes, alpha, ref_freq):
        """
        Compute the jones terms for each antenna.

        lm, stokes and alpha are the source variables.
        """

        # Compute the complex phase
        cplx_phase = rime.phase(lm, D.uvw, D.frequency, CT=CT)

        # Check for nans/infs in the complex phase
        phase_msg = ("Check that '1 - l**2  - m**2 >= 0' holds "
                    "for all your lm coordinates. This is required "
                    "for 'n = sqrt(1 - l**2 - m**2) - 1' "
                    "to be finite.")

        phase_real = tf.check_numerics(tf.real(cplx_phase), phase_msg)
        phase_imag = tf.check_numerics(tf.imag(cplx_phase), phase_msg)

        # Compute the square root of the brightness matrix
        # (as well as the sign)
        bsqrt, sgn_brightness = rime.b_sqrt(stokes, alpha,
            D.frequency, ref_freq, CT=CT,
            polarisation_type=polarisation_type)

        # Check for nans/infs in the bsqrt
        bsqrt_msg = ("Check that your stokes parameters "
                    "satisfy I**2 >= Q**2 + U**2 + V**2. "
                    "Montblanc performs a cholesky decomposition "
                    "of the brightness matrix and the above must "
                    "hold for this to produce valid values.")

        bsqrt_real = tf.check_numerics(tf.real(bsqrt), bsqrt_msg)
        bsqrt_imag = tf.check_numerics(tf.imag(bsqrt), bsqrt_msg)

        # Compute the direction dependent effects from the beam
        ejones = rime.e_beam(lm, D.frequency,
            D.pointing_errors, D.antenna_scaling,
            beam_sin, beam_cos,
            D.beam_extents, D.beam_freq_map, D.ebeam)

        deps = [phase_real, phase_imag, bsqrt_real, bsqrt_imag]
        deps = [] # Do nothing for now

        # Combine the brightness square root, complex phase,
        # feed rotation and beam dde's
        with tf.control_dependencies(deps):
            antenna_jones = rime.create_antenna_jones(bsqrt, cplx_phase,
                                                    feed_rotation, ejones, FT=FT)
            return antenna_jones, sgn_brightness
Developer: ska-sa, Project: montblanc, Lines: 50, Source: RimeSolver.py


Example 2: get_mu_tensor

  def get_mu_tensor(self):
    const_fact = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
    coef = tf.Variable([-1.0, 3.0, 0.0, 1.0], dtype=tf.float32, name="cubic_solver_coef")
    coef = tf.scatter_update(coef, tf.constant(2), -(3 + const_fact) )        
    roots = tf.py_func(np.roots, [coef], Tout=tf.complex64, stateful=False)
    
    # filter out the correct root
    root_idx = tf.logical_and(tf.logical_and(tf.greater(tf.real(roots), tf.constant(0.0) ),
      tf.less(tf.real(roots), tf.constant(1.0) ) ), tf.less(tf.abs(tf.imag(roots) ), 1e-5) )
    # in case there are two duplicated roots satisfying the above condition
    root = tf.reshape(tf.gather(tf.gather(roots, tf.where(root_idx) ), tf.constant(0) ), shape=[] )
    tf.assert_equal(tf.size(root), tf.constant(1) )

    dr = self._h_max / self._h_min
    mu = tf.maximum(tf.real(root)**2, ( (tf.sqrt(dr) - 1)/(tf.sqrt(dr) + 1) )**2)    
    return mu
Developer: tigercut, Project: MobileNet, Lines: 16, Source: yellowfin.py


Example 3: _inference

    def _inference(self, x, dropout):
        with tf.name_scope('conv1'):
            # Transform to Fourier domain
            x_2d = tf.reshape(x, [-1, 28, 28])
            x_2d = tf.complex(x_2d, 0)
            xf_2d = tf.fft2d(x_2d)
            xf = tf.reshape(xf_2d, [-1, NFEATURES])
            xf = tf.expand_dims(xf, 1)  # NSAMPLES x 1 x NFEATURES
            xf = tf.transpose(xf)  # NFEATURES x 1 x NSAMPLES
            # Filter
            Wreal = self._weight_variable([int(NFEATURES/2), self.F, 1])
            Wimg = self._weight_variable([int(NFEATURES/2), self.F, 1])
            W = tf.complex(Wreal, Wimg)
            xf = xf[:int(NFEATURES/2), :, :]
            yf = tf.matmul(W, xf)  # for each feature
            yf = tf.concat([yf, tf.conj(yf)], axis=0)
            yf = tf.transpose(yf)  # NSAMPLES x NFILTERS x NFEATURES
            yf_2d = tf.reshape(yf, [-1, 28, 28])
            # Transform back to spatial domain
            y_2d = tf.ifft2d(yf_2d)
            y_2d = tf.real(y_2d)
            y = tf.reshape(y_2d, [-1, self.F, NFEATURES])
            # Bias and non-linearity
            b = self._bias_variable([1, self.F, 1])
#            b = self._bias_variable([1, self.F, NFEATURES])
            y += b  # NSAMPLES x NFILTERS x NFEATURES
            y = tf.nn.relu(y)
        with tf.name_scope('fc1'):
            W = self._weight_variable([self.F*NFEATURES, NCLASSES])
            b = self._bias_variable([NCLASSES])
            y = tf.reshape(y, [-1, self.F*NFEATURES])
            y = tf.matmul(y, W) + b
        return y
Developer: hyzcn, Project: cnn_graph, Lines: 33, Source: models.py


Example 4: fft_conv

    def fft_conv(self, source, filters, width, height, stride, activation='relu', name='fft_conv'):
        # This function implements a convolution using a spectrally parameterized filter with the normal
        # tf.nn.conv2d() convolution operation. This is done by transforming the spectral filter to spatial
        # via tf.batch_ifft2d()

        channels = source.get_shape().as_list()[3]

        with tf.variable_scope(name):
            init = self.random_spatial_to_spectral(channels, filters, height, width)

            if name in self.initialization:
                init = self.initialization[name]

            # Option 1: Over-Parameterize fully in the spectral domain
            # w_real = tf.Variable(init.real, dtype=tf.float32, name='real')
            # w_imag = tf.Variable(init.imag, dtype=tf.float32, name='imag')
            # w = tf.cast(tf.complex(w_real, w_imag), tf.complex64)

            # Option 2: Parameterize only 'free' parameters in the spectral domain to enforce conjugate symmetry
            #           This is very slow.
            w = self.spectral_to_variable(init)

            b = tf.Variable(tf.constant(0.1, shape=[filters]))

        # Transform the spectral parameters into a spatial filter
        # and reshape for tf.nn.conv2d
        complex_spatial_filter = tf.batch_ifft2d(w)
        spatial_filter = tf.real(complex_spatial_filter)
        spatial_filter = tf.transpose(spatial_filter, [2, 3, 0, 1])

        conv = tf.nn.conv2d(source, spatial_filter, strides=[1, stride, stride, 1], padding='SAME')
        output = tf.nn.bias_add(conv, b)
        output = tf.nn.relu(output) if activation == 'relu' else output

        return output, spatial_filter, w
Developer: el3ment, Project: spectral_representations_for_convolutional_neural_networks, Lines: 35, Source: main.py


Example 5: __call__

    def __call__(self, inputs, state, scope=None ):
        with tf.variable_scope(scope or type(self).__name__):
            unitary_hidden_state, secondary_cell_hidden_state = tf.split(1,2,state)


            mat_in = tf.get_variable('mat_in', [self.input_size, self.state_size*2])
            mat_out = tf.get_variable('mat_out', [self.state_size*2, self.output_size])
            in_proj = tf.matmul(inputs, mat_in)            
            in_proj_c = tf.complex(*tf.split(1, 2, in_proj))  # split into real/imaginary halves; tf.complex takes two tensors
            out_state = modReLU( in_proj_c + 
                ulinear(unitary_hidden_state, self.state_size),
                tf.get_variable(name='bias', dtype=tf.float32, shape=tf.shape(unitary_hidden_state), initializer=tf.constant_initializer(0.)),
                scope=scope)


        with tf.variable_scope('unitary_output'):
            '''computes data linear, unitary linear and summation -- TODO: should be complex output'''
            unitary_linear_output_real = linear.linear([tf.real(out_state), tf.imag(out_state), inputs], True, 0.0)
        

        with tf.variable_scope('scale_nonlinearity'):
            modulus = tf.complex_abs(unitary_linear_output_real)
            rescale = tf.maximum(modulus + hidden_bias, 0.) / (modulus + 1e-7)

        #transition to data shortcut connection


        #out_ = tf.matmul(tf.concat(1,[tf.real(out_state), tf.imag(out_state), ] ), mat_out) + out_bias

        #hidden state is complex but output is completely real
        return out_, out_state #complex 
Developer: Liubinggunzu, Project: tensorflow_with_latest_papers, Lines: 31, Source: unitary_rnn_cell_modern.py


Example 6: sparse_dot_product0

def sparse_dot_product0(emb, tuples, use_matmul=True, output_type='real'):
    """
    Compute the dot product of complex vectors.
    It uses complex vectors but tensorflow does not optimize in the complex space (or there is a bug in the gradient
    propagation with complex numbers...)
    :param emb: embeddings
    :param tuples: indices at which we compute dot products
    :return: scores (dot products)
    """
    n_t = tuples.get_shape()[0].value
    rk = emb.get_shape()[1].value
    emb_sel_a = tf.gather(emb, tuples[:, 0])
    emb_sel_b = tf.gather(emb, tuples[:, 1])
    if use_matmul:
        pred_cplx = tf.squeeze(tf.batch_matmul(
                tf.reshape(emb_sel_a, [n_t, rk, 1]),
                tf.reshape(emb_sel_b, [n_t, rk, 1]), adj_x=True))
    else:
        pred_cplx = tf.reduce_sum(tf.mul(tf.conj(emb_sel_a), emb_sel_b), 1)
    if output_type == 'complex':
        return pred_cplx
    elif output_type == 'real':
        return tf.real(pred_cplx) + tf.imag(pred_cplx)
    elif output_type == 'abs':  # magnitude of the complex dot product
        return tf.abs(pred_cplx)
    elif output_type == 'angle':
        raise NotImplementedError('No argument or inverse-tanh function for complex number in Tensorflow')
    else:
        raise NotImplementedError()
Developer: Peratham, Project: factorix, Lines: 29, Source: hermitian.py


Example 7: _compareMulGradient

  def _compareMulGradient(self, data):
    # data is a float matrix of shape [n, 4].  data[:, 0], data[:, 1],
    # data[:, 2], data[:, 3] are real parts of x, imaginary parts of
    # x, real parts of y and imaginary parts of y.
    with self.test_session():
      inp = tf.convert_to_tensor(data)
      xr, xi, yr, yi = tf.split(1, 4, inp)

      def vec(x):  # Reshape to a vector
        return tf.reshape(x, [-1])
      xr, xi, yr, yi = vec(xr), vec(xi), vec(yr), vec(yi)

      def cplx(r, i):  # Combine to a complex vector
        return tf.complex(r, i)
      x, y = cplx(xr, xi), cplx(yr, yi)
      # z is x times y in complex plane.
      z = x * y
      # Defines the loss function as the sum of all coefficients of z.
      loss = tf.reduce_sum(tf.real(z) + tf.imag(z))
      epsilon = 0.005
      jacob_t, jacob_n = tf.test.compute_gradient(inp,
                                                  list(data.shape),
                                                  loss,
                                                  [1],
                                                  x_init_value=data,
                                                  delta=epsilon)
    self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Developer: BranYang, Project: tensorflow, Lines: 27, Source: cwise_ops_test.py


Example 8: get_reconstructed_image

    def get_reconstructed_image(self, real, imag, name=None):
        """
        :param real:
        :param imag:
        :param name:
        :return:
        """
        complex_k_space_label = tf.complex(real=tf.squeeze(real), imag=tf.squeeze(imag), name=name+"_complex_k_space")
        rec_image_complex = tf.expand_dims(tf.ifft2d(complex_k_space_label), axis=1)
        
        rec_image_real = tf.reshape(tf.real(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])
        rec_image_imag = tf.reshape(tf.imag(rec_image_complex), shape=[-1, 1, self.dims_out[1], self.dims_out[2]])

        # Shifting
        top, bottom = tf.split(rec_image_real, num_or_size_splits=2, axis=2)
        top_left, top_right = tf.split(top, num_or_size_splits=2, axis=3)
        bottom_left, bottom_right = tf.split(bottom, num_or_size_splits=2, axis=3)

        top_shift = tf.concat(axis=3, values=[bottom_right, bottom_left])
        bottom_shift = tf.concat(axis=3, values=[top_right, top_left])
        shifted_image = tf.concat(axis=2, values=[top_shift, bottom_shift])


        # Shifting
        top_imag, bottom_imag = tf.split(rec_image_imag, num_or_size_splits=2, axis=2)
        top_left_imag, top_right_imag = tf.split(top_imag, num_or_size_splits=2, axis=3)
        bottom_left_imag, bottom_right_imag = tf.split(bottom_imag, num_or_size_splits=2, axis=3)

        top_shift_imag = tf.concat(axis=3, values=[bottom_right_imag, bottom_left_imag])
        bottom_shift_imag = tf.concat(axis=3, values=[top_right_imag, top_left_imag])
        shifted_image_imag = tf.concat(axis=2, values=[top_shift_imag, bottom_shift_imag])

        shifted_image_two_channels = tf.stack([shifted_image[:,0,:,:], shifted_image_imag[:,0,:,:]], axis=1)
        return shifted_image_two_channels
Developer: shohad25, Project: thesis, Lines: 34, Source: k_space_wgan_gl_g2_unet_Gloss.py


Example 9: visualize_data_transformations

def visualize_data_transformations():
    records = glob.glob(os.path.join(utils.working_dir, 'train_fragment_*.tfrecords'))
    dataset = tf.data.TFRecordDataset(records)
    dataset = dataset.map(parse_tfrecord_raw)
    dataset = dataset.repeat()
    dataset = dataset.shuffle(buffer_size=10)
    dataset = dataset.prefetch(2)
    it = dataset.make_one_shot_iterator()

    data_x = tf.placeholder(tf.float32, shape=(utils.sample_rate * utils.audio_clip_len,))
    data_y = tf.placeholder(tf.float32, shape=(utils.timesteps,))
    stfts = tf.contrib.signal.stft(data_x, frame_length=utils.frame_length, frame_step=utils.frame_step,
                                   fft_length=4096)
    power_stfts = tf.real(stfts * tf.conj(stfts))
    magnitude_spectrograms = tf.abs(stfts)
    power_magnitude_spectrograms = tf.abs(power_stfts)

    num_spectrogram_bins = magnitude_spectrograms.shape[-1].value

    # scale frequency to mel scale and put into bins to reduce dimensionality
    lower_edge_hertz, upper_edge_hertz = 30.0, 17000.0
    num_mel_bins = utils.mel_bins_base * 4
    linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(
        num_mel_bins, num_spectrogram_bins, utils.sample_rate, lower_edge_hertz,
        upper_edge_hertz)
    mel_spectrograms = tf.tensordot(magnitude_spectrograms, linear_to_mel_weight_matrix, 1)
    mel_spectrograms.set_shape(
        magnitude_spectrograms.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))

    # log scale the mel bins to better represent human loudness perception
    log_offset = 1e-6
    log_mel_spectrograms = tf.log(mel_spectrograms + log_offset)

    # compute first order differential and concat. "It indicates a raise or reduction of the energy for each
    # frequency bin at a frame relative to its predecessor"
    first_order_diff = tf.abs(
        tf.subtract(log_mel_spectrograms, tf.manip.roll(log_mel_spectrograms, shift=1, axis=1)))
    mel_fod = tf.concat([log_mel_spectrograms, first_order_diff], 1)

    with tf.Session() as sess:
        while True:
            try:
                raw_x, raw_y = sess.run(it.get_next())
                np_stfts = sess.run(power_stfts, feed_dict={data_x: raw_x})
                np_magnitude_spectrograms = sess.run(power_magnitude_spectrograms, feed_dict={data_x: raw_x})
                np_mel_spectrograms = sess.run(mel_spectrograms, feed_dict={data_x: raw_x})
                np_log_mel_spectrograms = sess.run(log_mel_spectrograms, feed_dict={data_x: raw_x})
                np_mel_fod = sess.run(mel_fod, feed_dict={data_x: raw_x})

                utils.plot_signal_transforms(raw_x,
                                            np_stfts,
                                            np_magnitude_spectrograms,
                                            np_mel_spectrograms,
                                            np_log_mel_spectrograms,
                                            np_mel_fod)
                print('wank')

            except tf.errors.OutOfRangeError:
                break
Developer: nearlyeveryone, Project: bpm, Lines: 59, Source: bpm_estimator.py


Example 10: bilinear_pool

    def bilinear_pool(self, x1, x2):

        p1 = tf.matmul(x1, self.C[0])
        p2 = tf.matmul(x2, self.C[1])
        pc1 = tf.complex(p1, tf.zeros_like(p1))
        pc2 = tf.complex(p2, tf.zeros_like(p2))

        conved = tf.batch_ifft(tf.batch_fft(pc1) * tf.batch_fft(pc2))
        return tf.real(conved)
Developer: shmsw25, Project: mcb-model-for-vqa, Lines: 9, Source: CBP.py


Example 11: compute_spectrograms

    def compute_spectrograms(self, waveforms, labels=None):
        
        """Computes spectrograms for a batch of waveforms."""
        
        s = self.settings
        
        # Set final dimension of waveforms, which comes to us as `None`.
        self._set_waveforms_shape(waveforms)

        # Compute STFTs.
        waveforms = tf.cast(waveforms, tf.float32)
        stfts = tf.contrib.signal.stft(
            waveforms, self.window_size, self.hop_size,
            fft_length=self.dft_size, window_fn=self.window_fn)
        
        # Slice STFTs along frequency axis.
        stfts = stfts[..., self.freq_start_index:self.freq_end_index]
        
        # Get STFT magnitudes squared, i.e. squared spectrograms.
        grams = tf.real(stfts * tf.conj(stfts))
        # gram = tf.abs(stft) ** 2
        
        # Take natural log of squared spectrograms. Adding an epsilon
        # avoids log-of-zero errors.
        grams = tf.log(grams + s.spectrogram_log_epsilon)
        
        # Clip spectrograms if indicated.
        if s.spectrogram_clipping_enabled:
            grams = tf.clip_by_value(
                grams, s.spectrogram_clipping_min, s.spectrogram_clipping_max)
            
        # Normalize spectrograms if indicated.
        if s.spectrogram_normalization_enabled:
            grams = \
                s.spectrogram_normalization_scale_factor * grams + \
                s.spectrogram_normalization_offset
        
        # Reshape spectrograms for input into Keras neural network.
        grams = self._reshape_grams(grams)
        
        # Create features dictionary.
        features = {self.output_feature_name: grams}
        
        if labels is None:
            
            return features
        
        else:
            # have labels
        
            # Reshape labels into a single 2D column.
            labels = tf.reshape(labels, (-1, 1))
            
            return features, labels
Developer: HaroldMills, Project: Vesper, Lines: 54, Source: dataset_utils.py


Example 12: _compareRealImag

 def _compareRealImag(self, cplx, use_gpu):
   np_real, np_imag = np.real(cplx), np.imag(cplx)
   with self.test_session(use_gpu=use_gpu) as sess:
     inx = tf.convert_to_tensor(cplx)
     tf_real = tf.real(inx)
     tf_imag = tf.imag(inx)
     tf_real_val, tf_imag_val = sess.run([tf_real, tf_imag])
   self.assertAllEqual(np_real, tf_real_val)
   self.assertAllEqual(np_imag, tf_imag_val)
   self.assertShapeEqual(np_real, tf_real)
   self.assertShapeEqual(np_imag, tf_imag)
Developer: adeelzaman, Project: tensorflow, Lines: 11, Source: cwise_ops_test.py


Example 13: one_bp_iteration

 def one_bp_iteration(self, xe_v2c_pre_iter, H_sumC_to_V, H_sumV_to_C, xe_0):
     xe_tanh = tf.tanh(tf.to_double(tf.truediv(xe_v2c_pre_iter, [2.0])))
     xe_tanh = tf.to_float(xe_tanh)
     xe_tanh_temp = tf.sign(xe_tanh)
     xe_sum_log_img = tf.matmul(H_sumC_to_V, tf.multiply(tf.truediv((1 - xe_tanh_temp), [2.0]), [3.1415926]))
     xe_sum_log_real = tf.matmul(H_sumC_to_V, tf.log(1e-8 + tf.abs(xe_tanh)))
     xe_sum_log_complex = tf.complex(xe_sum_log_real, xe_sum_log_img)
     xe_product = tf.real(tf.exp(xe_sum_log_complex))
     xe_product_temp = tf.multiply(tf.sign(xe_product), -2e-7)
     xe_pd_modified = tf.add(xe_product, xe_product_temp)
     xe_v_sumc = tf.multiply(self.atanh(xe_pd_modified), [2.0])
     xe_c_sumv = tf.add(xe_0, tf.matmul(H_sumV_to_C, xe_v_sumc))
     return xe_v_sumc, xe_c_sumv
Developer: liangfei-info, Project: Iterative-BP-CNN, Lines: 13, Source: BP_Decoder.py


Example 14: _checkGrad

 def _checkGrad(self, func, x, y, use_gpu=False):
     with self.test_session(use_gpu=use_gpu):
         inx = tf.convert_to_tensor(x)
         iny = tf.convert_to_tensor(y)
         # func is a forward or inverse FFT function (batched or unbatched)
         z = func(tf.complex(inx, iny))
         # loss = sum(|z|^2)
         loss = tf.reduce_sum(tf.real(z * tf.conj(z)))
         ((x_jacob_t, x_jacob_n), (y_jacob_t, y_jacob_n)) = tf.test.compute_gradient(
             [inx, iny], [list(x.shape), list(y.shape)], loss, [1], x_init_value=[x, y], delta=1e-2
         )
     self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
     self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=1e-2)
Developer: figpope, Project: tensorflow, Lines: 13, Source: fft_ops_test.py


Example 15: c2q1d

def c2q1d(x):
    """ An internal function to convert a 1D Complex vector back to a real
    array,  which is twice the height of x.
    """
    # Input has shape [batch, r, c, 2]
    r, c = x.get_shape().as_list()[1:3]
    x1 = tf.real(x)
    x2 = tf.imag(x)
    # Stack 2 inputs of shape [batch, r, c] to [batch, r, 2, c]
    y = tf.stack([x1, x2], axis=-2)
    # Reshaping interleaves the results
    y = tf.reshape(y, [-1, 2 * r, c])

    return y
Developer: rjw57, Project: dtcwt, Lines: 14, Source: transform1d.py


Example 16: __call__

 def __call__(self, inputs, state, scope=None ):
     zero_initer = tf.constant_initializer(0.)
     with tf.variable_scope(scope or type(self).__name__):
         mat_in = tf.get_variable('W_in', [self.input_size, self.state_size*2])
         mat_out = tf.get_variable('W_out', [self.state_size*2, self.output_size])
         in_proj = tf.matmul(inputs, mat_in)
         in_proj_c = tf.complex( in_proj[:, :self.state_size], in_proj[:, self.state_size:] )
         out_state = modrelu_c( in_proj_c + 
             ulinear_c(state,transform=self.transform),
             tf.get_variable(name='B', dtype=tf.float32, shape=[self.state_size], initializer=zero_initer)
             )
         out_bias = tf.get_variable(name='B_out', dtype=tf.float32, shape=[self.output_size], initializer = zero_initer)
         out = tf.matmul( tf.concat(1,[tf.real(out_state), tf.imag(out_state)] ), mat_out ) + out_bias
     return out, out_state
Developer: khaotik, Project: char-rnn-tensorflow, Lines: 14, Source: urnn.py


Example 17: _compareGradient

 def _compareGradient(self, x):
     # x[:, 0] is real, x[:, 1] is imag.  We combine real and imag into
     # complex numbers. Then, we extract real and imag parts and
     # computes the squared sum. This is obviously the same as sum(real
     # * real) + sum(imag * imag). We just want to make sure the
     # gradient function is checked.
     with self.test_session():
         inx = tf.convert_to_tensor(x)
         real, imag = tf.split(1, 2, inx)
         real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
         cplx = tf.complex(real, imag)
         cplx = tf.conj(cplx)
         loss = tf.reduce_sum(tf.square(tf.real(cplx))) + tf.reduce_sum(tf.square(tf.imag(cplx)))
         epsilon = 1e-3
         jacob_t, jacob_n = tf.test.compute_gradient(inx, list(x.shape), loss, [1], x_init_value=x, delta=epsilon)
     self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Developer: peace195, Project: tensorflow, Lines: 16, Source: cwise_ops_test.py


Example 18: __init__

  def __init__(self, **kwargs):
    """
    """
    def _interleaveVectors(vec1, vec2):
        vec1 = tf.expand_dims(vec1, 3)
        vec2 = tf.expand_dims(vec2, 3)
        interleaved = tf.concat([vec1, vec2], 3)
        interleaved = tf.reshape(interleaved, (tf.shape(vec1)[0], tf.shape(vec1)[1], tf.shape(vec1)[2] * 2))
        return interleaved
    super(ComplexToAlternatingRealLayer, self).__init__(**kwargs)

    input_placeholder = self.input_data.get_placeholder_as_batch_major()

    real_value = tf.real(input_placeholder)
    imag_value = tf.imag(input_placeholder)
    self.output.placeholder = _interleaveVectors(real_value, imag_value)
    self.output.size_placeholder = {0: self.input_data.size_placeholder[self.input_data.time_dim_axis_excluding_batch]}
Developer: rwth-i6, Project: returnn, Lines: 17, Source: TFNetworkSigProcLayer.py


Example 19: bilinear_pool

def bilinear_pool(x2, x1, output_size):
    """ Computes approximation of bilinear pooling with respect to x1, x2.
    For a detailed explanation, see the paper (https://arxiv.org/abs/1511.06062)

    Args:
      x1: A `Tensor` with shape (batch_size, x1_size).
      x2: A `Tensor` with shape (batch_size, x2_size).
      output_size: Output projection size. (`int`)

    Returns:
       A Tensor with shape (batch_size, output_size).
    """
    p1 = count_sketch(x1, output_size)
    p2 = count_sketch(x2, output_size)
    pc1 = tf.complex(p1, tf.zeros_like(p1))
    pc2 = tf.complex(p2, tf.zeros_like(p2))

    conved = tf.batch_ifft(tf.batch_fft(pc1) * tf.batch_fft(pc2))
    return tf.real(conved)
Developer: shmsw25, Project: mcb-model-for-vqa, Lines: 19, Source: count_sketch.py


Example 20: compute_tensorflow_spectrogram

def compute_tensorflow_spectrogram(waveform, window_size, hop_size):
    
    waveform_ = tf.placeholder(tf.float32)
    window_fn = functools.partial(tf.signal.hann_window, periodic=True)
    stft = tf.signal.stft(
        waveform_, window_size, hop_size, window_fn=window_fn)
    gram = tf.real(stft * tf.conj(stft))
    
    with tf.Session() as sess:
        
        print('Computing TensorFlow spectrogram...')
        start_time = time.time()
        g = sess.run(gram, feed_dict={waveform_: waveform})
        end_time = time.time()
        print('Done.')
        
    report_performance(g, start_time, end_time)
        
    return g
Developer: HaroldMills, Project: Vesper, Lines: 19, Source: test_tensorflow_spectrogram.py



Note: The tensorflow.real examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors, and any distribution or use must follow the corresponding project's license. Please do not republish without permission.

