
Python tensorflow.truncated_normal Function Code Examples


This article compiles typical usage examples of the tensorflow.truncated_normal function in Python. If you have been wondering what truncated_normal does, how to call it, or what real-world uses of it look like, the curated code examples below should help.



Twenty code examples of the truncated_normal function are presented below, ordered by popularity by default.
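Before the examples, a minimal sketch of the call itself may help. All of the examples in this article target the TF 1.x API, where the signature is tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None); in TF 2.x the same function lives at tf.random.truncated_normal. The shape and stddev below are made-up illustration values:

import tensorflow as tf

# Draw initial weights from a truncated normal distribution: values more than
# 2 standard deviations from the mean are rejected and redrawn, so no weight
# starts out extreme. (A minimal TF 1.x sketch; shape/stddev are arbitrary.)
weights = tf.Variable(tf.truncated_normal([784, 256], mean=0.0, stddev=0.1))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(weights).shape)  # (784, 256)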

Example 1: testNoCSE

 def testNoCSE(self):
   # Two truncated_normal ops with identical arguments must yield different
   # values: common-subexpression elimination must not merge them.
   with self.test_session(use_gpu=True):
     shape = [2, 3, 4]
     rnd1 = tf.truncated_normal(shape, 0.0, 1.0, tf.float32)
     rnd2 = tf.truncated_normal(shape, 0.0, 1.0, tf.float32)
     diff = rnd2 - rnd1
     self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
Author: JamesFysh | Project: tensorflow | Lines: 7 | Source: random_ops_test.py


Example 2: fc_layers

    def fc_layers(self):
        # fc1
        with tf.name_scope('fc1') as scope:
            shape = int(np.prod(self.pool5.get_shape()[1:]))
            fc1w = tf.Variable(tf.truncated_normal([shape, 4096],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc1b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                                 trainable=True, name='biases')
            pool5_flat = tf.reshape(self.pool5, [-1, shape])
            fc1l = tf.nn.bias_add(tf.matmul(pool5_flat, fc1w), fc1b)
            self.fc1 = tf.nn.relu(fc1l)
            self.parameters += [fc1w, fc1b]

        # fc2
        with tf.name_scope('fc2') as scope:
            fc2w = tf.Variable(tf.truncated_normal([4096, 4096],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc2b = tf.Variable(tf.constant(1.0, shape=[4096], dtype=tf.float32),
                                 trainable=True, name='biases')
            fc2l = tf.nn.bias_add(tf.matmul(self.fc1, fc2w), fc2b)
            self.fc2 = tf.nn.relu(fc2l)
            self.parameters += [fc2w, fc2b]

        # fc3
        with tf.name_scope('fc3') as scope:
            fc3w = tf.Variable(tf.truncated_normal([4096, 1000],
                                                         dtype=tf.float32,
                                                         stddev=1e-1), name='weights')
            fc3b = tf.Variable(tf.constant(1.0, shape=[1000], dtype=tf.float32),
                                 trainable=True, name='biases')
            self.fc3l = tf.nn.bias_add(tf.matmul(self.fc2, fc3w), fc3b)
            self.parameters += [fc3w, fc3b]
Author: muhammadzak | Project: TensorFlow-Book | Lines: 34 | Source: vgg16.py


Example 3: __init__

    def __init__(self, input_size, num_hidden, minibatch_size, name='lstmcell'):
        """
        Constructs an LSTM Cell

        Parameters:
        ----------
        input_size: int
            the size of the single input vector to the cell
        num_hidden: int
            the number of hidden nodes in the cell
        minibatch_size: int
            the number of the input vectors in the input matrix
        """

        LSTMCell.created_count += 1

        self.id = name + '_' + str(LSTMCell.created_count) if name == 'lstmcell' else name

        self.input_size = input_size
        self.num_hidden = num_hidden
        self.minibatch_size = minibatch_size

        self.input_weights = tf.Variable(tf.truncated_normal([self.input_size, self.num_hidden * 4], -0.1, 0.1), name=self.id + '_wi')
        self.output_weights = tf.Variable(tf.truncated_normal([self.num_hidden, self.num_hidden * 4], -0.1, 0.1), name=self.id + '_wo')
        self.bias = tf.Variable(tf.zeros([self.num_hidden * 4]), name=self.id + '_b')

        self.prev_output = tf.Variable(tf.zeros([self.minibatch_size, self.num_hidden]), trainable=False, name=self.id+'_o')
        self.prev_state = tf.Variable(tf.zeros([self.minibatch_size, self.num_hidden]), trainable=False, name=self.id+'_s')
Author: Mostafa-Samir | Project: 2048-Deep-RL | Lines: 28 | Source: layer.py


Example 4: autoencoder

def autoencoder(d,c=5,tied_weights=False):
	'''
	An autoencoder network with one hidden layer (containing the encoding),
	and sigmoid activation functions.

	Args:
		d: dimension of input.
		c: dimension of code.
		tied_weights: True if w1^T=w2
	Returns:
		Dictionary containing input placeholder Tensor and loss Variable
	Raises:
	'''
	inputs = tf.placeholder(tf.float32, shape=[None,d], name='input')

	w1 = tf.Variable(tf.truncated_normal([d,c], stddev=1.0/math.sqrt(d)))
	b1 = tf.Variable(tf.zeros([c]))

	w2 = tf.Variable(tf.truncated_normal([c,d], stddev=1.0/math.sqrt(c)))
	# TODO: Implement tied weights
	b2 = tf.Variable(tf.zeros([d]))

	code = tf.nn.sigmoid(tf.matmul(inputs, w1)+b1, name='encoding')
	reconstruction = tf.nn.sigmoid(tf.matmul(code, w2)+b2, name='reconstruction')
	loss = tf.reduce_mean(tf.square(reconstruction - inputs))
	tf.scalar_summary('loss', loss)
	return {'inputs': inputs, 'loss': loss}
Author: jaisanliang | Project: Machine-Learning | Lines: 27 | Source: autoencoder.py


Example 5: runNN

def runNN (train_x, train_y, test_x, test_y, numHidden):
	print "NN({})".format(numHidden)
	session = tf.InteractiveSession()

	x = tf.placeholder("float", shape=[None, train_x.shape[1]])
	y_ = tf.placeholder("float", shape=[None, 2])

	W1 = tf.Variable(tf.truncated_normal([train_x.shape[1],numHidden], stddev=0.01))
	b1 = tf.Variable(tf.truncated_normal([numHidden], stddev=0.01))
	W2 = tf.Variable(tf.truncated_normal([numHidden,2], stddev=0.01))
	b2 = tf.Variable(tf.truncated_normal([2], stddev=0.01))

	z = tf.nn.relu(tf.matmul(x,W1) + b1)
	y = tf.nn.softmax(tf.matmul(z,W2) + b2)

	cross_entropy = -tf.reduce_sum(y_*tf.log(tf.clip_by_value(y,1e-10,1.0)))
	#cross_entropy = -tf.reduce_sum(y_*tf.log(y))
	train_step = tf.train.MomentumOptimizer(learning_rate=.001, momentum=0.1).minimize(cross_entropy)
	#train_step = tf.train.AdamOptimizer(learning_rate=.01).minimize(cross_entropy)

	session.run(tf.initialize_all_variables())
	for i in range(NUM_EPOCHS):
		offset = i*BATCH_SIZE % (train_x.shape[0] - BATCH_SIZE)
		train_step.run({x: train_x[offset:offset+BATCH_SIZE, :], y_: makeLabels(train_y[offset:offset+BATCH_SIZE])})
		if i % 100 == 0:
			util.showProgress(cross_entropy, x, y, y_, test_x, test_y)
	session.close()
Author: jwhitehill | Project: MultiEnrollmentProjectWithDustin | Lines: 27 | Source: run_tensorflow.py


Example 6: create_subsample_layers

def create_subsample_layers():
    print('Defining parameters ...')

    for op in conv_ops:
        if 'fulcon' in op:
            #we don't create weights/biases for fully connected layers here because we need
            #the fan_out of the last convolution/pooling (subsampling) layer first,
            #as that becomes the fan_in of the 1st hidden layer
            break
        if 'conv' in op:
            print('\tDefining weights and biases for %s (weights:%s)'%(op,hyparams[op]['weights']))
            print('\t\tWeights:%s'%hyparams[op]['weights'])
            print('\t\tBias:%d'%hyparams[op]['weights'][3])
            weights[op]=tf.Variable(
                tf.truncated_normal(hyparams[op]['weights'],
                                    stddev=2./min(5,hyparams[op]['weights'][0])
                                    )
            )
            biases[op] = tf.Variable(tf.constant(np.random.random()*0.001,shape=[hyparams[op]['weights'][3]]))
        if 'incept' in op:
            print('\n\tDefining the weights and biases for the Incept Module')
            inc_hyparams = hyparams[op]
            for k,v in inc_hyparams.items():
                if 'conv' in k:
                    w_key = op+'_'+k
                    print('\t\tParameters for %s'%w_key)
                    print('\t\t\tWeights:%s'%inc_hyparams[k]['weights'])
                    print('\t\t\tBias:%d'%inc_hyparams[k]['weights'][3])
                    weights[w_key] = tf.Variable(
                        tf.truncated_normal(inc_hyparams[k]['weights'],
                                            stddev=2./min(5,inc_hyparams[k]['weights'][0])
                                            )
                    )
                    biases[w_key] = tf.Variable(tf.constant(np.random.random()*0.0,shape=[inc_hyparams[k]['weights'][3]]))
Author: thushv89 | Project: ConvNets | Lines: 34 | Source: conv_net_plot.py


Example 7: inference

def inference(images):
    """
    Build the MNIST model
    """

    # Hidden 1
    with tf.name_scope('hidden1'):
        weights = tf.Variable(
            tf.truncated_normal([IMAGE_PIXELS, LAYER_SIZE],
                                stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
            name='weights')
        biases = tf.Variable(tf.zeros([LAYER_SIZE]),
                             name='biases')
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
        # Add summary ops to collect data
        tf.histogram_summary('weights', weights)
        tf.histogram_summary('biases', biases)

    # Output Layer - is this correct? does this layer have any weights?
    with tf.name_scope('softmax_linear'):
        weights = tf.Variable(
            tf.truncated_normal([LAYER_SIZE, NUM_CLASSES],
                                stddev=1.0 / math.sqrt(float(LAYER_SIZE))),
            name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                             name='biases')
        logits = logSoftMax(tf.matmul(hidden1, weights) + biases)
        return logits
Author: mdrumond | Project: tensorflow | Lines: 28 | Source: simpleDnn_model.py


Example 8: inference

def inference(images):
  """Build the MNIST model up to where it may be used for inference.
  Args:
    images: Images placeholder, from inputs().
  Returns:
    logits: Output tensor with the computed logits.
  """
  # Hidden 1
  with tf.name_scope('conv1'):
      images=tf.reshape(images,[-1,32,32,3])
      conv1_w=tf.Variable(tf.truncated_normal([5,5,3,8]),name='weights')
      conv1_b=tf.Variable(tf.zeros([8]),name='bias')
      conv1=tf.nn.relu(tf.nn.conv2d(images,conv1_w,[1,1,1,1],'VALID')+conv1_b)
  with tf.name_scope('conv2'):
      conv2_w = tf.Variable(tf.truncated_normal([5, 5, 8, 8]), name='weights')
      conv2_b = tf.Variable(tf.zeros([8]), name='bias')
      conv2 = tf.nn.relu(tf.nn.conv2d(conv1, conv2_w, strides=[1, 1, 1, 1],padding='VALID') + conv2_b)
      conv2_flatten=tf.reshape(conv2,[-1,24*24*8])
  with tf.name_scope('softmax_linear'):
    weights = tf.Variable(
        tf.truncated_normal([24*24*8, NUM_CLASSES]),name='weights')
    biases = tf.Variable(tf.zeros([NUM_CLASSES]),name='biases')
    logits = tf.matmul(conv2_flatten, weights) + biases
  return logits
Author: yuanzhihang1 | Project: hand3 | Lines: 26 | Source: net.py


Example 9: __init__

  def __init__(self, embedding_length):
    self._embedding_length = embedding_length
    self._named_tensors = {}

    for n in xrange(10):
      # Note: the examples only have the numbers 0 through 9 as terminal nodes.
      name = 'terminal_' + str(n)
      self._named_tensors[name] = tf.Variable(
          tf.truncated_normal([embedding_length],
                              dtype=tf.float32,
                              stddev=1),
          name=name)

    self._combiner_weights = {}
    self._loom_ops = {}
    for name in calculator_pb2.CalculatorExpression.OpCode.keys():
      weights_var = tf.Variable(
          tf.truncated_normal([2 * embedding_length, embedding_length],
                              dtype=tf.float32,
                              stddev=1),
          name=name)
      self._combiner_weights[name] = weights_var
      self._loom_ops[name] = CombineLoomOp(2, embedding_length, weights_var)

    self._loom = loom.Loom(
        named_tensors=self._named_tensors,
        named_ops=self._loom_ops)

    self._output = self._loom.output_tensor(
        loom.TypeShape('float32', [embedding_length]))
Author: wangbosdqd | Project: fold | Lines: 30 | Source: model.py


Example 10: train

def train(mnist):
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')
    weights1 = tf.Variable(tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
    bias1 = tf.Variable(tf.constant(0.0, shape=[LAYER1_NODE]))
    weights2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    bias2 = tf.Variable(tf.constant(0.0, shape=[OUTPUT_NODE]))
    layer1 = tf.nn.relu(tf.matmul(x, weights1) + bias1)
    y = tf.matmul(layer1, weights2) + bias2
    global_step = tf.Variable(0, trainable=False)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    loss = tf.reduce_mean(cross_entropy)
    train_op=tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss, global_step=global_step)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}     

        for i in range(TRAINING_STEPS):
            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, feed_dict=validate_feed)
                print("After %d training step(s), validation accuracy using average model is %g " % (i, validate_acc))
            

            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            sess.run(train_op, feed_dict={x: xs, y_: ys})


        test_acc = sess.run(accuracy, feed_dict=test_feed)
        print("After %d training step(s), test accuracy using average model is %g" % (TRAINING_STEPS, test_acc))
Author: xenron | Project: sandbox-da-TensorFlow | Lines: 33 | Source: 114.py


Example 11: __init__

    def __init__(self, optimizer, categories, num_of_terms, num_of_hidden_nodes):
        self.optimizer           = optimizer
        self.categories          = categories
        self.num_of_categories   = len(self.categories)
        self.num_of_terms        = num_of_terms
        self.num_of_hidden_nodes = num_of_hidden_nodes

        self.input_ph      = tf.placeholder(tf.float32, [None, self.num_of_terms], name="input")
        self.supervisor_ph = tf.placeholder(tf.float32, [None, self.num_of_categories], name="supervisor")

        with tf.name_scope("inference") as scope:
            weight1_var    = tf.Variable(tf.truncated_normal([self.num_of_terms, self.num_of_hidden_nodes], stddev=0.1), name="weight1")
            weight2_var    = tf.Variable(tf.truncated_normal([self.num_of_hidden_nodes, self.num_of_categories], stddev=0.1), name="weight2")
            bias1_var      = tf.Variable(tf.zeros([self.num_of_hidden_nodes]), name="bias1")
            bias2_var      = tf.Variable(tf.zeros([self.num_of_categories]), name="bias2")
            hidden_op      = tf.nn.relu(tf.matmul(self.input_ph, weight1_var) + bias1_var)
            self.output_op = tf.nn.softmax(tf.matmul(hidden_op, weight2_var) + bias2_var)

        with tf.name_scope("loss") as scope:
            cross_entropy = -tf.reduce_sum(self.supervisor_ph * tf.log(self.output_op))
            l2_sqr        = tf.nn.l2_loss(weight1_var) + tf.nn.l2_loss(weight2_var)
            lambda_2      = 0.01
            self.loss_op  = cross_entropy + lambda_2 * l2_sqr
            tf.scalar_summary("loss", self.loss_op)

        with tf.name_scope("training") as scope:
            self.training_op = self.optimizer.minimize(self.loss_op)

        with tf.name_scope("accuracy") as scope:
            correct_prediction = tf.equal(tf.argmax(self.output_op, 1), tf.argmax(self.supervisor_ph, 1))
            self.accuracy_op   = tf.reduce_mean(tf.cast(correct_prediction, "float"))
            tf.scalar_summary("accuracy", self.accuracy_op)

        self.summary_op = tf.merge_all_summaries()
Author: nayutaya | Project: 20160228-gdg-kobe | Lines: 34 | Source: mlp.py


Example 12: mnist_inference

def mnist_inference(images, hidden1_units):
    """Build the MNIST model up to where it may be used for inference.
    Args:
        images: Images placeholder.
        hidden1_units: Size of the first hidden layer.
    Returns:
        logits: Output tensor with the computed logits.
    """
    # Hidden 1
    with tf.name_scope('hidden1'):
        weights = tf.Variable(
            tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
                                stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
            name='weights')
        biases = tf.Variable(tf.zeros([hidden1_units]),
                             name='biases')
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
    # Linear
    with tf.name_scope('softmax_linear'):
        weights = tf.Variable(
            tf.truncated_normal([hidden1_units, NUM_CLASSES],
                                stddev=1.0 / math.sqrt(float(hidden1_units))),
            name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                             name='biases')
        logits = tf.matmul(hidden1, weights) + biases

    # Uncomment the following line to see what we have constructed.
    # tf.train.write_graph(tf.get_default_graph().as_graph_def(),
    #                      "/tmp", "inference.pbtxt", as_text=True)
    return logits
Author: ccortezb | Project: pipeline | Lines: 31 | Source: mnist_onehlayer.py


Example 13: inference

def inference(images, hidden1_units):
    """Build the MNIST model up to where it may be used for inference.
    
    Args:
      images: Images placeholder, from inputs().
      hidden1_units: Size of the first hidden layer.    
    Returns:
      softmax_linear: Output tensor with the computed logits.
    """
    # Hidden 1
    with tf.name_scope('hidden1'):
        '''
        A Variable is a modifiable tensor that lives in TensorFlow's graph of interacting operations.
        It can be used and even modified by the computation. For machine learning applications,
        one generally has the model parameters be Variables.
        '''  
        weights = tf.Variable(
            tf.truncated_normal([IMAGE_PIXELS, hidden1_units], stddev=1.0 / np.sqrt(float(IMAGE_PIXELS))), name='weights')
        biases = tf.Variable(tf.zeros([hidden1_units]), name='biases')
        hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
    # Linear
    with tf.name_scope('softmax_linear'):
        weights = tf.Variable(
            tf.truncated_normal([hidden1_units, NUM_CLASSES], stddev=1.0 / np.sqrt(float(hidden1_units))), name='weights')
        biases = tf.Variable(tf.zeros([NUM_CLASSES]), name='biases')
        logits = tf.matmul(hidden1, weights) + biases
        
        return logits
Author: lidalei | Project: DataMining | Lines: 28 | Source: nn_with_learning_rate.py


Example 14: get_inference

def get_inference(images_ph, dropout_keep_prob_ph):
    #subtract average image
    with tf.variable_scope('centering') as scope:
        mean = tf.constant(vgg.average_image, dtype=tf.float32, name='avg_image')
        images_ph = tf.sub(images_ph, mean, name='subtract_avg')

    #get layers from vgg19
    vgg_layers = vgg.get_VGG_layers(images_ph, dropout_keep_prob_ph, train_fc_layers=True)

    #################################################
    ### Add more layers for semantic segmentation ###
    #################################################

    # convolution on top of pool4 to 21 channels (to make coarse predictions)
    with tf.variable_scope('conv9') as scope:
        conv9 = conv_layer(vgg_layers['pool4'], 21, 1, 'conv9')

    # convolution on top of conv7 (fc7) to 21 channels (to make coarse predictions)
    with tf.variable_scope('conv8') as scope:
        conv8 = conv_layer(vgg_layers['dropout2'], 21, 1, 'conv8')

    # 2x upsampling from last layer
    with tf.variable_scope('deconv1') as scope:
        shape = tf.shape(conv8)
        out_shape = tf.pack([shape[0], shape[1]*2, shape[2]*2, 21])
        weights = tf.Variable(tf.truncated_normal(mean=MEAN, stddev=0.1, shape=(4, 4, 21, 21)), name='weights')
        deconv1 = tf.nn.conv2d_transpose( value=conv8,
                                          filter=weights,
                                          output_shape=out_shape,
                                          strides=(1, 2, 2, 1),
                                          padding='SAME',
                                          name='deconv1')

        # slice 2x upsampled tensor in the last layer to fit pool4
        shape = tf.shape(conv9)
        size = tf.pack([-1, shape[1], shape[2], -1])
        deconv1 = tf.slice(deconv1, begin=[0,0,0,0], size=size, name="deconv1_slice")

    # combine predictions from the last layer and pool4
    with tf.variable_scope('combined_pred') as scope:
        combined_pred = tf.add(deconv1, conv9, name="combined_pred")

    # 16x upsampling
    with tf.variable_scope('deconv2') as scope:
        shape = tf.shape(combined_pred)
        out_shape = tf.pack([shape[0], shape[1]*16, shape[2]*16, 21])
        weights = tf.Variable(tf.truncated_normal(mean=MEAN, stddev=0.1, shape=(32, 32, 21, 21)), name='weights')
        deconv2 = tf.nn.conv2d_transpose(value=combined_pred,
                                          filter=weights,
                                          output_shape=out_shape,
                                          strides=(1, 16, 16, 1),
                                          padding='SAME',
                                          name='deconv2')

        # slice upsampled tensor to original shape
        orig_shape = tf.shape(images_ph)
        size = tf.pack([-1, orig_shape[1], orig_shape[2], -1])
        logits = tf.slice(deconv2, begin=[0,0,0,0], size=size, name='logits')

    return logits
Author: dmancevo | Project: semantic_segmentation | Lines: 60 | Source: sem_segm.py


Example 15: create_new_conv_layer

def create_new_conv_layer(input_data, num_input_channels, num_filters, filter_shape, pool_shape, name):
    # setup the filter input shape for tf.nn.conv_2d
    conv_filt_shape = [filter_shape[0], filter_shape[1], num_input_channels, num_filters]

    # initialise weights and bias for the filter
    weights = tf.Variable(tf.truncated_normal(conv_filt_shape, stddev=0.03), name=name+'_W')
    bias = tf.Variable(tf.truncated_normal([num_filters]), name=name+'_b')

    # setup the convolutional layer operation
    out_layer = tf.nn.conv2d(input_data, weights, [1, 1, 1, 1], padding='SAME')

    # add the bias
    out_layer += bias

    # apply a ReLU non-linear activation
    out_layer = tf.nn.relu(out_layer)

    # now perform max pooling
    # ksize is the argument which defines the size of the max pooling window (i.e. the area over which the maximum is
    # calculated).  It must be 4D to match the convolution - in this case, for each image we want to use a 2 x 2 area
    # applied to each channel
    ksize = [1, pool_shape[0], pool_shape[1], 1]
    # strides defines how the max pooling area moves through the image - a stride of 2 in the x direction will lead to
    # max pooling areas starting at x=0, x=2, x=4 etc. through your image.  If the stride is 1, we will get max pooling
    # overlapping previous max pooling areas (and no reduction in the number of parameters).  In this case, we want
    # to do strides of 2 in the x and y directions.
    strides = [1, 2, 2, 1]
    out_layer = tf.nn.max_pool(out_layer, ksize=ksize, strides=strides, padding='SAME')

    return out_layer
Author: greatabel | Project: MachineLearning | Lines: 30 | Source: i0convolutional_neural_network_tutorial.py


Example 16: init_model

    def init_model(self):
        # input layer (8 x 8)
        self.x = tf.placeholder(tf.float32, [None, 8, 8])

        # flatten (64)
        x_flat = tf.reshape(self.x, [-1, 64])

        # fully connected layer (32)
        W_fc1 = tf.Variable(tf.truncated_normal([64, 64], stddev=0.01))
        b_fc1 = tf.Variable(tf.zeros([64]))
        h_fc1 = tf.nn.relu(tf.matmul(x_flat, W_fc1) + b_fc1)

        # output layer (n_actions)
        W_out = tf.Variable(tf.truncated_normal([64, self.n_actions], stddev=0.01))
        b_out = tf.Variable(tf.zeros([self.n_actions]))
        self.y = tf.matmul(h_fc1, W_out) + b_out

        # loss function
        self.y_ = tf.placeholder(tf.float32, [None, self.n_actions])
        self.loss = tf.reduce_mean(tf.square(self.y_ - self.y))

        # train operation
        optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
        self.training = optimizer.minimize(self.loss)

        # saver
        self.saver = tf.train.Saver()

        # session
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
Author: algolab-inc | Project: tf-dqn-simple | Lines: 31 | Source: dqn_agent.py


Example 17: _intermediate_controller

    def _intermediate_controller(self, register_output):
        """
        Trainable model with a, b, c, and f outputs.
        """
        inputs = tf.transpose(self._interpret_register(register_output),
            name="interpreted_registers")

        with tf.name_scope("h1"):
            w = tf.Variable(tf.truncated_normal([self.R_num_registers,
                self.num_h1_units],
                stddev=1.0 / math.sqrt(float(self.R_num_registers))),
                name="w")
            b = tf.Variable(tf.zeros([self.num_h1_units]),
                name="b")
            h1 = tf.nn.relu(tf.matmul(inputs, w) + b, name="h1")

        with tf.name_scope("a"):
            w = tf.Variable(tf.truncated_normal(
                [self.num_h1_units,
                (self.R_num_registers + self.Q_num_modules)],
                stddev=1.0 / math.sqrt(float(self.num_h1_units))),
                name="w")
            b = tf.Variable(tf.zeros([(self.R_num_registers + \
                self.Q_num_modules)]),
                name="b")
            logit_a = tf.matmul(h1, w) + b

        with tf.name_scope("b"):
            w = tf.Variable(tf.truncated_normal(
                [self.num_h1_units,
                (self.R_num_registers + self.Q_num_modules)],
                stddev=1.0 / math.sqrt(float(self.num_h1_units))),
                name="w")
            b = tf.Variable(tf.zeros([(self.R_num_registers + \
                self.Q_num_modules)]),
                name="b")
            logit_b = tf.matmul(h1, w) + b

        with tf.name_scope("c"):
            w = tf.Variable(tf.truncated_normal(
                [self.num_h1_units,
                (self.R_num_registers + self.Q_num_modules)],
                stddev=1.0 / math.sqrt(float(self.num_h1_units))),
                name="w")
            b = tf.Variable(tf.zeros([(self.R_num_registers + \
                self.Q_num_modules)]),
                name="b")
            logit_c = tf.matmul(h1, w) + b

        with tf.name_scope("f"):
            w = tf.Variable(tf.truncated_normal(
                [self.num_h1_units, 1],
                stddev=1.0 / math.sqrt(float(self.num_h1_units))),
                name="w")
            b = tf.Variable(tf.zeros([1]),
                name="b")
            logit_x = tf.matmul(h1, w) + b
            f = tf.nn.sigmoid(logit_x)

        return logit_a, logit_b, logit_c, f
Author: mjvogelsong | Project: tf-neural-ram | Lines: 60 | Source: model.py


Example 18: __init__

    def __init__(self, ob_dim):
        self.sess = tf.Session()
        self.sess.__enter__()

        self.sy_obs = tf.placeholder(tf.float32, [None,ob_dim])
        self.sy_vals = tf.placeholder(tf.float32, [None])

        beta = 0.0000
        num_h = 5
        w1 = tf.Variable(tf.truncated_normal([ob_dim,num_h], stddev=1.0/ob_dim**0.5))
        b1 = tf.Variable(tf.zeros(num_h))
        h1 = tf.nn.relu(tf.matmul(self.sy_obs,w1)+b1)

        w2 = tf.Variable(tf.truncated_normal([num_h,num_h], stddev=1.0/num_h**0.5))
        b2 = tf.Variable(tf.zeros(num_h))
        h2 = tf.nn.relu(tf.matmul(h1,w2)+b2)
        #w2 = tf.Variable(tf.truncated_normal([num_h,1], stddev=1.0/num_h**0.5))
        #b2 = tf.Variable(tf.zeros(1))
        #self.pred_vals = tf.reshape(tf.matmul(h1,w2)+b2,shape=[-1])

        w3 = tf.Variable(tf.truncated_normal([num_h,1], stddev=1.0/num_h**0.5))
        b3 = tf.Variable(tf.zeros(1))
        self.pred_vals = tf.reshape(tf.matmul(h2,w3)+b3,shape=[-1])

        loss = tf.reduce_mean(self.sy_vals-self.pred_vals)
        reg = tf.nn.l2_loss(w1)+tf.nn.l2_loss(w2)
        loss = tf.reduce_mean(loss+beta*reg)
        self.train_step = tf.train.AdamOptimizer().minimize(loss)

        init = tf.global_variables_initializer()
        self.sess.run(init)
Author: jaisanliang | Project: Machine-Learning | Lines: 31 | Source: main.py


Example 19: drawGraph

    def drawGraph(self, n_row, n_latent, n_col):
        with tf.name_scope('matDecomp'):
            self._p = tf.placeholder(tf.float32, shape=[None, n_col])
            self._c = tf.placeholder(tf.float32, shape=[None, n_col])
            self._lambda = tf.placeholder(tf.float32)
            self._index = tf.placeholder(tf.float32, shape=[None, n_row])
            self._A = tf.Variable(tf.truncated_normal([n_row, n_latent]))
            self._B = tf.Variable(tf.truncated_normal([n_latent, n_col]))
            self._h = tf.matmul(tf.matmul(self._index, self._A), self._B) 
            
            weighted_loss = tf.reduce_mean(tf.mul(self._c, tf.squared_difference(self._p, self._h)))
            self._weighted_loss = weighted_loss
            l2_A = tf.reduce_sum(tf.square(self._A))
            l2_B = tf.reduce_sum(tf.square(self._B))
            n_w = tf.constant(n_row * n_latent + n_latent * n_col, tf.float32)
            l2 = tf.truediv(tf.add(l2_A, l2_B), n_w)
            reg_term = tf.mul(self._lambda, l2)
            self._loss = tf.add(weighted_loss, reg_term)
            
            self._mask = tf.placeholder(tf.float32, shape=[n_row, n_col])
            one = tf.constant(1, tf.float32)
            pred = tf.cast(tf.greater_equal(tf.matmul(self._A, self._B), one), tf.float32)
            cor = tf.mul(tf.cast(tf.equal(pred, self._p), tf.float32), self._c)
            self._vali_err = tf.reduce_sum(tf.mul(cor, self._mask))

            self._saver = tf.train.Saver([v for v in tf.all_variables() if v.name.find('matDecomp') != -1])
            tf.scalar_summary('training_weighted_loss_l2', self._loss)
            tf.scalar_summary('validation_weighted_loss', self._weighted_loss)
            merged = tf.merge_all_summaries()
Author: cning | Project: ehc | Lines: 29 | Source: model.py


Example 20: encode

def encode(images,texts):

  #img_hidden
  with tf.name_scope('img_hidden'):
    weights = tf.Variable(
        tf.truncated_normal([const.INP_IMAGE_DIM, const.HIDDEN_IMG_DIM],
                            stddev=1.0 / math.sqrt(float(const.HIDDEN_IMG_DIM))),
        name='weights')

    biases = tf.Variable(tf.zeros([const.HIDDEN_IMG_DIM]),
                         name='biases')
    hidden_img = tf.nn.relu(tf.matmul(images, weights) + biases)
  # Text_Hidden
  with tf.name_scope('txt_hidden'):


    weights = tf.Variable(
         tf.truncated_normal([const.INP_TXT_DIM, const.HIDDEN_TXT_DIM],
                             stddev=1.0 / math.sqrt(float(const.HIDDEN_TXT_DIM))),
         name='weights')
    biases = tf.Variable(tf.zeros([const.HIDDEN_TXT_DIM]),
                         name='biases')
    hidden_txt = tf.nn.relu(tf.matmul(texts, weights) + biases)
  # Linear
  with tf.name_scope('join_layer'):

    hidden_con = tf.concat([hidden_img, hidden_txt], axis=-1, name="hidden_con")
    weights = tf.Variable(
        tf.truncated_normal([const.HIDDEN_CON_DIM, const.SHARED_DIM],
                            stddev=1.0 / math.sqrt(float(const.SHARED_DIM))),
        name='weights')
    biases = tf.Variable(tf.zeros([const.SHARED_DIM]),
                         name='biases')
    representive = tf.matmul(hidden_con, weights) + biases
  return representive
Author: cuongtransc | Project: it6311-cross-media | Lines: 35 | Source: component.py



Note: The tensorflow.truncated_normal examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution and use should follow the corresponding project licenses. Please do not repost without permission.

