This article collects typical usage examples of the Python function tensorflow.variable_scope. If you have been wondering what tf.variable_scope does, how to use it, or what real-world usage looks like, the curated code examples below should help.
20 code examples of variable_scope are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
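Before the project examples, here is a minimal sketch written for this article (not drawn from any of the projects below) of the core pattern they all rely on: tf.variable_scope prefixes the names of variables created with tf.get_variable, and reopening the scope with reuse=True fetches the existing variables instead of creating new ones.

import tensorflow as tf

def linear(x, out_dim):
    # Creates w and b the first time it runs under a fresh scope; under a
    # scope opened with reuse=True, tf.get_variable returns the existing ones.
    w = tf.get_variable("w", shape=[int(x.get_shape()[1]), out_dim])
    b = tf.get_variable("b", shape=[out_dim], initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b

x_train = tf.placeholder(tf.float32, [None, 4])
x_test = tf.placeholder(tf.float32, [None, 4])
with tf.variable_scope("layer1"):
    y_train = linear(x_train, 8)            # creates layer1/w and layer1/b
with tf.variable_scope("layer1", reuse=True):
    y_test = linear(x_test, 8)              # reuses layer1/w and layer1/b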
Example 1: inference_small_config
def inference_small_config(x, c):
    c["bottleneck"] = False
    c["ksize"] = 3
    c["stride"] = 1
    with tf.variable_scope("scale1"):
        c["conv_filters_out"] = 16
        c["block_filters_internal"] = 16
        c["stack_stride"] = 1
        x = conv(x, c)
        x = bn(x, c)
        x = activation(x)
        x = stack(x, c)
    with tf.variable_scope("scale2"):
        c["block_filters_internal"] = 32
        c["stack_stride"] = 2
        x = stack(x, c)
    with tf.variable_scope("scale3"):
        c["block_filters_internal"] = 64
        c["stack_stride"] = 2
        x = stack(x, c)
    # post-net
    x = tf.reduce_mean(x, reduction_indices=[1, 2], name="avg_pool")
    if c["num_classes"] is not None:
        with tf.variable_scope("fc"):
            x = fc(x, c)
    return x
Author: yaowenwu, Project: tensorflow-resnet, Lines: 31, Source: resnet.py
Example 2: build_graph
def build_graph(self, input, output):
    input, output = input / 128.0 - 1, output / 128.0 - 1
    with argscope([Conv2D, Conv2DTranspose],
                  kernel_initializer=tf.truncated_normal_initializer(stddev=0.02)):
        with tf.variable_scope('gen'):
            fake_output = self.generator(input)
        with tf.variable_scope('discrim'):
            real_pred = self.discriminator(input, output)
            fake_pred = self.discriminator(input, fake_output)
    self.build_losses(real_pred, fake_pred)
    errL1 = tf.reduce_mean(tf.abs(fake_output - output), name='L1_loss')
    self.g_loss = tf.add(self.g_loss, LAMBDA * errL1, name='total_g_loss')
    add_moving_summary(errL1, self.g_loss)
    # tensorboard visualization
    if IN_CH == 1:
        input = tf.image.grayscale_to_rgb(input)
    if OUT_CH == 1:
        output = tf.image.grayscale_to_rgb(output)
        fake_output = tf.image.grayscale_to_rgb(fake_output)
    visualize_tensors('input,output,fake', [input, output, fake_output],
                      max_outputs=max(30, BATCH))
    self.collect_variables()
Author: tobyma, Project: tensorpack, Lines: 25, Source: Image2Image.py
Example 3: _extract_feature_ids
def _extract_feature_ids(self, state, network_states, during_training):
    """Extracts feature IDs and advances a batch using the oracle path.

    Args:
      state: MasterState from the 'AdvanceMaster' op that advances the
          underlying master to this component.
      network_states: Dictionary of component NetworkState objects.
      during_training: Whether the graph is being constructed during training.

    Returns:
      state handle: Final state after advancing.
    """
    logging.info('Building component: %s', self.spec.name)
    if during_training:
        stride = state.current_batch_size * self.training_beam_size
    else:
        stride = state.current_batch_size * self.inference_beam_size
    with tf.variable_scope(self.name, reuse=True):
        state.handle, ids = extract_fixed_feature_ids(self, state, stride)
    with tf.variable_scope(self.name, reuse=True):
        tensors = self.network.create(
            ids, [], None, None, during_training, stride=stride)
    update_network_states(self, tensors, network_states, stride)
    return state.handle
Author: NoPointExc, Project: models, Lines: 27, Source: bulk_component.py
Example 4: add_model_vars
def add_model_vars(self):
    '''
    Your model contains the following parameters:
        embedding: tensor(vocab_size, embed_size)
        W1:        tensor(2 * embed_size, embed_size)
        b1:        tensor(1, embed_size)
        U:         tensor(embed_size, output_size)
        bs:        tensor(1, output_size)
    Hint: Add the tensorflow variables to the graph here and *reuse* them while
        building the computation graphs for composition and projection for each tree.
    Hint: Use a variable_scope "Composition" for the composition layer, and
        "Projection" for the linear transformations preceding the softmax.
    '''
    embed_size = self.config.embed_size
    vocab_size = len(self.vocab)
    output_size = self.config.label_size
    with tf.variable_scope('Composition'):
        ### YOUR CODE HERE
        embedding = tf.get_variable("embedding", shape=(vocab_size, embed_size))
        W1 = tf.get_variable("W1", shape=(2 * embed_size, embed_size))
        b1 = tf.get_variable("b1", shape=(1, embed_size))
        ### END YOUR CODE
    with tf.variable_scope('Projection'):
        ### YOUR CODE HERE
        U = tf.get_variable("U", shape=(embed_size, output_size))
        bs = tf.get_variable("bs", shape=(1, output_size))
        ### END YOUR CODE
    self.optimizer = tf.train.AdamOptimizer(learning_rate=self.config.lr)
    # dummy_total is a simple sum that ensures the AdamOptimizer slot variables
    # are created before initialization and before the variables are restored
    # later. It should never actually get executed.
    dummy_total = tf.constant(0.0)
    for v in tf.trainable_variables():
        dummy_total += tf.reduce_sum(v)
    self.dummy_minimizer = self.optimizer.minimize(dummy_total)
Author: h1bernate, Project: cs224d, Lines: 35, Source: rnn_tuner_l2.py
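As the hints in the docstring above suggest, each per-tree graph later reopens these scopes with reuse=True to fetch the shared parameters. A sketch of what one composition step might look like under that assumption; the child vectors left and right, the literal embed_size, and the TF 1.x concat API are stand-ins for illustration, not part of the assignment code:

embed_size = 35                                        # hypothetical size
left = tf.placeholder(tf.float32, [1, embed_size])     # hypothetical child vectors
right = tf.placeholder(tf.float32, [1, embed_size])
with tf.variable_scope('Composition', reuse=True):
    W1 = tf.get_variable('W1')    # the same tensors created in add_model_vars
    b1 = tf.get_variable('b1')
parent = tf.nn.relu(tf.matmul(tf.concat([left, right], 1), W1) + b1)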
Example 5: __call__
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
        # Parameters of gates are concatenated into one multiply for efficiency.
        if self._state_is_tuple:
            c, h = state
        else:
            c, h = array_ops.split(1, 2, state)
        concat = _linear([inputs, h], 4 * self._num_units, True, 0.,
                         self.weights_init, self.trainable, self.restore,
                         self.reuse)
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        i, j, f, o = array_ops.split(1, 4, concat)
        new_c = (c * self._inner_activation(f + self._forget_bias) +
                 self._inner_activation(i) * self._activation(j))
        new_h = self._activation(new_c) * self._inner_activation(o)
        if self._state_is_tuple:
            new_state = _rnn_cell.LSTMStateTuple(new_c, new_h)
        else:
            new_state = array_ops.concat(1, [new_c, new_h])
        # Retrieve RNN Variables
        with tf.variable_scope('Linear', reuse=True):
            self.W = tf.get_variable('Matrix')
            self.b = tf.get_variable('Bias')
    return new_h, new_state
Author: mixml, Project: tflearn, Lines: 31, Source: recurrent.py
Example 6: transformer_layers_sharded
def transformer_layers_sharded(dp,
                               ps_devices,
                               inputs,
                               num_layers,
                               hparams,
                               self_attention_bias=None,
                               enc_output=None,
                               attention_type=AttentionType.GLOBAL,
                               name="transformer"):
    """Multi layer transformer, sharded by the data parallelism dp."""
    x = inputs
    extra_loss = tf.constant(0.0)
    moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")]
    expert_fn = expert_utils.ffn_expert_fn(
        hparams.hidden_size, moe_hidden_sizes, hparams.hidden_size)
    x = dp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout)
    for layer in range(num_layers):
        with tf.variable_scope("%s_layer_%d" % (name, layer)):
            # self-attention
            if attention_type == AttentionType.LOCAL_2D:
                y = dp(local_attention_2d(
                    common_layers.layer_preprocess(x, hparams),
                    hparams,
                    attention_type="masked_local_attention_2d"))
            elif attention_type == AttentionType.LOCAL_1D:
                y = dp(local_attention_1d(
                    common_layers.layer_preprocess(x, hparams),
                    hparams,
                    attention_type="local_mask_right",
                    q_padding="LEFT", kv_padding="LEFT"))
            elif attention_type == AttentionType.GLOCAL:
                y = dp(local_global_attention(
                    common_layers.layer_preprocess(x, hparams), self_attention_bias,
                    hparams, q_padding="LEFT", kv_padding="LEFT"))
            elif attention_type == AttentionType.GLOBAL:
                self_attention_bias = dp(get_self_attention_bias(x))
                y = dp(full_self_attention(
                    common_layers.layer_preprocess(x, hparams),
                    self_attention_bias, hparams,
                    q_padding="LEFT", kv_padding="LEFT"))
            x = common_layers.layer_postprocess(x, y, hparams)
            if enc_output is not None:
                y = dp(encdec_attention_1d(
                    common_layers.layer_preprocess(x, hparams),
                    enc_output, None, hparams))
                x = dp(common_layers.layer_postprocess, x, y, hparams)
            with tf.variable_scope("ffn"):
                if str(layer) in hparams.moe_layers_decoder.split(","):
                    y, loss = expert_utils.distributed_moe(
                        dp,
                        ps_devices,
                        common_layers.layer_preprocess(x, hparams),
                        hparams.mode == tf.estimator.ModeKeys.TRAIN,
                        input_size=hparams.hidden_size,
                        expert_fn=expert_fn,
                        num_experts=hparams.moe_num_experts,
                        k=hparams.moe_k,
                        loss_coef=hparams.moe_loss_coef)
                    extra_loss += loss
                    x = dp(common_layers.layer_postprocess, x, y, hparams)
                else:
                    y = dp(ffn_layer, common_layers.layer_preprocess(x, hparams),
                           hparams)
                    x = dp(common_layers.layer_postprocess, x, y, hparams)
    return dp(common_layers.layer_preprocess, x, hparams), extra_loss
Author: kltony, Project: tensor2tensor, Lines: 60, Source: common_image_attention.py
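The "%s_layer_%d" % (name, layer) pattern above gives every transformer layer its own scope, so each iteration of the loop creates a distinct set of weights. A minimal sketch of the same idea, written for this article:

x = tf.placeholder(tf.float32, [None, 16])
for layer in range(3):
    with tf.variable_scope("transformer_layer_%d" % layer):
        # Each pass creates a fresh transformer_layer_<i>/w variable.
        w = tf.get_variable("w", shape=[16, 16])
        x = tf.nn.relu(tf.matmul(x, w))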
Example 7: testBlockGRUToGRUCellSingleStep
def testBlockGRUToGRUCellSingleStep(self):
    with self.test_session(use_gpu=self._use_gpu, graph=tf.Graph()) as sess:
        batch_size = 4
        cell_size = 5
        input_size = 6
        seed = 1994
        initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=seed)
        # Inputs
        x = tf.zeros([batch_size, input_size])
        h = tf.zeros([batch_size, cell_size])
        # Values for the inputs.
        x_value = np.random.rand(batch_size, input_size)
        h_value = np.random.rand(batch_size, cell_size)
        # Output from the basic GRU cell implementation.
        with tf.variable_scope("basic", initializer=initializer):
            output = tf.nn.rnn_cell.GRUCell(cell_size)(x, h)
            sess.run([tf.initialize_all_variables()])
            basic_res = sess.run([output], {x: x_value, h: h_value})
        # Output from the block GRU cell implementation.
        with tf.variable_scope("block", initializer=initializer):
            output = gru_ops.GRUBlockCell(cell_size)(x, h)
            sess.run([tf.initialize_all_variables()])
            block_res = sess.run([output], {x: x_value, h: h_value})
        self.assertEqual(len(block_res), len(basic_res))
        for block, basic in zip(block_res, basic_res):
            self.assertAllClose(block, basic)
Author: damienmg, Project: tensorflow, Lines: 32, Source: gru_ops_test.py
Example 8: start_session
def start_session(self):
    """
    Creates the session.
    """
    self.input_layer_mats = ["W_input", "b_input"]
    self.hidden_layer_mats = []
    for i in xrange(self.num_hidden):
        self.hidden_layer_mats.append("W" + str(i))
        self.hidden_layer_mats.append("b" + str(i))
    self.output_layer_mats = ["W_output", "b_output"]
    self.weight_mats = (self.input_layer_mats + self.hidden_layer_mats +
                        self.output_layer_mats)
    with tf.variable_scope("network") as scope:
        self.create_model_trainable()
    with tf.variable_scope("target") as scope:
        self.create_model_target()
    init = tf.initialize_all_variables()
    session = tf.Session()
    session.run(init)
    return session
Author: arushir, Project: dqn, Lines: 26, Source: cnn_target.py
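The "network"/"target" scope split above is the usual DQN layout: a trainable online network and a periodically synced target network. A hedged sketch of how the two scopes make the sync easy; make_target_update_op is a hypothetical helper written for this article, not part of the project above:

def make_target_update_op():
    # Collect variables by scope prefix; the names match apart from the prefix,
    # so sorting by name pairs each target variable with its online counterpart.
    net_vars = sorted(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                        scope="network"), key=lambda v: v.name)
    tgt_vars = sorted(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                        scope="target"), key=lambda v: v.name)
    # Overwrite each target variable with the trainable one.
    return tf.group(*[t.assign(s) for s, t in zip(net_vars, tgt_vars)])

Running session.run(make_target_update_op()) with the session returned by start_session would then copy the online weights into the target network.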
Example 9: __load_model
def __load_model(self):
    # Initial memory value for recurrence.
    self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim))
    # choose RNN/GRU/LSTM cell
    with tf.variable_scope("train_test", reuse=True):
        self.cell = rnn_cell.LSTMCell(self.memory_dim)
    # embedding model
    if not self.attention:
        with tf.variable_scope("train_test"):
            self.dec_outputs, self.dec_memory = seq2seq.embedding_rnn_seq2seq(
                self.enc_inp, self.dec_inp, self.cell,
                self.vocab_size, self.vocab_size, self.seq_length)
        with tf.variable_scope("train_test", reuse=True):
            self.dec_outputs_tst, _ = seq2seq.embedding_rnn_seq2seq(
                self.enc_inp, self.dec_inp, self.cell,
                self.vocab_size, self.vocab_size, self.seq_length,
                feed_previous=True)
    else:
        with tf.variable_scope("train_test"):
            self.dec_outputs, self.dec_memory = seq2seq.embedding_attention_seq2seq(
                self.enc_inp, self.dec_inp, self.cell,
                self.vocab_size, self.vocab_size, self.seq_length)
        with tf.variable_scope("train_test", reuse=True):
            self.dec_outputs_tst, _ = seq2seq.embedding_attention_seq2seq(
                self.enc_inp, self.dec_inp, self.cell,
                self.vocab_size, self.vocab_size, self.seq_length,
                feed_previous=True)
Author: githubgzc, Project: deep-summarization, Lines: 28, Source: lstm_simple.py
Example 10: inference
def inference(input_tensor, train, regularizer):
    # Layer 1: convolution
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            "weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable(
            "biases", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights,
                             strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    # Layer 2: pooling
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1], padding='SAME')
    # Layer 3: convolution
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable(
            "weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable(
            "biases", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights,
                             strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    # Layer 4: pooling
    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1], padding='SAME')
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    reshaped = tf.reshape(pool2, [pool_shape[0], nodes])
    # Layer 5: fully connected
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable(
            "weight", [nodes, FC_SIZE],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected layers' weights need regularization.
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable(
            "bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)
    # Layer 6: fully connected
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable(
            "weight", [FC_SIZE, NUM_LABELS],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected layers' weights need regularization.
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable(
            "bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases
    return logit
Author: yyzahuopu, Project: Deep-learning, Lines: 60, Source: mnist_inferenceCNN.py
Example 11: testLSTMBasicToBlockPeeping
def testLSTMBasicToBlockPeeping(self):
    with self.test_session(use_gpu=self._use_gpu) as sess:
        batch_size = 2
        input_size = 3
        cell_size = 4
        sequence_length = 5
        inputs = []
        for _ in range(sequence_length):
            inp = tf.convert_to_tensor(
                np.random.randn(batch_size, input_size), dtype=tf.float32)
            inputs.append(inp)
        initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=19890212)
        with tf.variable_scope("basic", initializer=initializer):
            cell = tf.nn.rnn_cell.LSTMCell(cell_size,
                                           use_peepholes=True,
                                           state_is_tuple=True)
            outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)
            sess.run([tf.initialize_all_variables()])
            basic_outputs = sess.run(outputs)
            basic_grads = sess.run(tf.gradients(outputs, inputs))
            basic_wgrads = sess.run(tf.gradients(outputs, tf.trainable_variables()))
        with tf.variable_scope("block", initializer=initializer):
            w = tf.get_variable("w",
                                shape=[input_size + cell_size, cell_size * 4],
                                dtype=tf.float32)
            b = tf.get_variable("b",
                                shape=[cell_size * 4],
                                dtype=tf.float32,
                                initializer=tf.zeros_initializer)
            wci = tf.get_variable("wci", shape=[cell_size], dtype=tf.float32)
            wcf = tf.get_variable("wcf", shape=[cell_size], dtype=tf.float32)
            wco = tf.get_variable("wco", shape=[cell_size], dtype=tf.float32)
            _, _, _, _, _, _, outputs = fused_lstm(
                tf.convert_to_tensor(sequence_length, dtype=tf.int64),
                inputs,
                w,
                b,
                wci=wci,
                wcf=wcf,
                wco=wco,
                cell_clip=0,
                use_peephole=True)
            sess.run([tf.initialize_all_variables()])
            block_outputs = sess.run(outputs)
            block_grads = sess.run(tf.gradients(outputs, inputs))
            block_wgrads = sess.run(tf.gradients(outputs, [w, b, wci, wcf, wco]))
        self.assertAllClose(basic_outputs, block_outputs)
        self.assertAllClose(basic_grads, block_grads)
        for basic, block in zip(basic_wgrads, block_wgrads):
            self.assertAllClose(basic, block, rtol=1e-2, atol=1e-2)
Author: 10imaging, Project: tensorflow, Lines: 60, Source: lstm_ops_test.py
Example 12: __init__
def __init__(self, sess, n_features, n_actions, lr=0.001):
    self.sess = sess
    self.s = tf.placeholder(tf.float32, [1, n_features], name='state')
    self.a = tf.placeholder(tf.int32, None, name='act')
    self.td_error = tf.placeholder(tf.float32, None, "td_error")
    with tf.variable_scope('Actor'):
        l1 = tf.layers.dense(
            inputs=self.s,
            units=20,
            activation=tf.nn.relu,
            kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.1),
            bias_initializer=tf.constant_initializer(0.1),
            name='l1'
        )
        self.acts_prob = tf.layers.dense(
            inputs=l1,
            units=n_actions,
            activation=tf.nn.softmax,
            kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.1),
            bias_initializer=tf.constant_initializer(0.1),
            name='acts_prob'
        )
    with tf.variable_scope('exp_v'):
        log_prob = tf.log(self.acts_prob[0, self.a])
        self.exp_v = tf.reduce_mean(log_prob * self.td_error)
    with tf.variable_scope('train'):
        self.train_op = tf.train.AdamOptimizer(lr).minimize(-self.exp_v)
Author: huyuxiang, Project: tensorflow_practice, Lines: 34, Source: Actor.py
Example 13: testBasicLSTMCell
def testBasicLSTMCell(self):
    with self.test_session() as sess:
        with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
            x = tf.zeros([1, 2])
            m = tf.zeros([1, 8])
            g, out_m = tf.nn.rnn_cell.MultiRNNCell(
                [tf.nn.rnn_cell.BasicLSTMCell(2)] * 2)(x, m)
            sess.run([tf.initialize_all_variables()])
            res = sess.run([g, out_m], {x.name: np.array([[1., 1.]]),
                                        m.name: 0.1 * np.ones([1, 8])})
            self.assertEqual(len(res), 2)
            # The numbers in results were not calculated, this is just a smoke test.
            self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
            expected_mem = np.array([[0.68967271, 0.68967271,
                                      0.44848421, 0.44848421,
                                      0.39897051, 0.39897051,
                                      0.24024698, 0.24024698]])
            self.assertAllClose(res[1], expected_mem)
        with tf.variable_scope("other", initializer=tf.constant_initializer(0.5)):
            x = tf.zeros([1, 3])  # Test BasicLSTMCell with input_size != num_units.
            m = tf.zeros([1, 4])
            g, out_m = tf.nn.rnn_cell.BasicLSTMCell(2, input_size=3)(x, m)
            sess.run([tf.initialize_all_variables()])
            res = sess.run([g, out_m], {x.name: np.array([[1., 1., 1.]]),
                                        m.name: 0.1 * np.ones([1, 4])})
            self.assertEqual(len(res), 2)
Author: 0-T-0, Project: tensorflow, Lines: 26, Source: rnn_cell_test.py
Example 14: cnn_model
def cnn_model(X, y):
    """2 layer Convolutional network to predict from sequence of words
    to a class."""
    # Convert indexes of words into embeddings.
    # This creates an embeddings matrix of [n_words, EMBEDDING_SIZE] and then
    # maps word indexes of the sequence into [batch_size, sequence_length,
    # EMBEDDING_SIZE].
    word_vectors = skflow.ops.categorical_variable(
        X, n_classes=n_words, embedding_size=EMBEDDING_SIZE, name='words')
    word_vectors = tf.expand_dims(word_vectors, 3)
    with tf.variable_scope('CNN_Layer1'):
        # Apply convolution filtering on the input sequence.
        conv1 = skflow.ops.conv2d(word_vectors, N_FILTERS, FILTER_SHAPE1,
                                  padding='VALID')
        # Add a ReLU for non-linearity.
        conv1 = tf.nn.relu(conv1)
        # Max pooling across the output of Convolution+ReLU.
        pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],
                               strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')
        # Transpose the matrix so that n_filters from convolution becomes width.
        pool1 = tf.transpose(pool1, [0, 1, 3, 2])
    with tf.variable_scope('CNN_Layer2'):
        # Second level of convolution filtering.
        conv2 = skflow.ops.conv2d(pool1, N_FILTERS, FILTER_SHAPE2,
                                  padding='VALID')
        # Max across each filter to get useful features for classification.
        pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
    # Apply regular WX + B and classification.
    return skflow.models.logistic_regression(pool2, y)
Author: 2er0, Project: tensorflow, Lines: 28, Source: text_classification_cnn.py
Example 15: __call__
def __call__(self, features, labels, params):
    """Creates the model graph. See the model_fn documentation in
    tf.contrib.learn.Estimator class for a more detailed explanation.
    """
    with tf.variable_scope("model"):
        with tf.variable_scope(self.name):
            return self._build(features, labels, params)
Author: clren, Project: conv_seq2seq, Lines: 7, Source: model_base.py
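Nesting scopes as in Example 15 simply joins the names with '/': assuming self.name were 'my_seq2seq', a variable created inside _build via tf.get_variable('w', ...) would end up named 'model/my_seq2seq/w:0'. A tiny illustration written for this article:

with tf.variable_scope("model"):
    with tf.variable_scope("my_seq2seq"):      # standing in for self.name
        w = tf.get_variable("w", shape=[3])
print(w.name)   # -> model/my_seq2seq/w:0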
Example 16: _cnn_to_mlp
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False,
                layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden,
                                                    activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True,
                                                   scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out,
                                                   num_outputs=num_actions,
                                                   activation_fn=None)
        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden,
                                                       activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True,
                                                      scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1,
                                                     activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(
                action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out
Author: Divyankpandey, Project: baselines, Lines: 35, Source: models.py
Example 17: forward_propagation
def forward_propagation(images):
    with tf.variable_scope('conv1') as scope:
        W_conv1 = weight_variable([5, 5, 3, 32])
        b_conv1 = bias_variable([32])
        image_matrix = tf.reshape(images, [-1, 1750, 1750, 3])
        h_conv1 = tf.nn.sigmoid(conv2d(image_matrix, W_conv1) + b_conv1)
        _activation_summary(h_conv1)
        h_pool1 = max_pool_5x5(h_conv1)
    with tf.variable_scope('conv2') as scope:
        W_conv2 = weight_variable([5, 5, 32, 64])
        b_conv2 = bias_variable([64])
        h_conv2 = tf.nn.sigmoid(conv2d(h_pool1, W_conv2) + b_conv2)
        _activation_summary(h_conv2)
        h_pool2 = max_pool_5x5(h_conv2)
    with tf.variable_scope('conv3') as scope:
        W_conv3 = weight_variable([5, 5, 64, 128])
        b_conv3 = bias_variable([128])
        h_conv3 = tf.nn.sigmoid(conv2d(h_pool2, W_conv3) + b_conv3)
        _activation_summary(h_conv3)
        h_pool3 = max_pool_5x5(h_conv3)
    with tf.variable_scope('local3') as scope:
        W_fc1 = weight_variable([14 * 14 * 128, 256])
        b_fc1 = bias_variable([256])
        h_pool3_flat = tf.reshape(h_pool3, [-1, 14 * 14 * 128])
        h_fc1 = tf.nn.sigmoid(tf.matmul(h_pool3_flat, W_fc1) + b_fc1)
        _activation_summary(h_fc1)
    keep_prob = tf.Variable(1.0)
    W_fc2 = weight_variable([256, 4])
    b_fc2 = bias_variable([4])
    y_conv = tf.nn.softmax(tf.matmul(h_fc1, W_fc2) + b_fc2)
    _activation_summary(y_conv)
    return y_conv
Author: StructML, Project: Neural-Network-Prostate, Lines: 35, Source: Process.py
Example 18: testWithScopes
def testWithScopes(self):
    init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
    init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))
    with self.test_session() as sess:
        initializer = tf.truncated_normal_initializer(stddev=.1)
        with tf.variable_scope('my_model/my_layer0'):
            var0 = tf.contrib.framework.variable(
                'my_var0', shape=[1, 3, 1], initializer=initializer)
        with tf.variable_scope('my_model/my_layer1'):
            var1 = tf.contrib.framework.variable(
                'my_var1', shape=[2, 1, 2], initializer=initializer)
        var_names_to_values = {'my_model/my_layer0/my_var0': init_value0,
                               'my_model/my_layer1/my_var1': init_value1}
        init_fn = tf.contrib.framework.assign_from_values_fn(var_names_to_values)
        # Initialize the variables.
        sess.run(tf.global_variables_initializer())
        # Perform the assignment.
        init_fn(sess)
        # Request and test the variable values:
        var0, var1 = sess.run([var0, var1])
        self.assertAllEqual(init_value0, var0)
        self.assertAllEqual(init_value1, var1)
Author: jeffzheng1, Project: tensorflow, Lines: 28, Source: variables_test.py
Example 19: _conv_layers
def _conv_layers(self, x):
    conv_layers = Layers(x)
    # Convolutional layers
    res_blocks = [1, 3, 4, 23, 3]
    output_channels = [64, 256, 512, 1024, 2048]
    with tf.variable_scope('scale0'):
        conv_layers.conv2d(filter_size=7, output_channels=output_channels[0],
                           stride=2, padding='SAME', b_value=None)
        conv_layers.maxpool(k=3)
    with tf.variable_scope('scale1'):
        conv_layers.res_layer(filter_size=3, output_channels=output_channels[1],
                              stride=2)
        for block in range(res_blocks[1] - 1):
            conv_layers.res_layer(filter_size=3, output_channels=output_channels[1],
                                  stride=1)
    with tf.variable_scope('scale2'):
        conv_layers.res_layer(filter_size=3, output_channels=output_channels[2],
                              stride=2)
        for block in range(res_blocks[2] - 1):
            conv_layers.res_layer(filter_size=3, output_channels=output_channels[2],
                                  stride=1)
    with tf.variable_scope('scale3'):
        conv_layers.res_layer(filter_size=3, output_channels=output_channels[3],
                              stride=2)
        for block in range(res_blocks[3] - 1):
            conv_layers.res_layer(filter_size=3, output_channels=output_channels[3],
                                  stride=1)
    with tf.variable_scope('scale4'):
        conv_layers.res_layer(filter_size=3, output_channels=output_channels[4],
                              stride=2)
        for block in range(res_blocks[4] - 1):
            conv_layers.res_layer(filter_size=3, output_channels=output_channels[4],
                                  stride=1)
    conv_layers.avgpool(globe=True)
    # Fully Connected Layer
    conv_layers.fc(output_nodes=10)
    return conv_layers.get_output()
Author: zymale, Project: tf-Faster-RCNN, Lines: 33, Source: ff_resnet101.py
Example 20: testInitFromCheckpointWithScopes
def testInitFromCheckpointWithScopes(self):
    init_value0 = np.asarray([1.0, 3.0, 9.0],
                             dtype=np.float32).reshape((1, 3, 1))
    init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0],
                             dtype=np.float32).reshape((2, 1, 2))
    var_names_to_values = {'layer0/v0': init_value0, 'layer1/v1': init_value1}
    model_dir = os.path.join(self.get_temp_dir(), 'model')
    with self.test_session() as sess:
        model_path = self.create_checkpoint_from_values(var_names_to_values,
                                                        model_dir)
        with tf.variable_scope('my_model/my_layer0'):
            var0 = tf.contrib.framework.variables.variable(
                'my_var0', shape=init_value0.shape)
        with tf.variable_scope('my_model/my_layer1'):
            var1 = tf.contrib.framework.variables.variable(
                'my_var1', shape=init_value1.shape)
        vars_to_restore = {'layer0/v0': var0, 'layer1/v1': var1}
        op, feed_dict = tf.contrib.framework.variables.assign_from_checkpoint(
            model_path, vars_to_restore)
        # Initialize the variables.
        sess.run(tf.global_variables_initializer())
        # Perform the assignment.
        sess.run(op, feed_dict)
        # Request and test the variable values:
        self.assertAllEqual(init_value0, var0.eval())
        self.assertAllEqual(init_value1, var1.eval())
Author: jeffzheng1, Project: tensorflow, Lines: 32, Source: variables_test.py
Note: The tensorflow.variable_scope function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce without permission.