This article collects typical usage examples of the Python function tensorflow.python.keras._impl.keras.backend.floatx. If you are wondering what floatx does, how it is called, and what it looks like in real code, the curated samples below should help.
Twenty code examples of the floatx function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
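Before diving into the examples, here is a minimal sketch (not taken from any of the projects below) of what floatx itself does: it returns the name of the default float type used by the Keras backend, and set_floatx changes it. The import path mirrors the old tensorflow.python.keras._impl layout this article covers; in newer TensorFlow releases the same functions live under tf.keras.backend.

from tensorflow.python.keras._impl.keras import backend as K
import numpy as np

print(K.floatx())                            # usually 'float32'
x = np.asarray([1, 2, 3], dtype=K.floatx())  # NumPy accepts the dtype name directly
print(x.dtype)                               # float32

K.set_floatx('float64')                      # switch the default float type
print(K.floatx())                            # 'float64'
K.set_floatx('float32')                      # restore the usual default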
Example 1: next
def next(self):
  """For python 2.x.
  Returns:
      The next batch.
  """
  # Keeps under lock only the mechanism which advances
  # the indexing of each batch.
  with self.lock:
    index_array, current_index, current_batch_size = next(
        self.index_generator)
  # The transformation of images is not under thread lock
  # so it can be done in parallel
  batch_x = np.zeros(
      tuple([current_batch_size] + list(self.x.shape)[1:]), dtype=K.floatx())
  for i, j in enumerate(index_array):
    x = self.x[j]
    x = self.image_data_generator.random_transform(x.astype(K.floatx()))
    x = self.image_data_generator.standardize(x)
    batch_x[i] = x
  if self.save_to_dir:
    for i in range(current_batch_size):
      img = array_to_img(batch_x[i], self.data_format, scale=True)
      fname = '{prefix}_{index}_{hash}.{format}'.format(
          prefix=self.save_prefix,
          index=current_index + i,
          hash=np.random.randint(1e4),
          format=self.save_format)
      img.save(os.path.join(self.save_to_dir, fname))
  if self.y is None:
    return batch_x
  batch_y = self.y[index_array]
  return batch_x, batch_y
Developer ID: 1000sprites, Project: tensorflow, Lines of code: 33, Source file: image.py
Example 2: test_on_batch
def test_on_batch(model, inputs, targets, sample_weights=None):
  """Calculates the loss for one input batch.
  Arguments:
      model: Model whose loss has to be calculated.
      inputs: Input batch data.
      targets: Target batch data.
      sample_weights: Sample weight batch data.
  Returns:
      total loss, loss and metrics associated with each output.
  """
  inputs = [
      ops.convert_to_tensor(val, dtype=backend.floatx()) for val in inputs]
  targets = [
      ops.convert_to_tensor(val, dtype=backend.floatx()) for val in targets]
  sample_weights = [
      ops.convert_to_tensor(val, dtype=backend.floatx())
      if val is not None else None for val in sample_weights]
  outs, loss, loss_metrics = _process_single_batch(
      model, inputs, targets, sample_weights=sample_weights, training=False)
  if not isinstance(outs, list):
    outs = [outs]
  metrics_results = _eager_metrics_fn(
      model, outs, targets)
  if not isinstance(loss, list):
    loss = [loss]
  return loss + loss_metrics + metrics_results
Developer ID: kimr843, Project: tensorflow, Lines of code: 28, Source file: training_eager.py
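The core pattern in this example, converting each NumPy batch to a tensor with dtype=backend.floatx() before feeding the model, can be reproduced with the public TensorFlow API. A small sketch under that assumption (the array shapes are made up):

import numpy as np
import tensorflow as tf

backend = tf.keras.backend                                # public counterpart of the _impl backend
inputs = [np.random.rand(4, 3), np.random.rand(4, 2)]     # hypothetical float64 batch data
tensors = [tf.convert_to_tensor(val, dtype=backend.floatx()) for val in inputs]
print([t.dtype for t in tensors])                         # [tf.float32, tf.float32] with the default floatx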
Example 3: _get_batches_of_transformed_samples
def _get_batches_of_transformed_samples(self, index_array):
  batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=K.floatx())
  grayscale = self.color_mode == 'grayscale'
  # build batch of image data
  for i, j in enumerate(index_array):
    fname = self.filenames[j]
    img = load_img(os.path.join(self.directory, fname),
                   grayscale=grayscale,
                   target_size=self.target_size)
    x = img_to_array(img, data_format=self.data_format)
    x = self.image_data_generator.random_transform(x)
    x = self.image_data_generator.standardize(x)
    batch_x[i] = x
  # optionally save augmented images to disk for debugging purposes
  if self.save_to_dir:
    for i, j in enumerate(index_array):
      img = array_to_img(batch_x[i], self.data_format, scale=True)
      fname = '{prefix}_{index}_{hash}.{format}'.format(
          prefix=self.save_prefix, index=j, hash=np.random.randint(1e7),
          format=self.save_format)
      img.save(os.path.join(self.save_to_dir, fname))
  # build batch of labels
  if self.class_mode == 'input':
    batch_y = batch_x.copy()
  elif self.class_mode == 'sparse':
    batch_y = self.classes[index_array]
  elif self.class_mode == 'binary':
    batch_y = self.classes[index_array].astype(K.floatx())
  elif self.class_mode == 'categorical':
    batch_y = np.zeros((len(batch_x), self.num_classes), dtype=K.floatx())
    for i, label in enumerate(self.classes[index_array]):
      batch_y[i, label] = 1.
  else:
    return batch_x
  return batch_x, batch_y
Developer ID: Kongsea, Project: tensorflow, Lines of code: 35, Source file: image.py
Example 4: weighted
def weighted(y_true, y_pred, weights, mask=None):
  """Wrapper function.
  Arguments:
      y_true: `y_true` argument of `fn`.
      y_pred: `y_pred` argument of `fn`.
      weights: Weights tensor.
      mask: Mask tensor.
  Returns:
      Scalar tensor.
  """
  # score_array has ndim >= 2
  score_array = fn(y_true, y_pred)
  if mask is not None:
    # Cast the mask to floatX to avoid float64 upcasting in theano
    mask = math_ops.cast(mask, K.floatx())
    # mask should have the same shape as score_array
    score_array *= mask
    # the loss per batch should be proportional
    # to the number of unmasked samples.
    score_array /= K.mean(mask)
  # apply sample weighting
  if weights is not None:
    # reduce score_array to same ndim as weight array
    ndim = K.ndim(score_array)
    weight_ndim = K.ndim(weights)
    score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))
    score_array *= weights
    score_array /= K.mean(
        math_ops.cast(math_ops.not_equal(weights, 0), K.floatx()))
  return K.mean(score_array)
Developer ID: syed-ahmed, Project: tensorflow, Lines of code: 33, Source file: training_utils.py
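The masking branch above is easier to see with concrete numbers. A plain-NumPy re-enactment (an illustration, not the Keras implementation): zeroing the masked entries and then dividing by the mask mean makes the final mean behave like an average over the unmasked samples only.

import numpy as np

score_array = np.array([0.5, 1.0, 2.0, 4.0], dtype='float32')  # per-sample losses
mask = np.array([1., 1., 0., 1.], dtype='float32')             # third sample is masked out

score_array *= mask           # drop masked entries
score_array /= np.mean(mask)  # rescale by the fraction of unmasked samples
print(np.mean(score_array))   # ~1.833, i.e. (0.5 + 1.0 + 4.0) / 3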
Example 5: test_on_batch
def test_on_batch(model, ins):
  """Calculates the loss for one input batch.
  Arguments:
      model: Given model on which loss is calculated.
      ins: Input and output batch numpy arrays.
  Returns:
      total loss, loss and metrics associated with each output.
  """
  ins_batch_converted = []
  for ib in ins:
    ins_batch_converted.append(ops.convert_to_tensor(ib, dtype=K.floatx()))
  eager_model_inputs = []
  eager_model_outputs = []
  for i in range(len(model.inputs)):
    eager_model_inputs.append(ins_batch_converted[i])
  for i in range(len(model.inputs), len(ins_batch_converted)):
    eager_model_outputs.append(ins_batch_converted[i])
  outs, loss, loss_metrics = _process_single_batch(
      eager_model_inputs, eager_model_outputs, model, training=False)
  if not isinstance(outs, list):
    outs = [outs]
  metric_names, metrics_results = _eager_metrics_fn(
      model, outs, eager_model_outputs)
  model.metrics_names.append(metric_names)
  if not isinstance(loss, list):
    loss = [loss]
  return loss + loss_metrics + metrics_results
Developer ID: dananjayamahesh, Project: tensorflow, Lines of code: 29, Source file: training_eager.py
Example 6: add_weight
def add_weight(self,
               name,
               shape,
               dtype=None,
               initializer=None,
               regularizer=None,
               trainable=True,
               constraint=None):
  """Adds a weight variable to the layer.
  Arguments:
      name: String, the name for the weight variable.
      shape: The shape tuple of the weight.
      dtype: The dtype of the weight.
      initializer: An Initializer instance (callable).
      regularizer: An optional Regularizer instance.
      trainable: A boolean, whether the weight should
          be trained via backprop or not (assuming
          that the layer itself is also trainable).
      constraint: An optional Constraint instance.
  Returns:
      The created weight variable.
  """
  if dtype is None:
    dtype = K.floatx()
  weight = self.add_variable(name, shape,
                             dtype=dtype,
                             initializer=initializers.get(initializer),
                             regularizer=regularizers.get(regularizer),
                             constraint=constraints.get(constraint),
                             trainable=trainable)
  return weight
Developer ID: AndrewTwinz, Project: tensorflow, Lines of code: 33, Source file: base_layer.py
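For comparison, here is a hypothetical custom layer written against the public tf.keras API that calls add_weight from build(); leaving dtype unset makes the variable default to the backend floatx, just as the method above does.

import tensorflow as tf

class Scale(tf.keras.layers.Layer):
  """Multiplies its input by a learnable per-feature factor."""

  def build(self, input_shape):
    self.alpha = self.add_weight(
        name='alpha',
        shape=(int(input_shape[-1]),),
        initializer='ones',
        trainable=True)  # dtype omitted -> defaults to the backend floatx
    super(Scale, self).build(input_shape)

  def call(self, inputs):
    return inputs * self.alpha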
Example 7: __init__
def __init__(self, **kwargs):
  # These properties should be set by the user via keyword arguments.
  # note that 'dtype', 'input_shape' and 'batch_input_shape'
  # are only applicable to input layers: do not pass these keywords
  # to non-input layers.
  allowed_kwargs = {
      'activity_regularizer',
      'input_shape',
      'batch_input_shape',
      'batch_size',
      'dtype',
      'name',
      'trainable',
      'weights',
  }
  # Validate optional keyword arguments.
  for kwarg in kwargs:
    if kwarg not in allowed_kwargs:
      raise TypeError('Keyword argument not understood:', kwarg)
  # Get layer name.
  name = kwargs.get('name')
  # Get `trainable` status.
  trainable = kwargs.get('trainable', True)
  # Get `dtype`.
  dtype = kwargs.get('dtype')
  if dtype is None:
    dtype = K.floatx()
  # Call super, which will set all properties common to Keras layers
  # and core TF layers.
  super(Layer, self).__init__(
      name=name, dtype=dtype, trainable=trainable,
      activity_regularizer=kwargs.get('activity_regularizer'))
  self._uses_inputs_arg = True
  # Add properties that are Keras-only for now.
  self.supports_masking = False
  # Manage input shape information if passed.
  if 'input_shape' in kwargs or 'batch_input_shape' in kwargs:
    # In this case we will later create an input layer
    # to insert before the current layer
    if 'batch_input_shape' in kwargs:
      batch_input_shape = tuple(kwargs['batch_input_shape'])
    elif 'input_shape' in kwargs:
      if 'batch_size' in kwargs:
        batch_size = kwargs['batch_size']
      else:
        batch_size = None
      batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
    self._batch_input_shape = batch_input_shape
  # Manage initial weight values if passed.
  if 'weights' in kwargs:
    self._initial_weights = kwargs['weights']
  else:
    self._initial_weights = None
Developer ID: syed-ahmed, Project: tensorflow, Lines of code: 60, Source file: base_layer.py
Example 8: get_updates
def get_updates(self, loss, params):
  grads = self.get_gradients(loss, params)
  self.updates = [K.update_add(self.iterations, 1)]
  lr = self.lr
  if self.initial_decay > 0:
    lr *= (1. /
           (1. + self.decay * K.cast(self.iterations, K.dtype(self.decay))))
  t = K.cast(self.iterations, K.floatx()) + 1
  lr_t = lr / (1. - K.pow(self.beta_1, t))
  shapes = [K.int_shape(p) for p in params]
  # zero init of 1st moment
  ms = [K.zeros(shape) for shape in shapes]
  # zero init of exponentially weighted infinity norm
  us = [K.zeros(shape) for shape in shapes]
  self.weights = [self.iterations] + ms + us
  for p, g, m, u in zip(params, grads, ms, us):
    m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
    u_t = K.maximum(self.beta_2 * u, K.abs(g))
    p_t = p - lr_t * m_t / (u_t + self.epsilon)
    self.updates.append(K.update(m, m_t))
    self.updates.append(K.update(u, u_t))
    new_p = p_t
    # Apply constraints.
    if getattr(p, 'constraint', None) is not None:
      new_p = p.constraint(new_p)
    self.updates.append(K.update(p, new_p))
  return self.updates
Developer ID: ChengYuXiang, Project: tensorflow, Lines of code: 35, Source file: optimizers.py
Example 9: __init__
def __init__(self,
             input_dim,
             output_dim,
             embeddings_initializer='uniform',
             embeddings_regularizer=None,
             activity_regularizer=None,
             embeddings_constraint=None,
             mask_zero=False,
             input_length=None,
             **kwargs):
  if 'input_shape' not in kwargs:
    if input_length:
      kwargs['input_shape'] = (input_length,)
    else:
      kwargs['input_shape'] = (None,)
  dtype = kwargs.pop('dtype', K.floatx())
  super(Embedding, self).__init__(dtype=dtype, **kwargs)
  self.input_dim = input_dim
  self.output_dim = output_dim
  self.embeddings_initializer = initializers.get(embeddings_initializer)
  self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
  self.activity_regularizer = regularizers.get(activity_regularizer)
  self.embeddings_constraint = constraints.get(embeddings_constraint)
  self.mask_zero = mask_zero
  self.input_length = input_length
Developer ID: kimr843, Project: tensorflow, Lines of code: 26, Source file: embeddings.py
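A short usage sketch built on the public tf.keras version of this constructor (assuming eager execution, with made-up sizes):

import tensorflow as tf

emb = tf.keras.layers.Embedding(
    input_dim=1000,     # vocabulary size
    output_dim=64,      # embedding vector length
    mask_zero=True,     # index 0 is treated as padding
    input_length=10)
vectors = emb(tf.constant([[1, 2, 0, 0, 5, 0, 0, 0, 0, 0]]))
print(vectors.shape)    # (1, 10, 64)
print(vectors.dtype)    # the backend floatx, float32 by default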
Example 10: img_to_array
def img_to_array(img, data_format=None):
  """Converts a PIL Image instance to a Numpy array.
  Arguments:
      img: PIL Image instance.
      data_format: Image data format.
  Returns:
      A 3D Numpy array.
  Raises:
      ValueError: if invalid `img` or `data_format` is passed.
  """
  if data_format is None:
    data_format = K.image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ', data_format)
  # Numpy array x has format (height, width, channel)
  # or (channel, height, width)
  # but original PIL image has format (width, height, channel)
  x = np.asarray(img, dtype=K.floatx())
  if len(x.shape) == 3:
    if data_format == 'channels_first':
      x = x.transpose(2, 0, 1)
  elif len(x.shape) == 2:
    if data_format == 'channels_first':
      x = x.reshape((1, x.shape[0], x.shape[1]))
    else:
      x = x.reshape((x.shape[0], x.shape[1], 1))
  else:
    raise ValueError('Unsupported image shape: ', x.shape)
  return x
Developer ID: DILASSS, Project: tensorflow, Lines of code: 32, Source file: image.py
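The data_format handling above boils down to a transpose or a reshape. A plain-NumPy illustration of both cases (not the Keras function itself):

import numpy as np

x = np.zeros((32, 48, 3), dtype='float32')     # (height, width, channels), i.e. channels_last
print(x.transpose(2, 0, 1).shape)              # (3, 32, 48) for channels_first

gray = np.zeros((32, 48), dtype='float32')     # a 2D grayscale image
print(gray.reshape((1,) + gray.shape).shape)   # (1, 32, 48) for channels_first
print(gray.reshape(gray.shape + (1,)).shape)   # (32, 48, 1) for channels_last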
Example 11: dropped_inputs
def dropped_inputs(inputs=inputs, rate=self.rate, seed=self.seed):
  alpha_p = -alpha * scale
  kept_idx = K.greater_equal(K.random_uniform(noise_shape, seed=seed),
                             rate)
  kept_idx = K.cast(kept_idx, K.floatx())
  a = ((1 - rate) * (1 + rate * alpha_p ** 2)) ** -0.5
  b = -a * alpha_p * rate
  x = inputs * kept_idx + alpha_p * (1 - kept_idx)
  return a * x + b
Developer ID: 1000sprites, Project: tensorflow, Lines of code: 9, Source file: noise.py
Example 12: batch_predict_loop
def batch_predict_loop(model, inputs, batch_size, verbose=0):
  """Predict function for eager execution when input is arrays or tensors.
  Arguments:
      model: Instance of `Model`.
      inputs: List of input arrays.
      batch_size: Integer batch size.
      verbose: Verbosity mode.
  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions (if the model has multiple outputs).
  """
  outs = []
  num_samples = training_utils.check_num_samples(inputs, batch_size)
  if verbose == 1:
    progbar = generic_utils.Progbar(target=num_samples)
  batches = generic_utils.make_batches(num_samples, batch_size)
  index_array = np.arange(num_samples)
  for batch_index, (batch_start, batch_end) in enumerate(batches):
    batch_ids = index_array[batch_start:batch_end]
    inputs_batch = slice_arrays(inputs, batch_ids)
    inputs_batch = [
        ops.convert_to_tensor(val, dtype=backend.floatx())
        for val in inputs_batch
    ]
    if len(inputs_batch) == 1:
      if model._expects_training_arg:
        batch_outs = model.call(inputs_batch[0], training=False)
      else:
        batch_outs = model.call(inputs_batch[0])
    else:
      if model._expects_training_arg:
        batch_outs = model.call(inputs_batch, training=False)
      else:
        batch_outs = model.call(inputs_batch)
    if not isinstance(batch_outs, list):
      batch_outs = [batch_outs]
    if batch_index == 0:
      # Pre-allocate the results arrays.
      for batch_out in batch_outs:
        dims = batch_out.shape[1:].dims
        dims_list = [d.value for d in dims]
        shape = (num_samples,) + tuple(dims_list)
        outs.append(np.zeros(shape, dtype=batch_out.dtype.as_numpy_dtype))
    for i, batch_out in enumerate(batch_outs):
      outs[i][batch_start:batch_end] = batch_out
    if verbose == 1:
      progbar.update(batch_end)
  if len(outs) == 1:
    return outs[0]
  return outs
Developer ID: KiaraStarlab, Project: tensorflow, Lines of code: 56, Source file: training_eager.py
Example 13: _preprocess_symbolic_input
def _preprocess_symbolic_input(x, data_format, mode):
  """Preprocesses a tensor encoding a batch of images.
  Arguments:
      x: Input tensor, 3D or 4D.
      data_format: Data format of the image tensor.
      mode: One of "caffe", "tf" or "torch".
          - caffe: will convert the images from RGB to BGR,
              then will zero-center each color channel with
              respect to the ImageNet dataset,
              without scaling.
          - tf: will scale pixels between -1 and 1,
              sample-wise.
          - torch: will scale pixels between 0 and 1 and then
              will normalize each channel with respect to the
              ImageNet dataset.
  Returns:
      Preprocessed tensor.
  """
  global _IMAGENET_MEAN
  if mode == 'tf':
    x /= 127.5
    x -= 1.
    return x
  if mode == 'torch':
    x /= 255.
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
  else:
    if data_format == 'channels_first':
      # 'RGB'->'BGR'
      if K.ndim(x) == 3:
        x = x[::-1, ...]
      else:
        x = x[:, ::-1, ...]
    else:
      # 'RGB'->'BGR'
      x = x[..., ::-1]
    mean = [103.939, 116.779, 123.68]
    std = None
  if _IMAGENET_MEAN is None:
    _IMAGENET_MEAN = constant_op.constant(-np.array(mean), dtype=K.floatx())
  # Zero-center by mean pixel
  if K.dtype(x) != K.dtype(_IMAGENET_MEAN):
    x = K.bias_add(x, math_ops.cast(_IMAGENET_MEAN, K.dtype(x)), data_format)
  else:
    x = K.bias_add(x, _IMAGENET_MEAN, data_format)
  if std is not None:
    x /= std
  return x
Developer ID: Jackiefan, Project: tensorflow, Lines of code: 55, Source file: imagenet_utils.py
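The 'tf' branch is simple enough to check by hand: dividing by 127.5 and subtracting 1 maps pixel values from [0, 255] to [-1, 1]. A tiny numeric illustration:

import numpy as np

x = np.array([0., 127.5, 255.], dtype='float32')
print(x / 127.5 - 1.)   # [-1.  0.  1.]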
Example 14: build
def build(self, input_shape=None):
  if input_shape and not self.inputs:
    batch_shape = tuple(input_shape)
    dtype = K.floatx()
    x = Input(
        batch_shape=batch_shape, dtype=dtype, name=self.name + '_input')
    self.inputs = [x]
    for layer in self._layers:
      x = layer(x)
    self.outputs = [x]
  if self.inputs:
    self._init_graph_network(self.inputs, self.outputs, name=self.name)
    self.built = True
Developer ID: AndrewTwinz, Project: tensorflow, Lines of code: 14, Source file: sequential.py
Example 15: array_to_img
def array_to_img(x, data_format=None, scale=True):
  """Converts a 3D Numpy array to a PIL Image instance.
  Arguments:
      x: Input Numpy array.
      data_format: Image data format.
      scale: Whether to rescale image values
          to be within [0, 255].
  Returns:
      A PIL Image instance.
  Raises:
      ImportError: if PIL is not available.
      ValueError: if invalid `x` or `data_format` is passed.
  """
  if pil_image is None:
    raise ImportError('Could not import PIL.Image. '
                      'The use of `array_to_img` requires PIL.')
  x = np.asarray(x, dtype=K.floatx())
  if x.ndim != 3:
    raise ValueError('Expected image array to have rank 3 (single image). '
                     'Got array with shape:', x.shape)
  if data_format is None:
    data_format = K.image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Invalid data_format:', data_format)
  # Original Numpy array x has format (height, width, channel)
  # or (channel, height, width)
  # but target PIL image has format (width, height, channel)
  if data_format == 'channels_first':
    x = x.transpose(1, 2, 0)
  if scale:
    x = x + max(-np.min(x), 0)  # pylint: disable=g-no-augmented-assignment
    x_max = np.max(x)
    if x_max != 0:
      x /= x_max
    x *= 255
  if x.shape[2] == 3:
    # RGB
    return pil_image.fromarray(x.astype('uint8'), 'RGB')
  elif x.shape[2] == 1:
    # grayscale
    return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
  else:
    raise ValueError('Unsupported channel number: ', x.shape[2])
Developer ID: DILASSS, Project: tensorflow, Lines of code: 48, Source file: image.py
Example 16: get_updates
def get_updates(self, loss, params):
  grads = self.get_gradients(loss, params)
  self.updates = [state_ops.assign_add(self.iterations, 1)]
  t = math_ops.cast(self.iterations, K.floatx()) + 1
  # Due to the recommendations in [2], i.e. warming momentum schedule
  momentum_cache_t = self.beta_1 * (
      1. - 0.5 *
      (math_ops.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
  momentum_cache_t_1 = self.beta_1 * (
      1. - 0.5 *
      (math_ops.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
  m_schedule_new = self.m_schedule * momentum_cache_t
  m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
  self.updates.append((self.m_schedule, m_schedule_new))
  shapes = [K.int_shape(p) for p in params]
  ms = [K.zeros(shape) for shape in shapes]
  vs = [K.zeros(shape) for shape in shapes]
  self.weights = [self.iterations] + ms + vs
  for p, g, m, v in zip(params, grads, ms, vs):
    # the following equations given in [1]
    g_prime = g / (1. - m_schedule_new)
    m_t = self.beta_1 * m + (1. - self.beta_1) * g
    m_t_prime = m_t / (1. - m_schedule_next)
    v_t = self.beta_2 * v + (1. - self.beta_2) * math_ops.square(g)
    v_t_prime = v_t / (1. - math_ops.pow(self.beta_2, t))
    m_t_bar = (
        1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime
    self.updates.append(state_ops.assign(m, m_t))
    self.updates.append(state_ops.assign(v, v_t))
    p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
    new_p = p_t
    # Apply constraints.
    if getattr(p, 'constraint', None) is not None:
      new_p = p.constraint(new_p)
    self.updates.append(state_ops.assign(p, new_p))
  return self.updates
Developer ID: Jackiefan, Project: tensorflow, Lines of code: 45, Source file: optimizers.py
Example 17: __init__
def __init__(self,
             x,
             y,
             image_data_generator,
             batch_size=32,
             shuffle=False,
             seed=None,
             data_format=None,
             save_to_dir=None,
             save_prefix='',
             save_format='png'):
  if y is not None and len(x) != len(y):
    raise ValueError('X (images tensor) and y (labels) '
                     'should have the same length. '
                     'Found: X.shape = %s, y.shape = %s' %
                     (np.asarray(x).shape, np.asarray(y).shape))
  if data_format is None:
    data_format = K.image_data_format()
  self.x = np.asarray(x, dtype=K.floatx())
  if self.x.ndim != 4:
    raise ValueError('Input data in `NumpyArrayIterator` '
                     'should have rank 4. You passed an array '
                     'with shape', self.x.shape)
  channels_axis = 3 if data_format == 'channels_last' else 1
  if self.x.shape[channels_axis] not in {1, 3, 4}:
    logging.warning(
        'NumpyArrayIterator is set to use the '
        'data format convention "' + data_format + '" '
        '(channels on axis ' + str(channels_axis) + '), i.e. expected '
        'either 1, 3 or 4 channels on axis ' + str(channels_axis) + '. '
        'However, it was passed an array with shape ' + str(self.x.shape) +
        ' (' + str(self.x.shape[channels_axis]) + ' channels).')
  if y is not None:
    self.y = np.asarray(y)
  else:
    self.y = None
  self.image_data_generator = image_data_generator
  self.data_format = data_format
  self.save_to_dir = save_to_dir
  self.save_prefix = save_prefix
  self.save_format = save_format
  super(NumpyArrayIterator, self).__init__(x.shape[0], batch_size, shuffle,
                                           seed)
Developer ID: DILASSS, Project: tensorflow, Lines of code: 45, Source file: image.py
Example 18: dropped_inputs
def dropped_inputs(inputs=inputs, rate=self.rate, seed=self.seed):  # pylint: disable=missing-docstring
  alpha = 1.6732632423543772848170429916717
  scale = 1.0507009873554804934193349852946
  alpha_p = -alpha * scale
  kept_idx = K.greater_equal(
      K.random_uniform(noise_shape, seed=seed), rate)
  kept_idx = K.cast(kept_idx, K.floatx())
  # Get affine transformation params
  a = ((1 - rate) * (1 + rate * alpha_p**2))**-0.5
  b = -a * alpha_p * rate
  # Apply mask
  x = inputs * kept_idx + alpha_p * (1 - kept_idx)
  # Do affine transformation
  return a * x + b
Developer ID: ChengYuXiang, Project: tensorflow, Lines of code: 18, Source file: noise.py
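The constants and the affine correction above come from the SELU/alpha-dropout formulation: if the inputs are standardized (as SELU activations are designed to be), replacing dropped units with alpha_p and then applying a*x + b keeps the output mean near 0 and the variance near 1. A quick NumPy check under that assumption (an illustration, not the Keras code):

import numpy as np

alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
alpha_p = -alpha * scale
rate = 0.1

rng = np.random.RandomState(0)
inputs = rng.randn(1000000).astype('float32')                   # stand-in for SELU outputs
kept_idx = (rng.uniform(size=inputs.shape) >= rate).astype('float32')

a = ((1 - rate) * (1 + rate * alpha_p ** 2)) ** -0.5
b = -a * alpha_p * rate
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
out = a * x + b
print(out.mean(), out.var())   # both come out close to 0 and 1 respectively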
Example 19: test_loop
def test_loop(model, inputs, targets,
              sample_weights=None,
              batch_size=None,
              verbose=0,
              steps=None):
  """Abstract method to loop over some data in batches.
  Arguments:
      model: Model instance that is being evaluated in Eager mode.
      inputs: List of input arrays.
      targets: List of target arrays.
      sample_weights: Optional list of sample weight arrays.
      batch_size: integer batch size or `None`.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring predictions finished.
          Ignored with the default value of `None`.
  Returns:
      Scalar loss (if the model has a single output and no metrics)
      or list of scalars (if the model has multiple outputs
      and/or metrics). The attribute `model.metrics_names` will give you
      the display labels for the scalar outputs.
  """
  with backend.learning_phase_scope(0):
    feed_data = inputs + targets
    if sample_weights:
      feed_data += sample_weights
    num_samples = training_utils.check_num_samples(
        feed_data, batch_size=batch_size, steps=steps, steps_name='steps')
    outs = []
    if verbose == 1:
      progbar = generic_utils.Progbar(target=num_samples)
    batches = generic_utils.make_batches(num_samples, batch_size)
    index_array = np.arange(num_samples)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
      batch_ids = index_array[batch_start:batch_end]
      inputs_batch = slice_arrays(inputs, batch_ids)
      targets_batch = slice_arrays(targets, batch_ids)
      if sample_weights:
        sample_weights_batch = slice_arrays(sample_weights, batch_ids)
      else:
        sample_weights_batch = None
      inputs_batch = [
          ops.convert_to_tensor(val, dtype=backend.floatx())
          for val in inputs_batch]
      targets_batch = [
          ops.convert_to_tensor(val, dtype=backend.floatx())
          for val in targets_batch]
      if sample_weights:
        sample_weights_batch = [
            ops.convert_to_tensor(val, dtype=backend.floatx())
            if val is not None else None
            for val in sample_weights_batch]
      loss_outs, loss, loss_metrics = _model_loss(
          model,
          inputs_batch,
          targets_batch,
          sample_weights=sample_weights_batch,
          training=False)
      metrics_results = _eager_metrics_fn(model, loss_outs, targets_batch)
      batch_outs = []
      for _, v in zip(model.metrics_names,
                      [backend.mean(loss)] + loss_metrics + metrics_results):
        batch_outs.append(tensor_util.constant_value(v))
      if isinstance(batch_outs, list):
        if batch_index == 0:
          for batch_out in enumerate(batch_outs):
            outs.append(0.)
        for i, batch_out in enumerate(batch_outs):
          outs[i] += batch_out * len(batch_ids)
      else:
        if batch_index == 0:
          outs.append(0.)
        outs[0] += batch_outs * len(batch_ids)
      if verbose == 1:
        progbar.update(batch_end)
    for i in range(len(outs)):
      outs[i] /= num_samples
    if len(outs) == 1:
      return outs[0]
    return outs
Developer ID: kimr843, Project: tensorflow, Lines of code: 86, Source file: training_eager.py
Example 20: predict_loop
def predict_loop(model, inputs,
                 batch_size=32,
                 verbose=0,
                 steps=None):
  """Abstract method to loop over some data in batches.
  Arguments:
      model:
      inputs: List of input arrays.
      batch_size: integer batch size.
      verbose: verbosity mode.
      steps: Total number of steps (batches of samples)
          before declaring `_predict_loop` finished.
          Ignored with the default value of `None`.
  Returns:
      Array of predictions (if the model has a single output)
      or list of arrays of predictions
      (if the model has multiple outputs).
  """
  with backend.learning_phase_scope(0):
    num_samples = training_utils.check_num_samples(
        inputs, batch_size, steps, 'steps')
    if verbose == 1:
      if steps is not None:
        progbar = generic_utils.Progbar(target=steps)
      else:
        progbar = generic_utils.Progbar(target=num_samples)
    outs = []
    batches = generic_utils.make_batches(num_samples, batch_size)
    index_array = np.arange(num_samples)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
      batch_ids = index_array[batch_start:batch_end]
      inputs_batch = slice_arrays(inputs, batch_ids)
      inputs_batch = [
          ops.convert_to_tensor(val, dtype=backend.floatx())
          for val in inputs_batch]
      if len(inputs_batch) == 1:
        if model._expects_training_arg:
          batch_outs = model.call(inputs_batch[0], training=False)
        else:
          batch_outs = model.call(inputs_batch[0])
      else:
        if model._expects_training_arg:
          batch_outs = model.call(inputs_batch, training=False)
        else:
          batch_outs = model.call(inputs_batch)
      if not isinstance(batch_outs, list):
        batch_outs = [batch_outs]
      if batch_index == 0:
        # Pre-allocate the results arrays.
        for batch_out in batch_outs:
          dims = batch_out.shape[1:].dims
          dims_list = [d.value for d in dims]
          shape = (num_samples,) + tuple(dims_list)
          outs.append(np.zeros(shape, dtype=batch_out.dtype.as_numpy_dtype))
      for i, batch_out in enumerate(batch_outs):
        outs[i][batch_start:batch_end] = batch_out
      if verbose == 1:
        progbar.update(batch_end)
    if len(outs) == 1:
      return outs[0]
    return outs
Developer ID: kimr843, Project: tensorflow, Lines of code: 67, Source file: training_eager.py
Note: the tensorflow.python.keras._impl.keras.backend.floatx examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors, who retain the copyright; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.