This article collects typical usage examples of the Python function nnabla.functions.softmax_cross_entropy. If you are wondering what softmax_cross_entropy does, how to call it, or what real code using it looks like, the curated examples below should help.
The following shows 10 code examples of softmax_cross_entropy, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code samples.
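Before the examples, here is a minimal, self-contained sketch of the basic call pattern (the shapes, values, and explicit axis argument are illustrative assumptions, not taken from the examples below). The function expects raw logits and integer class indices, applies softmax along the given axis, and returns the per-example negative log-likelihood, which is usually averaged with F.mean:

# Minimal usage sketch (assumed shapes: 4 samples, 10 classes).
import numpy as np
import nnabla as nn
import nnabla.functions as F

x = nn.Variable([4, 10])                         # raw logits, NOT probabilities
t = nn.Variable([4, 1])                          # integer class labels
x.d = np.random.randn(4, 10)
t.d = np.random.randint(0, 10, size=(4, 1))
loss = F.mean(F.softmax_cross_entropy(x, t, 1))  # softmax along axis 1
loss.forward()
print(loss.d)                                    # scalar mean cross-entropy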
Example 1: ce_loss_with_uncertainty
def ce_loss_with_uncertainty(ctx, pred, y_l, log_var):
    r = F.randn(0., 1., log_var.shape)
    r = F.pow_scalar(F.exp(log_var), 0.5) * r
    h = pred + r
    with nn.context_scope(ctx):
        loss_ce = F.mean(F.softmax_cross_entropy(h, y_l))
    return loss_ce
Author: kzky | Project: works | Lines: 7 | Source: cnn_model_060.py
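The randn/pow_scalar lines implement a reparameterized noise injection into the logits. Interpreting log_var as the predicted log-variance log σ², the corrupted logits are (this is a restatement of what the code above computes, not an addition to it):

h = pred + \exp\!\left(\tfrac{1}{2}\log\sigma^{2}\right)\odot\epsilon,\qquad \epsilon\sim\mathcal{N}(0,\,I)

Larger predicted log-variance blurs the logits more, so the network can attenuate the cross-entropy loss on inputs it is uncertain about.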
Example 2: test_graph_logreg
def test_graph_logreg(seed):
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4], need_grad=True)
    w = nn.Variable([12, 5], need_grad=True)
    b = nn.Variable([5], need_grad=True)
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    w.d = rng.randn(*w.shape)
    b.d = rng.randn(*b.shape)
    t.d = rng.randint(0, 5, size=t.shape)
    nn.set_default_context(nn.Context())

    # Forwardprop by definition
    with nn.auto_forward():
        z = F.affine(x, w, b, 1)
        l = F.softmax_cross_entropy(z, t, 1)
        L = F.mean(l)

    # Backprop
    # Diffs should be initialized since they are always accumulated
    x.g = 0
    w.g = 0
    b.g = 0
    L.backward(clear_buffer=True)
    x.g = rng.randn(*x.shape)
    inputs = [x, w, b]
    from nbla_test_utils import \
        compute_analytical_and_numerical_grad_graph as grads
    agrad, ngrad = grads(L, inputs, 1e-3)
    assert np.allclose(ngrad, agrad, atol=1e-2)
Author: zwsong | Project: nnabla | Lines: 33 | Source: test_graph.py
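The helper compute_analytical_and_numerical_grad_graph belongs to nnabla's test utilities and its implementation is not shown here. As a rough sketch of the idea it checks, a generic central-difference gradient estimator looks like this (plain NumPy; my assumption of the scheme, not the actual helper):

import numpy as np

def numerical_grad(f, x, eps=1e-3):
    # Central differences: g_i = (f(x + eps*e_i) - f(x - eps*e_i)) / (2*eps)
    g = np.zeros_like(x)
    for i in range(x.size):
        orig = x.flat[i]
        x.flat[i] = orig + eps
        fp = f()
        x.flat[i] = orig - eps
        fm = f()
        x.flat[i] = orig          # restore the perturbed entry
        g.flat[i] = (fp - fm) / (2.0 * eps)
    return g

# Example: the gradient of f(x) = sum(x**2) is 2x.
x = np.random.randn(3, 4)
g = numerical_grad(lambda: float((x ** 2).sum()), x)
assert np.allclose(g, 2 * x, atol=1e-4)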
Example 3: test_graph_model
def test_graph_model(model, seed):
    np.random.seed(313)
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4, 4], need_grad=True)
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    t.d = rng.randint(0, 5, size=t.shape)
    nn.set_default_context(nn.Context())

    # Forwardprop by definition
    nn.clear_parameters()
    if model == "mlp":
        with nn.parameter_scope('fc1'):
            z = PF.affine(x, 3)
        z2 = F.relu(z, inplace=True)
        with nn.parameter_scope('fc2'):
            z3 = PF.affine(z2, 5)
    elif model == "recurrent":
        with nn.parameter_scope('fc1'):
            z = PF.affine(x, 3)
        z2 = F.relu(z, inplace=True)
        h = z2
        for _ in range(2):
            with nn.parameter_scope('fc2'):
                h = PF.affine(h, 3)
            h = F.relu(h, inplace=True)
        with nn.parameter_scope('fc3'):
            z3 = PF.affine(h, 5)
    elif model == "convolution":
        with nn.parameter_scope('conv1'):
            z = PF.convolution(x, 3, (2, 2))
        z2 = F.relu(z, inplace=True)
        with nn.parameter_scope('fc2'):
            z3 = PF.affine(z2, 5)
    else:
        raise ValueError()
    l = F.softmax_cross_entropy(z3, t, 1)
    L = F.mean(l)

    # Forwardprop
    L.forward(clear_no_need_grad=True)

    # Backprop
    # Diffs should be initialized since they are always accumulated
    x.grad.zero()
    L.backward(clear_buffer=True)
    x.g = rng.randn(*x.shape)
    parameters = nn.get_parameters()
    for param in parameters.values():
        param.grad.zero()
    inputs = [x] + list(parameters.values())
    from nbla_test_utils import \
        compute_analytical_and_numerical_grad_graph as grads
    agrad, ngrad = grads(L, inputs, 1e-3)
    assert np.allclose(ngrad, agrad, atol=1.05e-2)
Author: zwsong | Project: nnabla | Lines: 57 | Source: test_graph.py
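A detail worth noting in the "recurrent" branch: re-entering the same nn.parameter_scope('fc2') on every loop iteration makes both affine steps share one weight matrix, which is what makes the model recurrent. A minimal sketch isolating that behavior (toy shapes assumed):

# Two affine calls in one scope share parameters (assumed sizes).
import nnabla as nn
import nnabla.parametric_functions as PF

nn.clear_parameters()
x = nn.Variable([2, 3])
h = x
for _ in range(2):
    with nn.parameter_scope('fc2'):   # same scope name -> same parameters
        h = PF.affine(h, 3)
print(list(nn.get_parameters().keys()))
# ['fc2/affine/W', 'fc2/affine/b'] -- one pair despite two affine calls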
Example 4: test_forward_backward
def test_forward_backward():
    batch_size, m, h, w = 4, 3, 32, 32
    extension_module = "cpu"
    device_id = 0
    ctx = extension_context(extension_module, device_id=device_id)
    x_l_data = np.random.randn(batch_size, m, h, w)
    y_l_data = (np.random.rand(batch_size, 1) * 10).astype(np.int32)
    x_l = nn.Variable(x_l_data.shape)
    y_l = nn.Variable(y_l_data.shape)
    x_l.d = x_l_data
    y_l.d = y_l_data
    pred = cnn_model_003(ctx, x_l)
    with nn.context_scope(ctx):
        loss = F.mean(F.softmax_cross_entropy(pred, y_l))
    loss.forward()
    loss.backward()
Author: kzky | Project: works | Lines: 18 | Source: test_cnn_model_003.py
Example 5: get_model
def get_model(args, num_classes, test=False, tiny=False):
    """
    Create computation graph and variables.

    Args:
        tiny: Tiny ImageNet mode if True.
    """
    data_size = 320
    nn_in_size = 224
    if tiny:
        data_size = 64
        nn_in_size = 56
    image = nn.Variable([args.batch_size, 3, data_size, data_size])
    label = nn.Variable([args.batch_size, 1])
    pimage = image_preprocess(image, nn_in_size)
    pred, hidden = model_resnet.resnet_imagenet(
        pimage, num_classes, args.num_layers, args.shortcut_type,
        test=test, tiny=tiny)
    loss = F.mean(F.softmax_cross_entropy(pred, label))
    Model = namedtuple('Model', ['image', 'label', 'pred', 'loss', 'hidden'])
    return Model(image, label, pred, loss, hidden)
Author: zwsong | Project: nnabla | Lines: 21 | Source: classification.py
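The namedtuple return value bundles all the graph endpoints a training script needs. Here is a minimal, self-contained toy illustration of the same pattern (toy shapes and a single affine layer standing in for the ResNet; 'toy_fc' is a made-up scope name, not the actual ImageNet graph):

from collections import namedtuple
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF

Model = namedtuple('Model', ['image', 'label', 'pred', 'loss'])

image = nn.Variable([4, 3, 8, 8])
label = nn.Variable([4, 1])
pred = PF.affine(image, 5, name='toy_fc')
loss = F.mean(F.softmax_cross_entropy(pred, label, 1))
model = Model(image, label, pred, loss)

# Downstream code can then feed data and run the graph through the tuple:
model.image.d = np.random.randn(*model.image.shape)
model.label.d = np.random.randint(0, 5, size=model.label.shape)
model.loss.forward()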
Example 6: test_graph_clear_buffer
def test_graph_clear_buffer(seed):
    np.random.seed(313)
    rng = np.random.RandomState(seed)
    x = nn.Variable([2, 3, 4, 4])
    t = nn.Variable([2, 1])
    x.d = rng.randn(*x.shape)
    t.d = rng.randint(0, 5, size=t.shape)

    # Network definition
    nn.set_default_context(nn.Context())
    nn.clear_parameters()
    x1 = x + 1
    x2 = x1 - 1
    with nn.parameter_scope('conv1'):
        z = PF.convolution(x2, 3, (2, 2))
    z2 = F.relu(z, inplace=True)
    with nn.parameter_scope('fc2'):
        z3 = PF.affine(z2, 5)
    l = F.softmax_cross_entropy(z3, t, 1)
    L = F.mean(l)

    # Forwardprop
    import tempfile
    import os
    tmpd = tempfile.mkdtemp()
    nn.save_parameters(os.path.join(tmpd, 'parameter.h5'))
    first = False
    for cnng in [False, True]:
        for cb in [False, True]:
            _ = nn.load_parameters(os.path.join(tmpd, 'parameter.h5'))
            for v in nn.get_parameters().values():
                v.grad.zero()
            L.forward(clear_no_need_grad=cnng)
            L.backward(clear_buffer=cb)
            if not first:
                first = True
                g = list(nn.get_parameters().values())[0].g.copy()
            else:
                g2 = list(nn.get_parameters().values())[0].g.copy()
                assert np.all(g == g2)
Author: zwsong | Project: nnabla | Lines: 40 | Source: test_graph.py
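The two flags exercised here trade memory for recomputation: clear_no_need_grad frees forward buffers not needed by backprop, and clear_buffer frees buffers during backward. When an intermediate value must survive such clearing, nnabla's persistent flag (used as pred.persistent = True in Example 9 below) protects it. A small sketch, with shapes assumed:

import numpy as np
import nnabla as nn
import nnabla.functions as F

x = nn.Variable([2, 3], need_grad=True)
x.d = np.random.randn(*x.shape)
h = F.relu(x)
h.persistent = True          # keep h.d even when forward clears buffers
y = F.mean(F.pow_scalar(h, 2.0))
y.forward(clear_no_need_grad=True)
print(h.d)                   # still valid thanks to the persistent flag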
Example 7: ce_loss
def ce_loss(ctx, pred, y_l):
    with nn.context_scope(ctx):
        loss_ce = F.mean(F.softmax_cross_entropy(pred, y_l))
    return loss_ce
Author: kzky | Project: works | Lines: 4 | Source: cnn_model_060.py
Example 8: main
def main():
    """
    Main script.

    Steps:

    * Get and set context.
    * Load dataset.
    * Initialize DataIterator.
    * Create networks:
      * net for labeled data,
      * net for unlabeled data,
      * net for test data.
    * Create solver.
    * Training loop:
      * test,
      * training:
        * with labeled data, calculate the cross-entropy loss,
        * with unlabeled data, estimate the adversarial direction,
          then calculate the LDS loss.
    """
    args = get_args()

    # Get context.
    from nnabla.contrib.context import extension_context
    extension_module = args.context
    if args.context is None:
        extension_module = 'cpu'
    logger.info("Running in %s" % extension_module)
    ctx = extension_context(extension_module, device_id=args.device_id)
    nn.set_default_context(ctx)

    shape_x = (1, 28, 28)
    n_h = args.n_units
    n_y = args.n_class

    # Load MNIST dataset.
    from mnist_data import MnistDataSource
    with MnistDataSource(train=True) as d:
        x_t = d.images
        t_t = d.labels
    with MnistDataSource(train=False) as d:
        x_v = d.images
        t_v = d.labels
    x_t = np.array(x_t / 256.0).astype(np.float32)
    x_t, t_t = x_t[:args.n_train], t_t[:args.n_train]
    x_v, t_v = x_v[:args.n_valid], t_v[:args.n_valid]

    # Create semi-supervised datasets.
    x_l, t_l, x_u, _ = split_dataset(x_t, t_t, args.n_labeled, args.n_class)
    x_u = np.r_[x_l, x_u]
    x_v = np.array(x_v / 256.0).astype(np.float32)

    # Create DataIterators for the labeled, unlabeled and validation datasets.
    di_l = DataIterator(args.batchsize_l, [x_l, t_l])
    di_u = DataIterator(args.batchsize_u, [x_u])
    di_v = DataIterator(args.batchsize_v, [x_v, t_v])

    # Create networks.
    # Feed-forward-net building function.
    def forward(x, test=False):
        return mlp_net(x, n_h, n_y, test)

    # Net for learning labeled data.
    xl = nn.Variable((args.batchsize_l,) + shape_x, need_grad=False)
    hl = forward(xl, test=False)
    tl = nn.Variable((args.batchsize_l, 1), need_grad=False)
    loss_l = F.mean(F.softmax_cross_entropy(hl, tl))

    # Net for learning unlabeled data.
    xu = nn.Variable((args.batchsize_u,) + shape_x, need_grad=False)
    r = nn.Variable((args.batchsize_u,) + shape_x, need_grad=True)
    eps = nn.Variable((args.batchsize_u,) + shape_x, need_grad=False)
    loss_u, yu = vat(xu, r, eps, forward, distance)

    # Net for evaluating validation data.
    xv = nn.Variable((args.batchsize_v,) + shape_x, need_grad=False)
    hv = forward(xv, test=True)
    tv = nn.Variable((args.batchsize_v, 1), need_grad=False)

    # Create solver.
    solver = S.Adam(args.learning_rate)
    solver.set_parameters(nn.get_parameters())

    # Monitor training and validation stats.
    import nnabla.monitor as M
    monitor = M.Monitor(args.model_save_path)
    monitor_verr = M.MonitorSeries("Test error", monitor, interval=240)
    monitor_time = M.MonitorTimeElapsed("Elapsed time", monitor, interval=240)

    # Training loop.
    t0 = time.time()
    for i in range(args.max_iter):
        # Validation test.
        if i % args.val_interval == 0:
            n_error = calc_validation_error(
# ... (the rest of this code is omitted) ...
Author: zwsong | Project: nnabla | Lines: 101 | Source: vat.py
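For background on the loss_u term built by vat(...): virtual adversarial training (Miyato et al.) defines the local distributional smoothness (LDS) objective below. This is the standard formulation; the omitted code is assumed, not shown, to follow it.

\mathrm{LDS}(x) = D\big[p(y \mid x),\; p(y \mid x + r_{\mathrm{adv}})\big],
\qquad
r_{\mathrm{adv}} = \underset{\|r\|_2 \le \epsilon}{\arg\max}\; D\big[p(y \mid x),\; p(y \mid x + r)\big]

r_adv is typically approximated with one or a few power-iteration steps, which would explain why the graph exposes the perturbation r and its scale eps as separate input variables.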
Example 9: train
def train():
    """
    Main script.

    Steps:

    * Parse command line arguments.
    * Specify a context for computation.
    * Initialize DataIterator for MNIST.
    * Construct a computation graph for training and validation.
    * Initialize a solver and set parameter variables to it.
    * Create monitor instances for saving and displaying training stats.
    * Training loop:
      * Compute the error rate for validation data (periodically).
      * Get the next minibatch.
      * Execute forwardprop on the training graph.
      * Compute the training error.
      * Set parameter gradients to zero.
      * Execute backprop.
      * The solver updates parameters using the gradients computed by backprop.
    """
    args = get_args()

    # Get context.
    from nnabla.contrib.context import extension_context
    extension_module = args.context
    if args.context is None:
        extension_module = 'cpu'
    logger.info("Running in %s" % extension_module)
    ctx = extension_context(extension_module, device_id=args.device_id)
    nn.set_default_context(ctx)

    # Create CNN network for both training and testing.
    mnist_cnn_prediction = mnist_lenet_prediction
    if args.net == 'resnet':
        mnist_cnn_prediction = mnist_resnet_prediction

    # TRAIN
    # Create input variables.
    image = nn.Variable([args.batch_size, 1, 28, 28])
    label = nn.Variable([args.batch_size, 1])
    # Create prediction graph.
    pred = mnist_cnn_prediction(image, test=False)
    pred.persistent = True
    # Create loss function.
    loss = F.mean(F.softmax_cross_entropy(pred, label))

    # TEST
    # Create input variables.
    vimage = nn.Variable([args.batch_size, 1, 28, 28])
    vlabel = nn.Variable([args.batch_size, 1])
    # Create prediction graph.
    vpred = mnist_cnn_prediction(vimage, test=True)

    # Create solver.
    solver = S.Adam(args.learning_rate)
    solver.set_parameters(nn.get_parameters())

    # Create monitors.
    from nnabla.monitor import Monitor, MonitorSeries, MonitorTimeElapsed
    monitor = Monitor(args.monitor_path)
    monitor_loss = MonitorSeries("Training loss", monitor, interval=10)
    monitor_err = MonitorSeries("Training error", monitor, interval=10)
    monitor_time = MonitorTimeElapsed("Training time", monitor, interval=100)
    monitor_verr = MonitorSeries("Test error", monitor, interval=10)

    # Initialize DataIterator for MNIST.
    data = data_iterator_mnist(args.batch_size, True)
    vdata = data_iterator_mnist(args.batch_size, False)

    # Training loop.
    for i in range(args.max_iter):
        if i % args.val_interval == 0:
            # Validation
            ve = 0.0
            for j in range(args.val_iter):
                vimage.d, vlabel.d = vdata.next()
                vpred.forward(clear_buffer=True)
                ve += categorical_error(vpred.d, vlabel.d)
            monitor_verr.add(i, ve / args.val_iter)
        if i % args.model_save_interval == 0:
            nn.save_parameters(os.path.join(
                args.model_save_path, 'params_%06d.h5' % i))
        # Training forward
        image.d, label.d = data.next()
        solver.zero_grad()
        loss.forward(clear_no_need_grad=True)
        loss.backward(clear_buffer=True)
        solver.weight_decay(args.weight_decay)
        solver.update()
        e = categorical_error(pred.d, label.d)
        monitor_loss.add(i, loss.d.copy())
        monitor_err.add(i, e)
        monitor_time.add(i)
    ve = 0.0
    for j in range(args.val_iter):
        vimage.d, vlabel.d = vdata.next()
        vpred.forward(clear_buffer=True)
        ve += categorical_error(vpred.d, vlabel.d)
    monitor_verr.add(i, ve / args.val_iter)
# ... (the rest of this code is omitted) ...
Author: zwsong | Project: nnabla | Lines: 101 | Source: classification.py
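categorical_error is imported from the surrounding project and not shown here. A plausible sketch consistent with how it is called above (top-1 error rate over a batch; the real helper may differ):

import numpy as np

def categorical_error(pred, label):
    # pred: (batch, classes) scores; label: (batch, 1) integer classes.
    pred_label = pred.argmax(axis=1)
    return (pred_label != label.flatten()).mean()

pred = np.array([[0.1, 0.9], [0.8, 0.2]])
label = np.array([[1], [1]])
print(categorical_error(pred, label))  # 0.5: one of two samples is wrong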
Example 10: cifar10_resnet23_loss
def cifar10_resnet23_loss(pred, label):
    loss = F.mean(F.softmax_cross_entropy(pred, label))
    return loss
Author: kzky | Project: works | Lines: 3 | Source: cnn_model_058.py
Note: The nnabla.functions.softmax_cross_entropy examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult each project's License before using or distributing the code, and do not republish without permission.