This article collects and summarizes typical usage examples of the Python class st2.cifar10.datasets.Separator. If you have been wondering what exactly the Separator class does and how to use it, the curated class code examples here may help.
Below are 7 code examples of the Separator class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
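All seven examples share the same dataset-preparation pattern. As a quick orientation before the full listings, here is a minimal sketch of that pattern, assuming (based only on how the examples below use it) that Separator(n) splits the CIFAR-10 .npz archive so that n samples form the labeled training set, and that separate_then_save writes the labeled split to l_cifar-10.npz next to the input file:

import os
from st2.cifar10.datasets import Separator

# Assumed behavior, mirroring the examples below: keep 4000 samples
# as the labeled training split and save it alongside the original archive.
home = os.environ.get("HOME")
fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
separator = Separator(4000)          # number of labeled training samples
separator.separate_then_save(fpath)  # expected to write l_cifar-10.npz

The examples then read the labeled split, the unlabeled pool, and the test set through Cifar10DataReader.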
Example 1: main
def main(args):
# Settings
device_id = args.device_id
batch_size = 100
batch_size_eval = 100
n_l_train_data = 4000
n_train_data = 50000
n_cls = 10
learning_rate = 1. * 1e-3
n_epoch = 300
act = F.relu
iter_epoch = n_train_data // batch_size
n_iter = n_epoch * iter_epoch
extension_module = args.context
# Model
## supervised
batch_size, m, h, w = batch_size, 3, 32, 32
ctx = extension_context(extension_module, device_id=device_id)
x_l = nn.Variable((batch_size, m, h, w))
y_l = nn.Variable((batch_size, 1))
pred = cnn_model_003(ctx, x_l)
loss_ce = ce_loss(ctx, pred, y_l)
loss_er = er_loss(ctx, pred)
loss_supervised = loss_ce + loss_er
## stochastic regularization
x_u0 = nn.Variable((batch_size, m, h, w), need_grad=False)
x_u1 = nn.Variable((batch_size, m, h, w), need_grad=False)
pred_x_u0 = cnn_model_003(ctx, x_u0)
pred_x_u1 = cnn_model_003(ctx, x_u1)
loss_sr = sr_loss(ctx, pred_x_u0, pred_x_u1)
loss_er0 = er_loss(ctx, pred_x_u0)
loss_er1 = er_loss(ctx, pred_x_u1)
loss_unsupervised = loss_sr + loss_er0 + loss_er1
## autoencoder
path = args.model_path
nn.load_parameters(path)
x_u0_rc = cnn_ae_model_000(ctx, x_u0, act=F.relu, test=True)
x_u1_rc = cnn_ae_model_000(ctx, x_u1, act=F.relu, test=True)
x_u0_rc.need_grad = False
x_u1_rc.need_grad = False
pred_x_u0_rc = cnn_model_003(ctx, x_u0_rc, test=False)
pred_x_u1_rc = cnn_model_003(ctx, x_u1_rc, test=False)
loss_sr_rc = sr_loss(ctx, pred_x_u0_rc, pred_x_u1_rc)
loss_er0_rc = er_loss(ctx, pred_x_u0_rc)
loss_er1_rc = er_loss(ctx, pred_x_u1_rc)
loss_unsupervised_rc = loss_sr_rc + loss_er0_rc + loss_er1_rc
loss_unsupervised += loss_unsupervised_rc
## evaluate
batch_size_eval, m, h, w = batch_size, 3, 32, 32
x_eval = nn.Variable((batch_size_eval, m, h, w))
pred_eval = cnn_model_003(ctx, x_eval, test=True)
# Solver
with nn.context_scope(ctx):
solver = S.Adam(alpha=learning_rate)
solver.set_parameters(nn.get_parameters())
# Dataset
## separate dataset
home = os.environ.get("HOME")
fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
separator = Separator(n_l_train_data)
separator.separate_then_save(fpath)
l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
# data reader
data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
batch_size=batch_size,
n_cls=n_cls,
da=True,
shape=True)
# Training loop
print("# Training loop")
epoch = 1
st = time.time()
acc_prev = 0.
for i in range(n_iter):
# Get data and set it to the variables
x_l0_data, x_l1_data, y_l_data = data_reader.get_l_train_batch()
x_u0_data, x_u1_data, y_u_data = data_reader.get_u_train_batch()
x_l.d, _, y_l.d = x_l0_data, x_l1_data, y_l_data
x_u0.d, x_u1.d = x_u0_data, x_u1_data
# Train
loss_supervised.forward(clear_no_need_grad=True)
solver.zero_grad()
loss_supervised.backward(clear_buffer=True)
solver.update()
loss_unsupervised.forward(clear_no_need_grad=True)
solver.zero_grad()
loss_unsupervised.backward(clear_buffer=True)
#......... (some code omitted here) .........
Author: kzky, Project: works, Lines of code: 101, Source file: exp016.py
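For readers unfamiliar with the loss helpers used in Example 1: sr_loss looks like a stochastic-regularization (consistency) loss between two forward passes on two augmentations of the same unlabeled batch, and er_loss like an entropy regularizer. The st2 implementations are not shown on this page, so the following NumPy sketch only illustrates the usual shape of such losses (squared-error consistency, entropy minimization); it is an assumption, not the actual st2 code:

import numpy as np

def sr_loss_sketch(p0, p1):
    # Consistency term: penalize disagreement between two stochastic
    # predictions of the same network on the same (augmented) inputs.
    return np.mean((p0 - p1) ** 2)

def er_loss_sketch(p, eps=1e-8):
    # Entropy regularization: push predicted distributions p (batch x classes)
    # toward low entropy, i.e. confident predictions.
    return -np.mean(np.sum(p * np.log(p + eps), axis=1))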
Example 2: main
def main(args):
# Settings
device_id = args.device_id
batch_size = args.batch_size
batch_size_eval = args.batch_size_eval
n_l_train_data = 4000
n_train_data = 50000
n_cls = 10
learning_rate = 1. * 1e-3
n_epoch = 300
act = F.relu
iter_epoch = n_train_data // batch_size
n_iter = n_epoch * iter_epoch
extension_module = args.context
# Model
## supervised
batch_size, m, h, w = batch_size, 3, 32, 32
ctx = extension_context(extension_module, device_id=device_id)
x_l = nn.Variable((batch_size, m, h, w))
y_l = nn.Variable((batch_size, 1))
pred, log_var = cnn_model_003(ctx, x_l)
one = F.constant(1., log_var.shape)
loss_ce = ce_loss_with_uncertainty(ctx, pred, y_l, log_var)
reg_sigma = sigma_regularization(ctx, log_var, one)
loss_supervised = loss_ce + reg_sigma
## stochastic regularization
x_u0 = nn.Variable((batch_size, m, h, w))
x_u1 = nn.Variable((batch_size, m, h, w))
pred_x_u0, log_var0 = cnn_model_003(ctx, x_u0)
pred_x_u1, log_var1 = cnn_model_003(ctx, x_u1)
loss_sr = sr_loss_with_uncertainty(ctx,
pred_x_u0, pred_x_u1, log_var0, log_var1)
loss_er0 = er_loss(ctx, pred_x_u0)
loss_er1 = er_loss(ctx, pred_x_u1)
reg_sigma0 = sigma_regularization(ctx, log_var0, one)
reg_sigma1 = sigma_regularization(ctx, log_var1, one)
loss_unsupervised = loss_sr + loss_er0 + loss_er1 \
+ reg_sigma0 + reg_sigma1
## evaluate
batch_size_eval, m, h, w = batch_size, 3, 32, 32
x_eval = nn.Variable((batch_size_eval, m, h, w))
pred_eval, _ = cnn_model_003(ctx, x_eval, test=True)
# Solver
with nn.context_scope(ctx):
solver_l = S.Adam(alpha=learning_rate)
solver_l.set_parameters(nn.get_parameters())
solver_u = S.Adam(alpha=learning_rate)
solver_u.set_parameters(nn.get_parameters())
# Dataset
## separate dataset
home = os.environ.get("HOME")
fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
separator = Separator(n_l_train_data)
separator.separate_then_save(fpath)
l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
# data reader
data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
batch_size=batch_size,
n_cls=n_cls,
da=True,
shape=True)
# Training loop
print("# Training loop")
epoch = 1
st = time.time()
acc_prev = 0.
for i in range(n_iter):
# Get data and set it to the variables
x_l0_data, x_l1_data, y_l_data = data_reader.get_l_train_batch()
x_u0_data, x_u1_data, y_u_data = data_reader.get_u_train_batch()
x_l.d, _, y_l.d = x_l0_data, x_l1_data, y_l_data
x_u0.d, x_u1.d = x_u0_data, x_u1_data
# Train
## for supervised loss
loss_supervised.forward(clear_no_need_grad=True)
solver_l.zero_grad()
loss_supervised.backward(clear_buffer=True)
solver_l.update()
## for unsupervised loss
loss_unsupervised.forward(clear_no_need_grad=True)
solver_u.zero_grad()
loss_unsupervised.backward(clear_buffer=True)
solver_u.update()
# Evaluate
if (i+1) % iter_epoch == 0:
# Get data and set it to the variables
x_data, y_data = data_reader.get_test_batch()
#......... (some code omitted here) .........
Author: kzky, Project: works, Lines of code: 101, Source file: exp037.py
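Example 2 replaces the plain cross-entropy with ce_loss_with_uncertainty and adds sigma_regularization terms on the predicted log-variance. The actual st2 definitions are not shown here; a common recipe for losses of this shape is heteroscedastic loss attenuation, where each sample's loss is down-weighted by its predicted variance and large variances are penalized. A rough sketch under that assumption (not the st2 implementation):

import numpy as np

def ce_loss_with_uncertainty_sketch(ce_per_sample, log_var):
    # Attenuate each sample's cross-entropy by its predicted uncertainty;
    # the 0.5 * log_var term keeps the model from inflating variance freely.
    return np.mean(np.exp(-log_var) * ce_per_sample + 0.5 * log_var)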
Example 3: main
def main(args):
# Settings
device_id = args.device_id
batch_sizes = [16, 32, 64]
batch_size_eval = 64
c, h, w = 3, 32, 32
n_l_train_data = 4000
n_train_data = 50000
n_cls = 10
learning_rate = 1. * 1e-3
n_epoch = 300
act = F.relu
iter_epoch = n_train_data // int(np.mean(batch_sizes)) # approximate epoch
n_iter = n_epoch * iter_epoch
extension_module = args.context
# Model (Batch-Stochastic)
ctx = extension_context(extension_module, device_id=device_id)
## supervised
x_list, y_list, preds, losses_ce = batch_stochastic_supervised_network(
ctx, batch_sizes, c, h, w)
## stochastic regularization
x0_list, x1_list, _, losses_sr = batch_stochastic_unsupervised_network(
ctx, batch_sizes, c, h, w)
## evaluate
batch_size_eval, m, h, w = batch_size_eval, c, h, w
x_eval = nn.Variable((batch_size_eval, m, h, w))
pred_eval = cnn_model_003(ctx, x_eval, test=True)
# Solver
with nn.context_scope(ctx):
solver = S.Adam(alpha=learning_rate)
solver.set_parameters(nn.get_parameters())
# Dataset
## separate dataset
home = os.environ.get("HOME")
fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
separator = Separator(n_l_train_data)
separator.separate_then_save(fpath)
l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
# data reader
data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
batch_size=batch_sizes[0],
n_cls=n_cls,
da=True,
shape=True)
# Training loop
print("# Training loop")
epoch = 1
st = time.time()
acc_prev = 0.
iter_ = 0
for i in range(n_iter):
idx = np.random.choice(np.arange(0, len(batch_sizes)))
idx_u = np.random.choice(np.arange(0, len(batch_sizes)))
# Get data
bs = batch_sizes[idx]
bs_u = batch_sizes[idx_u]
x_l0_data, x_l1_data, y_l_data = data_reader.get_l_train_batch(bs)
x_u0_data, x_u1_data, y_u_data = data_reader.get_u_train_batch(bs_u)
# Set it to the variables
x_l = x_list[idx]
y_l = y_list[idx]
x_u0 = x0_list[idx_u]
x_u1 = x1_list[idx_u]
x_l.d, _, y_l.d = x_l0_data, x_l1_data, y_l_data
x_u0.d, x_u1.d = x_u0_data, x_u1_data
# Train
loss_ce = losses_ce[idx]
loss_sr = losses_sr[idx_u]
loss_ce.forward(clear_no_need_grad=True)
loss_sr.forward(clear_no_need_grad=True)
solver.zero_grad()
loss_ce.backward(clear_buffer=True)
loss_sr.backward(clear_buffer=True)
solver.update()
# Evaluate
if (i+1) % iter_epoch == 0: # approximate epoch
# Get data and set it to the variables
x_data, y_data = data_reader.get_test_batch()
# Evaluation loop
ve = 0.
iter_val = 0
for k in range(0, len(x_data), batch_size_eval):
x_eval.d = get_test_data(x_data, k, batch_size_eval)
label = get_test_data(y_data, k, batch_size_eval)
pred_eval.forward(clear_buffer=True)
ve += categorical_error(pred_eval.d, label)
#......... (some code omitted here) .........
Author: kzky, Project: works, Lines of code: 101, Source file: exp023.py
Example 4: main
def main(args):
# Settings
device_id = args.device_id
batch_size = args.batch_size
batch_size_eval = args.batch_size_eval
n_l_train_data = 4000
n_train_data = 50000
n_cls = 10
learning_rate = 1. * 1e-3
n_epoch = 300
act = F.relu
iter_epoch = int(n_train_data / batch_size)
n_iter = n_epoch * iter_epoch
extension_module = args.context
# Model
views = [global_view, spatial_view, feature_view]
## supervised
batch_size, m, h, w = batch_size, 3, 32, 32
ctx = extension_context(extension_module, device_id=device_id)
x_l = nn.Variable((batch_size, m, h, w))
y_l = nn.Variable((batch_size, 1))
feature = cnn_model_003(ctx, x_l)
loss_supervised = []
for view in views:
pred = view(ctx, feature)
loss_ce = ce_loss(ctx, pred, y_l)
loss_er = er_loss(ctx, pred)
loss_supervised += [loss_ce, loss_er]
loss_supervised = reduce(lambda x, y: x+y, loss_supervised)
## cross view loss
x_u0 = nn.Variable((batch_size, m, h, w))
x_u1 = nn.Variable((batch_size, m, h, w))
feature_x_u0 = cnn_model_003(ctx, x_u0)
feature_x_u1 = cnn_model_003(ctx, x_u1)
pred_x_u0 = []
pred_x_u1 = []
loss_er = []
loss_unsupervised = []
for view in views:
pred = view(ctx, feature_x_u0)
pred_x_u0 += [pred]
loss_er +=[er_loss(ctx, pred)]
pred = view(ctx, feature_x_u1)
pred_x_u1 += [pred]
loss_er += [er_loss(ctx, pred)]
for pred_a, pred_b in itertools.product(pred_x_u0, pred_x_u1): # multi-view
if pred_a == pred_b:
continue
loss_unsupervised += [sr_loss(ctx, pred_a, pred_b)]
loss_unsupervised = reduce(lambda x, y: x+y, loss_unsupervised) \
+ reduce(lambda x, y: x+y, loss_er)
## evaluate
batch_size_eval, m, h, w = batch_size, 3, 32, 32
x_eval = nn.Variable((batch_size_eval, m, h, w))
feature_eval = cnn_model_003(ctx, x_eval, test=True)
pred_eval = []
for view in views:
pred_eval += [view(ctx, feature_eval)]
# Solver
with nn.context_scope(ctx):
solver = S.Adam(alpha=learning_rate)
solver.set_parameters(nn.get_parameters())
# Dataset
## separate dataset
home = os.environ.get("HOME")
fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
separator = Separator(n_l_train_data)
separator.separate_then_save(fpath)
l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
# data reader
data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
batch_size=batch_size,
n_cls=n_cls,
da=True,
shape=True)
# Training loop
print("# Training loop")
epoch = 1
st = time.time()
acc_prev = 0.
ve_best = 1.
save_path_prev = ""
for i in range(n_iter):
# Get data and set it to the variables
x_l0_data, x_l1_data, y_l_data = data_reader.get_l_train_batch()
x_u0_data, x_u1_data, y_u_data = data_reader.get_u_train_batch()
x_l.d, _, y_l.d = x_l0_data, x_l1_data, y_l_data
x_u0.d, x_u1.d = x_u0_data, x_u1_data
#......... (some code omitted here) .........
Author: kzky, Project: works, Lines of code: 101, Source file: exp005_003.py
Example 5: main
def main(args):
# Settings
device_id = args.device_id
batch_size = args.batch_size
batch_size_eval = args.batch_size_eval
n_l_train_data = 4000
n_train_data = 50000
n_cls = 10
learning_rate = 1. * 1e-3
n_epoch = 300
act = F.swish
iter_epoch = int(n_train_data / batch_size)
n_iter = n_epoch * iter_epoch
extension_module = args.context
# Model
## supervised
batch_size, m, h, w = batch_size, 3, 32, 32
ctx = extension_context(extension_module, device_id=device_id)
x_l = nn.Variable((batch_size, m, h, w))
y_l = nn.Variable((batch_size, 1))
pred = cnn_model_003(ctx, x_l)
loss_ce = ce_loss(ctx, pred, y_l)
loss_er = er_loss(ctx, pred)
loss_supervised = loss_ce + loss_er
## stochastic regularization
x_u0 = nn.Variable((batch_size, m, h, w))
x_u1 = nn.Variable((batch_size, m, h, w))
pred_x_u0 = cnn_model_003(ctx, x_u0)
pred_x_u1 = cnn_model_003(ctx, x_u1)
loss_sr = sr_loss(ctx, pred_x_u0, pred_x_u1)
loss_er0 = er_loss(ctx, pred_x_u0)
loss_er1 = er_loss(ctx, pred_x_u1)
loss_unsupervised = loss_sr + loss_er0 + loss_er1
## evaluate
batch_size_eval, m, h, w = batch_size, 3, 32, 32
x_eval = nn.Variable((batch_size_eval, m, h, w))
pred_eval = cnn_model_003(ctx, x_eval, test=True)
# Solver
with nn.context_scope(ctx):
solver = S.Adam(alpha=learning_rate)
solver.set_parameters(nn.get_parameters())
# Dataset
## separate dataset
home = os.environ.get("HOME")
fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
separator = Separator(n_l_train_data)
separator.separate_then_save(fpath)
l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
# data reader
data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
batch_size=batch_size,
n_cls=n_cls,
da=False,
shape=True)
# Training loop
print("# Training loop")
epoch = 1
st = time.time()
acc_prev = 0.
ve_best = 1.
save_path_prev = ""
for i in range(n_iter):
# Get data and set it to the variables
x_l0_data, x_l1_data, y_l_data = data_reader.get_l_train_batch()
x_u0_data, x_u1_data, y_u_data = data_reader.get_u_train_batch()
x_l.d, _, y_l.d = x_l0_data, x_l1_data, y_l_data
x_u0.d, x_u1.d = x_u0_data, x_u1_data
# Train
loss_supervised.forward(clear_no_need_grad=True)
loss_unsupervised.forward(clear_no_need_grad=True)
solver.zero_grad()
loss_supervised.backward(clear_buffer=True)
loss_unsupervised.backward(clear_buffer=True)
solver.update()
# Evaluate
if (i+1) % iter_epoch == 0:
# Get data and set it to the variables
x_data, y_data = data_reader.get_test_batch()
# Evaluation loop
ve = 0.
iter_val = 0
for k in range(0, len(x_data), batch_size_eval):
x_eval.d = get_test_data(x_data, k, batch_size_eval)
label = get_test_data(y_data, k, batch_size_eval)
pred_eval.forward(clear_buffer=True)
ve += categorical_error(pred_eval.d, label)
#......... (some code omitted here) .........
Author: kzky, Project: works, Lines of code: 101, Source file: exp005_002.py
Example 6: main
def main(args):
# Settings
device_id = args.device_id
batch_size = args.batch_size
batch_size_eval = args.batch_size_eval
n_l_train_data = 4000
n_train_data = 50000
n_cls = 10
learning_rate = 1. * 1e-3
n_epoch = 300
act = F.relu
iter_epoch = n_train_data // batch_size
n_iter = n_epoch * iter_epoch
extension_module = args.context
# Model
## supervised cnn
batch_size, m, h, w = batch_size, 3, 32, 32
ctx = extension_context(extension_module, device_id=device_id)
x_l = nn.Variable((batch_size, m, h, w))
x_l.persistent = True
y_l = nn.Variable((batch_size, 1))
y_l.persistent = True
pred = cnn_model_003(ctx, "cnn", x_l)
loss_ce = ce_loss(ctx, pred, y_l)
loss_er = er_loss(ctx, pred)
loss_supervised = loss_ce + loss_er
## supervised resnet
pred_res = cifar10_resnet23_prediction(ctx, "resnet", x_l)
loss_res_ce = ce_loss(ctx, pred_res, y_l)
loss_res_supervised = loss_res_ce
## stochastic regularization for cnn
x_u0 = nn.Variable((batch_size, m, h, w))
x_u0.persistent = True
x_u1 = nn.Variable((batch_size, m, h, w))
pred_x_u0 = cnn_model_003(ctx, "cnn", x_u0)
pred_x_u0.persistent = True
pred_x_u1 = cnn_model_003(ctx, "cnn", x_u1)
loss_sr = sr_loss(ctx, pred_x_u0, pred_x_u1)
loss_er0 = er_loss(ctx, pred_x_u0)
loss_er1 = er_loss(ctx, pred_x_u1)
loss_unsupervised = loss_sr + loss_er0 + loss_er1
## knowledge transfer for resnet
pred_res_x_u0 = cifar10_resnet23_prediction(ctx, "resnet", x_u0)
loss_res_unsupervised = kl_divergence(ctx, pred_res_x_u0, pred_x_u0)
## evaluate
batch_size_eval, m, h, w = batch_size, 3, 32, 32
x_eval = nn.Variable((batch_size_eval, m, h, w))
x_eval.persistent = True # reused
pred_eval = cnn_model_003(ctx, "cnn", x_eval, test=True)
pred_res_eval = cifar10_resnet23_prediction(ctx, "resnet", x_eval, test=True)
# Solver
with nn.context_scope(ctx):
with nn.parameter_scope("cnn"):
solver = S.Adam(alpha=learning_rate)
solver.set_parameters(nn.get_parameters())
with nn.parameter_scope("resnet"):
solver_res = S.Adam(alpha=learning_rate)
solver_res.set_parameters(nn.get_parameters())
# Dataset
## separate dataset
home = os.environ.get("HOME")
fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
separator = Separator(n_l_train_data)
separator.separate_then_save(fpath)
l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
# data reader
data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
batch_size=batch_size,
n_cls=n_cls,
da=True,
shape=True)
# Training loop
print("# Training loop")
epoch = 1
st = time.time()
acc_prev = 0.
for i in range(n_iter):
# Get data and set it to the variables
x_l0_data, x_l1_data, y_l_data = data_reader.get_l_train_batch()
x_u0_data, x_u1_data, y_u_data = data_reader.get_u_train_batch()
x_l.d, _, y_l.d = x_l0_data, x_l1_data, y_l_data
x_u0.d, x_u1.d = x_u0_data, x_u1_data
# Train for cnn
loss_supervised.forward(clear_no_need_grad=True)
loss_unsupervised.forward(clear_no_need_grad=True)
solver.zero_grad()
#......... (some code omitted here) .........
Author: kzky, Project: works, Lines of code: 101, Source file: exp059.py
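Example 6 trains two networks and couples them with kl_divergence(ctx, pred_res_x_u0, pred_x_u0), i.e. knowledge transfer from the CNN to the ResNet on unlabeled data. The argument order and direction of the divergence are not documented on this page; the sketch below assumes the second argument acts as the (detached) teacher distribution:

import numpy as np

def kl_divergence_sketch(p_student, p_teacher, eps=1e-8):
    # KL(teacher || student), averaged over the batch: pulls the student's
    # predicted distribution toward the teacher's on the same inputs.
    return np.mean(np.sum(p_teacher * (np.log(p_teacher + eps)
                                       - np.log(p_student + eps)), axis=1))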
Example 7: main
def main(args):
# Settings
device_id = args.device_id
batch_size = args.batch_size
batch_size_eval = args.batch_size_eval
n_l_train_data = 4000
n_train_data = 50000
n_cls = 10
learning_rate = 1. * 1e-3
n_epoch = 300
act = F.relu
iter_epoch = int(n_train_data / batch_size)
n_iter = n_epoch * iter_epoch
extension_module = args.context
alpha = args.alpha
# Supervised Model
## ERM
batch_size, m, h, w = batch_size, 3, 32, 32
ctx = extension_context(extension_module, device_id=device_id)
x_l_0 = nn.Variable((batch_size, m, h, w))
y_l_0 = nn.Variable((batch_size, 1))
pred = cnn_model_003(ctx, x_l_0)
loss_ce = ce_loss(ctx, pred, y_l_0)
loss_er = er_loss(ctx, pred)
loss_supervised = loss_ce + loss_er
## VRM (mixup)
x_l_1 = nn.Variable((batch_size, m, h, w))
y_l_1 = nn.Variable((batch_size, 1))
coef = nn.Variable()
coef_b = F.broadcast(coef.reshape([1]*x_l_0.ndim, unlink=True), x_l_0.shape)
x_l_m = coef_b * x_l_0 + (1 - coef_b) * x_l_1
coef_b = F.broadcast(coef.reshape([1]*pred.ndim, unlink=True), pred.shape)
y_l_m = coef_b * F.one_hot(y_l_0, (n_cls, )) \
+ (1-coef_b) * F.one_hot(y_l_1, (n_cls, ))
x_l_m.need_grad, y_l_m.need_grad = False, False
pred_m = cnn_model_003(ctx, x_l_m)
loss_er_m = er_loss(ctx, pred_m)  # TODO: needed?
loss_ce_m = ce_loss_soft(ctx, pred_m, y_l_m)
loss_supervised_m = loss_ce_m  #+ loss_er_m
# Semi-Supervised Model
## ERM
x_u0 = nn.Variable((batch_size, m, h, w))
x_u1 = nn.Variable((batch_size, m, h, w))
pred_x_u0 = cnn_model_003(ctx, x_u0)
pred_x_u1 = cnn_model_003(ctx, x_u1)
pred_x_u0.persistent, pred_x_u1.persistent = True, True
loss_sr = sr_loss(ctx, pred_x_u0, pred_x_u1)
loss_er0 = er_loss(ctx, pred_x_u0)
loss_er1 = er_loss(ctx, pred_x_u1)
loss_unsupervised = loss_sr + loss_er0 + loss_er1
## VRM (mixup)
x_u2 = nn.Variable((batch_size, m, h, w)) # not to overwrite x_u1.d
coef_u = nn.Variable()
coef_u_b = F.broadcast(coef_u.reshape([1]*x_u0.ndim, unlink=True), x_u0.shape)
x_u_m = coef_u_b * x_u0 + (1-coef_u_b) * x_u2
pred_x_u0_ = nn.Variable(pred_x_u0.shape) # unlink forward pass but reuse result
pred_x_u1_ = nn.Variable(pred_x_u1.shape)
pred_x_u0_.data = pred_x_u0.data
pred_x_u1_.data = pred_x_u1.data
coef_u_b = F.broadcast(coef_u.reshape([1]*pred_x_u0.ndim, unlink=True), pred_x_u0.shape)
y_u_m = coef_u_b * pred_x_u0_ + (1-coef_u_b) * pred_x_u1_
x_u_m.need_grad, y_u_m.need_grad = False, False
pred_x_u_m = cnn_model_003(ctx, x_u_m)
loss_er_u_m = er_loss(ctx, pred_x_u_m)  # TODO: needed?
loss_ce_u_m = ce_loss_soft(ctx, pred_x_u_m, y_u_m)
loss_unsupervised_m = loss_ce_u_m  #+ loss_er_u_m
# Evaluation Model
batch_size_eval, m, h, w = batch_size, 3, 32, 32
x_eval = nn.Variable((batch_size_eval, m, h, w))
pred_eval = cnn_model_003(ctx, x_eval, test=True)
# Solver
with nn.context_scope(ctx):
solver = S.Adam(alpha=learning_rate)
solver.set_parameters(nn.get_parameters())
# Dataset
## separate dataset
home = os.environ.get("HOME")
fpath = os.path.join(home, "datasets/cifar10/cifar-10.npz")
separator = Separator(n_l_train_data)
separator.separate_then_save(fpath)
l_train_path = os.path.join(home, "datasets/cifar10/l_cifar-10.npz")
u_train_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
test_path = os.path.join(home, "datasets/cifar10/cifar-10.npz")
# data reader
data_reader = Cifar10DataReader(l_train_path, u_train_path, test_path,
batch_size=batch_size,
n_cls=n_cls,
da=True,
shape=True)
# Training loop
print("# Training loop")
epoch = 1
#......... (some code omitted here) .........
Author: kzky, Project: works, Lines of code: 101, Source file: exp005_001.py
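Example 7's loop is truncated before the mixing coefficients coef and coef_u are assigned. In mixup-style vicinal risk minimization the coefficient is conventionally drawn from a Beta(alpha, alpha) distribution each iteration, which would also explain the alpha = args.alpha setting near the top of the function. This is a hypothetical fill for the omitted step, not code from the source:

import numpy as np

# Hypothetical: sample fresh mixup coefficients once per training iteration.
coef.d = np.random.beta(alpha, alpha)    # supervised mixup coefficient
coef_u.d = np.random.beta(alpha, alpha)  # unsupervised mixup coefficient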
Note: The st2.cifar10.datasets.Separator class examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not repost without permission.