This page collects typical usage examples of the pyro.clear_param_store function in Python. If you are unsure what exactly clear_param_store does or how to use it, the curated code examples below may help.
Twenty code examples of clear_param_store are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
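All twenty examples share the same pattern: call pyro.clear_param_store() before building and running SVI, so that parameters registered via pyro.param in an earlier run do not leak into the next one. As a quick orientation, here is a minimal, self-contained sketch of that pattern. It assumes a recent Pyro 1.x API (many examples below target older interfaces such as pyro.observe, pyro.map_data, and loss="ELBO"), it loosely mirrors model2/guide from Example 11 with the newer .expand/.to_event spellings, and the model, guide, and parameter names are illustrative only, not taken from any example on this page.

import torch
import pyro
import pyro.distributions as dist
from pyro.infer import SVI, Trace_ELBO
from pyro.optim import Adam

def model(data):
    # Beta prior over a coin's bias, i.i.d. Bernoulli likelihood over all flips at once.
    f = pyro.sample("fairness", dist.Beta(10.0, 10.0))
    pyro.sample("obs", dist.Bernoulli(f).expand(data.shape).to_event(1), obs=data)

def guide(data):
    # Variational parameters are registered in Pyro's global parameter store.
    alpha_q = pyro.param("alpha_q", torch.tensor(15.0),
                         constraint=dist.constraints.positive)
    beta_q = pyro.param("beta_q", torch.tensor(15.0),
                        constraint=dist.constraints.positive)
    pyro.sample("fairness", dist.Beta(alpha_q, beta_q))

data = torch.tensor([1.0] * 6 + [0.0] * 4)

# Clear anything left over from a previous run so that the pyro.param
# calls above start from their initial values again.
pyro.clear_param_store()

svi = SVI(model, guide, Adam({"lr": 0.005}), loss=Trace_ELBO())
for step in range(100):
    svi.step(data)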
Example 1: test_elbo_nonreparameterized
def test_elbo_nonreparameterized(self):
    pyro.clear_param_store()

    def model():
        p_latent = pyro.sample("p_latent", dist.beta, self.alpha0, self.beta0)
        pyro.map_data("aaa",
                      self.data, lambda i, x: pyro.observe(
                          "obs_{}".format(i), dist.bernoulli, x, p_latent),
                      batch_size=self.batch_size)
        return p_latent

    def guide():
        alpha_q_log = pyro.param("alpha_q_log",
                                 Variable(self.log_alpha_n.data + 0.17, requires_grad=True))
        beta_q_log = pyro.param("beta_q_log",
                                Variable(self.log_beta_n.data - 0.143, requires_grad=True))
        alpha_q, beta_q = torch.exp(alpha_q_log), torch.exp(beta_q_log)
        pyro.sample("p_latent", dist.beta, alpha_q, beta_q)
        pyro.map_data("aaa", self.data, lambda i, x: None, batch_size=self.batch_size)

    adam = optim.Adam({"lr": .001, "betas": (0.97, 0.999)})
    svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)
    for k in range(10001):
        svi.step()

    alpha_error = param_abs_error("alpha_q_log", self.log_alpha_n)
    beta_error = param_abs_error("beta_q_log", self.log_beta_n)
    self.assertEqual(0.0, alpha_error, prec=0.08)
    self.assertEqual(0.0, beta_error, prec=0.08)
Developer: Magica-Chen, Project: pyro, Lines: 31, Source: test_inference.py
Example 2: do_elbo_test
def do_elbo_test(self, reparameterized, n_steps):
    pyro.clear_param_store()
    pt_guide = LogNormalNormalGuide(self.log_mu_n.data + 0.17,
                                    self.log_tau_n.data - 0.143)

    def model():
        mu_latent = pyro.sample("mu_latent", dist.normal,
                                self.mu0, torch.pow(self.tau0, -0.5))
        sigma = torch.pow(self.tau, -0.5)
        pyro.observe("obs0", dist.lognormal, self.data[0], mu_latent, sigma)
        pyro.observe("obs1", dist.lognormal, self.data[1], mu_latent, sigma)
        return mu_latent

    def guide():
        pyro.module("mymodule", pt_guide)
        mu_q, tau_q = torch.exp(pt_guide.mu_q_log), torch.exp(pt_guide.tau_q_log)
        sigma = torch.pow(tau_q, -0.5)
        pyro.sample("mu_latent", dist.Normal(mu_q, sigma, reparameterized=reparameterized))

    adam = optim.Adam({"lr": .0005, "betas": (0.96, 0.999)})
    svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)
    for k in range(n_steps):
        svi.step()

    mu_error = param_abs_error("mymodule$$$mu_q_log", self.log_mu_n)
    tau_error = param_abs_error("mymodule$$$tau_q_log", self.log_tau_n)
    self.assertEqual(0.0, mu_error, prec=0.07)
    self.assertEqual(0.0, tau_error, prec=0.07)
Developer: Magica-Chen, Project: pyro, Lines: 29, Source: test_inference.py
Example 3: test_module_nn
def test_module_nn(nn_module):
    pyro.clear_param_store()
    nn_module = nn_module()
    assert pyro.get_param_store()._params == {}
    pyro.module("module", nn_module)
    for name in pyro.get_param_store().get_all_param_names():
        assert pyro.params.user_param_name(name) in nn_module.state_dict().keys()
Developer: lewisKit, Project: pyro, Lines: 7, Source: test_module.py
Example 4: test_gmm_iter_discrete_traces
def test_gmm_iter_discrete_traces(data_size, graph_type, model):
    pyro.clear_param_store()
    data = torch.arange(0, data_size)
    model = config_enumerate(model)
    traces = list(iter_discrete_traces(graph_type, model, data=data, verbose=True))
    # This non-vectorized version is exponential in data_size:
    assert len(traces) == 2**data_size
Developer: lewisKit, Project: pyro, Lines: 7, Source: test_enum.py
Example 5: main
def main(args):
    pyro.clear_param_store()
    data = build_linear_dataset(N, p)
    if args.cuda:
        # make tensors and modules CUDA
        data = data.cuda()
        softplus.cuda()
        regression_model.cuda()
    for j in range(args.num_epochs):
        if args.batch_size == N:
            # use the entire data set
            epoch_loss = svi.step(data)
        else:
            # mini batch
            epoch_loss = 0.0
            perm = torch.randperm(N) if not args.cuda else torch.randperm(N).cuda()
            # shuffle data
            data = data[perm]
            # get indices of each batch
            all_batches = get_batch_indices(N, args.batch_size)
            for ix, batch_start in enumerate(all_batches[:-1]):
                batch_end = all_batches[ix + 1]
                batch_data = data[batch_start: batch_end]
                epoch_loss += svi.step(batch_data)
        if j % 100 == 0:
            print("epoch avg loss {}".format(epoch_loss/float(N)))
Developer: lewisKit, Project: pyro, Lines: 26, Source: bayesian_regression.py
Example 6: test_bern_elbo_gradient
def test_bern_elbo_gradient(enum_discrete, trace_graph):
    pyro.clear_param_store()
    num_particles = 2000

    def model():
        p = Variable(torch.Tensor([0.25]))
        pyro.sample("z", dist.Bernoulli(p))

    def guide():
        p = pyro.param("p", Variable(torch.Tensor([0.5]), requires_grad=True))
        pyro.sample("z", dist.Bernoulli(p))

    print("Computing gradients using surrogate loss")
    Elbo = TraceGraph_ELBO if trace_graph else Trace_ELBO
    elbo = Elbo(enum_discrete=enum_discrete,
                num_particles=(1 if enum_discrete else num_particles))
    with xfail_if_not_implemented():
        elbo.loss_and_grads(model, guide)
    params = sorted(pyro.get_param_store().get_all_param_names())
    assert params, "no params found"
    actual_grads = {name: pyro.param(name).grad.clone() for name in params}

    print("Computing gradients using finite difference")
    elbo = Trace_ELBO(num_particles=num_particles)
    expected_grads = finite_difference(lambda: elbo.loss(model, guide))

    for name in params:
        print("{} {}{}{}".format(name, "-" * 30, actual_grads[name].data,
                                 expected_grads[name].data))
    assert_equal(actual_grads, expected_grads, prec=0.1)
Developer: Magica-Chen, Project: pyro, Lines: 30, Source: test_enum.py
Example 7: test_iter_discrete_traces_vector
def test_iter_discrete_traces_vector(graph_type):
    pyro.clear_param_store()

    def model():
        p = pyro.param("p", Variable(torch.Tensor([[0.05], [0.15]])))
        ps = pyro.param("ps", Variable(torch.Tensor([[0.1, 0.2, 0.3, 0.4],
                                                     [0.4, 0.3, 0.2, 0.1]])))
        x = pyro.sample("x", dist.Bernoulli(p))
        y = pyro.sample("y", dist.Categorical(ps, one_hot=False))
        assert x.size() == (2, 1)
        assert y.size() == (2, 1)
        return dict(x=x, y=y)

    traces = list(iter_discrete_traces(graph_type, model))

    p = pyro.param("p").data
    ps = pyro.param("ps").data
    assert len(traces) == 2 * ps.size(-1)

    for scale, trace in traces:
        x = trace.nodes["x"]["value"].data.squeeze().long()[0]
        y = trace.nodes["y"]["value"].data.squeeze().long()[0]
        expected_scale = torch.exp(dist.Bernoulli(p).log_pdf(x) *
                                   dist.Categorical(ps, one_hot=False).log_pdf(y))
        expected_scale = expected_scale.data.view(-1)[0]
        assert_equal(scale, expected_scale)
Developer: Magica-Chen, Project: pyro, Lines: 26, Source: test_enum.py
Example 8: assert_ok
def assert_ok(model, guide, elbo):
    """
    Assert that inference works without warnings or errors.
    """
    pyro.clear_param_store()
    inference = SVI(model, guide, Adam({"lr": 1e-6}), elbo)
    inference.step()
Developer: lewisKit, Project: pyro, Lines: 7, Source: test_valid_models.py
Example 9: test_elbo_bern
def test_elbo_bern(quantity, enumerate1):
    pyro.clear_param_store()
    num_particles = 1 if enumerate1 else 10000
    prec = 0.001 if enumerate1 else 0.1
    q = pyro.param("q", torch.tensor(0.5, requires_grad=True))
    kl = kl_divergence(dist.Bernoulli(q), dist.Bernoulli(0.25))

    def model():
        with pyro.iarange("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(0.25).expand_by([num_particles]))

    @config_enumerate(default=enumerate1)
    def guide():
        q = pyro.param("q")
        with pyro.iarange("particles", num_particles):
            pyro.sample("z", dist.Bernoulli(q).expand_by([num_particles]))

    elbo = TraceEnum_ELBO(max_iarange_nesting=1,
                          strict_enumeration_warning=any([enumerate1]))

    if quantity == "loss":
        actual = elbo.loss(model, guide) / num_particles
        expected = kl.item()
        assert_equal(actual, expected, prec=prec, msg="".join([
            "\nexpected = {}".format(expected),
            "\n actual = {}".format(actual),
        ]))
    else:
        elbo.loss_and_grads(model, guide)
        actual = q.grad / num_particles
        expected = grad(kl, [q])[0]
        assert_equal(actual, expected, prec=prec, msg="".join([
            "\nexpected = {}".format(expected.detach().cpu().numpy()),
            "\n actual = {}".format(actual.detach().cpu().numpy()),
        ]))
Developer: lewisKit, Project: pyro, Lines: 35, Source: test_enum.py
Example 10: test_dynamic_lr
def test_dynamic_lr(scheduler, num_steps):
    pyro.clear_param_store()

    def model():
        sample = pyro.sample('latent', Normal(torch.tensor(0.), torch.tensor(0.3)))
        return pyro.sample('obs', Normal(sample, torch.tensor(0.2)), obs=torch.tensor(0.1))

    def guide():
        loc = pyro.param('loc', torch.tensor(0.))
        scale = pyro.param('scale', torch.tensor(0.5))
        pyro.sample('latent', Normal(loc, scale))

    svi = SVI(model, guide, scheduler, loss=TraceGraph_ELBO())
    for epoch in range(2):
        scheduler.set_epoch(epoch)
        for _ in range(num_steps):
            svi.step()
        if epoch == 1:
            loc = pyro.param('loc')
            scale = pyro.param('scale')
            opt = scheduler.optim_objs[loc].optimizer
            assert opt.state_dict()['param_groups'][0]['lr'] == 0.02
            assert opt.state_dict()['param_groups'][0]['initial_lr'] == 0.01
            opt = scheduler.optim_objs[scale].optimizer
            assert opt.state_dict()['param_groups'][0]['lr'] == 0.02
            assert opt.state_dict()['param_groups'][0]['initial_lr'] == 0.01
Developer: lewisKit, Project: pyro, Lines: 26, Source: test_optim.py
Example 11: test_dirichlet_bernoulli
def test_dirichlet_bernoulli(Elbo, vectorized):
    pyro.clear_param_store()
    data = torch.tensor([1.0] * 6 + [0.0] * 4)

    def model1(data):
        concentration0 = torch.tensor([10.0, 10.0])
        f = pyro.sample("latent_fairness", dist.Dirichlet(concentration0))[1]
        for i in pyro.irange("irange", len(data)):
            pyro.sample("obs_{}".format(i), dist.Bernoulli(f), obs=data[i])

    def model2(data):
        concentration0 = torch.tensor([10.0, 10.0])
        f = pyro.sample("latent_fairness", dist.Dirichlet(concentration0))[1]
        pyro.sample("obs", dist.Bernoulli(f).expand_by(data.shape).independent(1),
                    obs=data)

    model = model2 if vectorized else model1

    def guide(data):
        concentration_q = pyro.param("concentration_q", torch.tensor([15.0, 15.0]),
                                     constraint=constraints.positive)
        pyro.sample("latent_fairness", dist.Dirichlet(concentration_q))

    elbo = Elbo(num_particles=7, strict_enumeration_warning=False)
    optim = Adam({"lr": 0.0005, "betas": (0.90, 0.999)})
    svi = SVI(model, guide, optim, elbo)
    for step in range(40):
        svi.step(data)
Developer: lewisKit, Project: pyro, Lines: 28, Source: test_jit.py
Example 12: test_gmm_batch_iter_discrete_traces
def test_gmm_batch_iter_discrete_traces(model, data_size, graph_type):
    pyro.clear_param_store()
    data = torch.arange(0, data_size)
    model = config_enumerate(model)
    traces = list(iter_discrete_traces(graph_type, model, data=data))
    # This vectorized version is independent of data_size:
    assert len(traces) == 2
Developer: lewisKit, Project: pyro, Lines: 7, Source: test_enum.py
Example 13: test_random_module
def test_random_module(self):
    pyro.clear_param_store()
    lifted_tr = poutine.trace(pyro.random_module("name", self.model, prior=self.prior)).get_trace()
    for name in lifted_tr.nodes.keys():
        if lifted_tr.nodes[name]["type"] == "param":
            assert lifted_tr.nodes[name]["type"] == "sample"
            assert not lifted_tr.nodes[name]["is_observed"]
Developer: lewisKit, Project: pyro, Lines: 7, Source: test_poutines.py
Example 14: test_elbo_hmm_in_guide
def test_elbo_hmm_in_guide(enumerate1, num_steps):
    pyro.clear_param_store()
    data = torch.ones(num_steps)
    init_probs = torch.tensor([0.5, 0.5])

    def model(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        emission_probs = pyro.param("emission_probs",
                                    torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                    constraint=constraints.simplex)
        x = None
        for i, y in enumerate(data):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))
            pyro.sample("y_{}".format(i), dist.Categorical(emission_probs[x]), obs=y)

    @config_enumerate(default=enumerate1)
    def guide(data):
        transition_probs = pyro.param("transition_probs",
                                      torch.tensor([[0.75, 0.25], [0.25, 0.75]]),
                                      constraint=constraints.simplex)
        x = None
        for i, y in enumerate(data):
            probs = init_probs if x is None else transition_probs[x]
            x = pyro.sample("x_{}".format(i), dist.Categorical(probs))

    elbo = TraceEnum_ELBO(max_iarange_nesting=0)
    elbo.loss_and_grads(model, guide, data)

    # These golden values simply test agreement between parallel and sequential.
    expected_grads = {
        2: {
            "transition_probs": [[0.1029949, -0.1029949], [0.1029949, -0.1029949]],
            "emission_probs": [[0.75, -0.75], [0.25, -0.25]],
        },
        3: {
            "transition_probs": [[0.25748726, -0.25748726], [0.25748726, -0.25748726]],
            "emission_probs": [[1.125, -1.125], [0.375, -0.375]],
        },
        10: {
            "transition_probs": [[1.64832076, -1.64832076], [1.64832076, -1.64832076]],
            "emission_probs": [[3.75, -3.75], [1.25, -1.25]],
        },
        20: {
            "transition_probs": [[3.70781687, -3.70781687], [3.70781687, -3.70781687]],
            "emission_probs": [[7.5, -7.5], [2.5, -2.5]],
        },
    }

    for name, value in pyro.get_param_store().named_parameters():
        actual = value.grad
        expected = torch.tensor(expected_grads[num_steps][name])
        assert_equal(actual, expected, msg=''.join([
            '\nexpected {}.grad = {}'.format(name, expected.cpu().numpy()),
            '\n actual {}.grad = {}'.format(name, actual.detach().cpu().numpy()),
        ]))
Developer: lewisKit, Project: pyro, Lines: 59, Source: test_enum.py
Example 15: assert_error
def assert_error(model, guide, elbo):
    """
    Assert that inference fails with an error.
    """
    pyro.clear_param_store()
    inference = SVI(model, guide, Adam({"lr": 1e-6}), elbo)
    with pytest.raises((NotImplementedError, UserWarning, KeyError, ValueError, RuntimeError)):
        inference.step()
Developer: lewisKit, Project: pyro, Lines: 8, Source: test_valid_models.py
Example 16: test_duplicate_obs_name
def test_duplicate_obs_name(self):
    pyro.clear_param_store()
    adam = optim.Adam({"lr": .001})
    svi = SVI(self.duplicate_obs, self.guide, adam, loss="ELBO", trace_graph=False)
    with pytest.raises(RuntimeError):
        svi.step()
Developer: Magica-Chen, Project: pyro, Lines: 8, Source: test_inference.py
Example 17: test_extra_samples
def test_extra_samples(self):
    pyro.clear_param_store()
    adam = optim.Adam({"lr": .001})
    svi = SVI(self.model, self.guide, adam, loss="ELBO", trace_graph=False)
    with pytest.warns(Warning):
        svi.step()
Developer: Magica-Chen, Project: pyro, Lines: 8, Source: test_inference.py
Example 18: test_svi_step_smoke
def test_svi_step_smoke(model, guide, enum_discrete, trace_graph):
    pyro.clear_param_store()
    data = Variable(torch.Tensor([0, 1, 9]))
    optimizer = pyro.optim.Adam({"lr": .001})
    inference = SVI(model, guide, optimizer, loss="ELBO",
                    trace_graph=trace_graph, enum_discrete=enum_discrete)
    with xfail_if_not_implemented():
        inference.step(data)
Developer: Magica-Chen, Project: pyro, Lines: 9, Source: test_enum.py
Example 19: test_random_module
def test_random_module(nn_module):
    pyro.clear_param_store()
    nn_module = nn_module()
    p = torch.ones(2, 2)
    prior = dist.Bernoulli(p)
    lifted_mod = pyro.random_module("module", nn_module, prior)
    nn_module = lifted_mod()
    for name, parameter in nn_module.named_parameters():
        assert torch.equal(torch.ones(2, 2), parameter.data)
Developer: lewisKit, Project: pyro, Lines: 9, Source: test_module.py
Example 20: do_elbo_test
def do_elbo_test(self, reparameterized, n_steps, lr, prec, beta1,
                 difficulty=1.0, model_permutation=False):
    n_repa_nodes = torch.sum(self.which_nodes_reparam) if not reparameterized \
        else len(self.q_topo_sort)
    logger.info((" - - - DO GAUSSIAN %d-LAYERED PYRAMID ELBO TEST " +
                 "(with a total of %d RVs) [reparameterized=%s; %d/%d; perm=%s] - - -") %
                (self.N, (2 ** self.N) - 1, reparameterized, n_repa_nodes,
                 len(self.q_topo_sort), model_permutation))
    pyro.clear_param_store()

    # check graph structure is as expected but only for N=2
    if self.N == 2:
        guide_trace = pyro.poutine.trace(self.guide,
                                         graph_type="dense").get_trace(reparameterized=reparameterized,
                                                                       model_permutation=model_permutation,
                                                                       difficulty=difficulty)
        expected_nodes = set(['log_sig_1R', 'kappa_1_1L', '_INPUT', 'constant_term_loc_latent_1R', '_RETURN',
                              'loc_latent_1R', 'loc_latent_1', 'constant_term_loc_latent_1', 'loc_latent_1L',
                              'constant_term_loc_latent_1L', 'log_sig_1L', 'kappa_1_1R', 'kappa_1R_1L',
                              'log_sig_1'])
        expected_edges = set([('loc_latent_1R', 'loc_latent_1'), ('loc_latent_1L', 'loc_latent_1R'),
                              ('loc_latent_1L', 'loc_latent_1')])
        assert expected_nodes == set(guide_trace.nodes)
        assert expected_edges == set(guide_trace.edges)

    adam = optim.Adam({"lr": lr, "betas": (beta1, 0.999)})
    svi = SVI(self.model, self.guide, adam, loss=TraceGraph_ELBO())

    for step in range(n_steps):
        t0 = time.time()
        svi.step(reparameterized=reparameterized, model_permutation=model_permutation, difficulty=difficulty)

        if step % 5000 == 0 or step == n_steps - 1:
            log_sig_errors = []
            for node in self.target_lambdas:
                target_log_sig = -0.5 * torch.log(self.target_lambdas[node])
                log_sig_error = param_mse('log_sig_' + node, target_log_sig)
                log_sig_errors.append(log_sig_error)
            max_log_sig_error = np.max(log_sig_errors)
            min_log_sig_error = np.min(log_sig_errors)
            mean_log_sig_error = np.mean(log_sig_errors)
            leftmost_node = self.q_topo_sort[0]
            leftmost_constant_error = param_mse('constant_term_' + leftmost_node,
                                                self.target_leftmost_constant)
            almost_leftmost_constant_error = param_mse('constant_term_' + leftmost_node[:-1] + 'R',
                                                       self.target_almost_leftmost_constant)

            logger.debug("[mean function constant errors (partial)] %.4f %.4f" %
                         (leftmost_constant_error, almost_leftmost_constant_error))
            logger.debug("[min/mean/max log(scale) errors] %.4f %.4f %.4f" %
                         (min_log_sig_error, mean_log_sig_error, max_log_sig_error))
            logger.debug("[step time = %.3f; N = %d; step = %d]\n" % (time.time() - t0, self.N, step))

    assert_equal(0.0, max_log_sig_error, prec=prec)
    assert_equal(0.0, leftmost_constant_error, prec=prec)
    assert_equal(0.0, almost_leftmost_constant_error, prec=prec)
Developer: lewisKit, Project: pyro, Lines: 56, Source: test_conjugate_gaussian_models.py
Note: The pyro.clear_param_store examples in this article were collected by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not repost without permission.