本文整理汇总了Python中utils.cdf_to_pdf函数的典型用法代码示例。如果您正苦于以下问题:Python cdf_to_pdf函数的具体用法?Python cdf_to_pdf怎么用?Python cdf_to_pdf使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了cdf_to_pdf函数的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: generate_information_weight_matrix
def generate_information_weight_matrix(expert_predictions, average_distribution, eps=1e-14, KL_weight=1.0, cross_entropy_weight=1.0, expert_weights=None):
    """Score how informative each expert prediction is, per validation sample.

    Combines, for every (expert, validation sample) pair, the KL divergence of
    the expert's pdf from the average pdf with the per-bin cross-entropy of the
    expert's cdf against the average distribution.

    :param expert_predictions: cdfs, shape (NUM_EXPERTS, NUM_VALIDATIONS, 600)
    :param average_distribution: average cdf, shape (600,)
    :param eps: numerical floor inside the logs
    :param KL_weight: multiplier on the KL term
    :param cross_entropy_weight: multiplier on the cross-entropy term
    :param expert_weights: optional per-expert multiplicative weights
    :return: weight tensor, shape (NUM_EXPERTS, NUM_VALIDATIONS, 600)
    """
    expert_pdf = utils.cdf_to_pdf(expert_predictions)
    avg_pdf = utils.cdf_to_pdf(average_distribution)
    # KL is undefined where Q=0 while P>0: lift empty bins of the average pdf
    # to half its smallest positive mass.
    avg_pdf[avg_pdf <= 0] = np.min(avg_pdf[avg_pdf > 0]) / 2
    kl_terms = expert_pdf * (np.log(expert_pdf) - np.log(avg_pdf[None, None, :]))
    kl_terms[expert_pdf <= 0] = 0  # x*log(x) -> 0 as x -> 0
    kl_per_sample = np.sum(kl_terms, axis=2)  # (NUM_EXPERTS, NUM_VALIDATIONS)
    assert np.isfinite(kl_per_sample).all()

    clipped = np.clip(expert_predictions, 0.0, 1.0)
    avg = average_distribution[None, None, :]
    cross_entropy = -(avg * np.log(clipped + eps)
                      + (1. - avg) * np.log(1. - clipped + eps))
    cross_entropy[cross_entropy < 0] = 0  # (NUM_EXPERTS, NUM_VALIDATIONS, 600)
    assert np.isfinite(cross_entropy).all()

    weights = cross_entropy_weight * cross_entropy + KL_weight * kl_per_sample[:, :, None]
    if expert_weights is not None:
        weights = weights * expert_weights[:, None, None]
    # Experts whose cdf coincides with the average carry no information:
    # give them (almost) no weight unless absolutely necessary.
    weights[np.where((expert_predictions == average_distribution[None, None, :]).all(axis=2))] = 1e-14
    return weights
开发者ID:317070,项目名称:kaggle-heart,代码行数:23,代码来源:merge_predictions_jeroen.py
示例2: weighted_geom_method
def weighted_geom_method(prediction_matrix, average, eps=1e-14, expert_weights=None, *args, **kwargs):
    """Merge expert cdf predictions via an information-weighted geometric mean.

    :param prediction_matrix: cdfs, shape (NUM_EXPERTS, NUM_VALIDATIONS, 600)
    :param average: average distribution used for weighting, shape (600,)
    :param eps: numerical floor avoiding log(0) and division by zero
    :param expert_weights: optional per-expert multiplicative weights
    :return: merged cdf, shape (600,)
    """
    # FIX: use .size (O(1)) instead of len(prediction_matrix.flatten()) == 0,
    # which copied the entire array just to test emptiness.
    if prediction_matrix.size == 0:
        return np.zeros(600)
    weights = generate_information_weight_matrix(prediction_matrix, average, *args, expert_weights=expert_weights, **kwargs)
    assert np.isfinite(weights).all()
    pdf = utils.cdf_to_pdf(prediction_matrix)
    x_log = np.log(pdf)
    x_log[pdf <= 0] = np.log(eps)  # floor zero-probability bins instead of -inf
    # Weighted geometric mean in log space over the (expert, validation) axes.
    geom_av_log = np.sum(x_log * weights, axis=(0, 1)) / (np.sum(weights, axis=(0, 1)) + eps)
    geom_av_log = geom_av_log - np.max(geom_av_log)  # stabilizes rounding errors
    geom_av = np.exp(geom_av_log)
    res = np.cumsum(geom_av / np.sum(geom_av))
    return res
开发者ID:317070,项目名称:kaggle-heart,代码行数:15,代码来源:merge_predictions.py
示例3: prodav
def prodav(x, **kwargs):
    """Merge cdfs by the normalised product of their pdfs, returned as a cdf."""
    if len(x) == 0:
        return np.zeros(600)
    merged_pdf = utils.norm_prod(utils.cdf_to_pdf(x))
    return np.cumsum(merged_pdf)
开发者ID:317070,项目名称:kaggle-heart,代码行数:4,代码来源:merge_predictions_jeroen.py
示例4: geomav
def geomav(x, **kwargs):
    """Merge cdfs via the normalised geometric average of their pdfs."""
    if len(x) == 0:
        return np.zeros(600)
    return np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
开发者ID:317070,项目名称:kaggle-heart,代码行数:5,代码来源:merge_predictions_jeroen.py
示例5: build_objective
}
# Objective: L2 regularisation strengths (both disabled in this config).
# NOTE(review): neither constant is referenced in the visible snippet —
# presumably consumed by the training framework; confirm against the project.
l2_weight = 0.000
l2_weight_out = 0.000
def build_objective(interface_layers):
    """Build the Kaggle objective with an L2 penalty on the layers marked regularizable."""
    penalty = nn.regularization.regularize_layer_params_weighted(
        interface_layers["regularizable"],
        nn.regularization.l2,
    )
    return objectives.KaggleObjective(interface_layers["outputs"], penalty=penalty)
# Testing
# NOTE(review): rebinds the module name `postprocess` to its function of the same name.
postprocess = postprocess.postprocess
test_time_augmentations = 100 # more augmentations since we only use single slices
# Average test-time-augmentation cdfs with a geometric mean of their pdfs.
tta_average_method = lambda x: np.cumsum(utils.norm_geometric_average(utils.cdf_to_pdf(x)))
# nonlinearity putting a lower bound on its output
def lb_softplus(lb):
    """Return a softplus nonlinearity shifted up by ``lb`` (so outputs exceed ``lb``)."""
    def shifted_softplus(x):
        return nn.nonlinearities.softplus(x) + lb
    return shifted_softplus
init = nn.init.Orthogonal()
rnn_layer = functools.partial(nn.layers.RecurrentLayer,
W_in_to_hid=init,
W_hid_to_hid=init,
b=nn.init.Constant(0.1),
nonlinearity=nn.nonlinearities.rectify,
hid_init=nn.init.Constant(0.),
开发者ID:317070,项目名称:kaggle-heart,代码行数:31,代码来源:je_os_segmentandintegrate_noreg_bn.py
示例6: make_monotone_distribution_fast
def make_monotone_distribution_fast(distributions):
    """Force cdfs to be monotone by clipping their pdf bins to [0, 1] and re-accumulating."""
    clipped_pdf = np.clip(utils.cdf_to_pdf(distributions), 0.0, 1.0)
    return utils.pdf_to_cdf(clipped_pdf)
开发者ID:317070,项目名称:kaggle-heart,代码行数:2,代码来源:postprocess.py
示例7: optimize_expert_weights
def optimize_expert_weights(expert_predictions,
average_distribution,
mask_matrix=None,
targets=None,
num_cross_validation_masks=2,
num_folds=1,
eps=1e-14,
cutoff=0.01,
do_optimization=True,
expert_weights=None,
optimal_params=None,
special_average=False,
*args, **kwargs):
"""
:param expert_predictions: experts x validation_samples x 600 x
:param mask_matrix: experts x validation_samples x
:param targets: validation_samples x 600 x
:param average_distribution: 600 x
:param eps:
:return:
"""
if expert_weights is not None:
mask_matrix = mask_matrix[expert_weights>cutoff,:] # remove
expert_predictions = expert_predictions[expert_weights>cutoff,:,:] # remove
NUM_EXPERTS = expert_predictions.shape[0]
NUM_FILTER_PARAMETERS = 2
WINDOW_SIZE = 599
# optimizing weights
X = theano.shared(expert_predictions.astype('float32')) # source predictions = (NUM_EXPERTS, NUM_VALIDATIONS, 600)
x_coor = theano.shared(np.linspace(-(WINDOW_SIZE-1)/2, (WINDOW_SIZE-1)/2, num=WINDOW_SIZE, dtype='float32')) # targets = (NUM_VALIDATIONS, 600)
NUM_VALIDATIONS = expert_predictions.shape[1]
ind = theano.shared(np.zeros((NUM_VALIDATIONS,), dtype='int32')) # targets = (NUM_VALIDATIONS, 600)
if optimal_params is None:
params_init = np.concatenate([ np.ones((NUM_EXPERTS,), dtype='float32'),
np.ones((NUM_FILTER_PARAMETERS,), dtype='float32') ])
else:
params_init = optimal_params.astype('float32')
params = theano.shared(params_init.astype('float32'))
#params = T.vector('params', dtype='float32') # expert weights = (NUM_EXPERTS,)
C = 0.0001
if not special_average:
# Create theano expression
# inputs:
W = params[:NUM_EXPERTS]
weights = T.nnet.softmax(W.dimshuffle('x',0)).dimshuffle(1, 0)
preds = X.take(ind, axis=1)
mask = theano.shared(mask_matrix.astype('float32')).take(ind, axis=1)
# expression
masked_weights = mask * weights
tot_masked_weights = T.clip(masked_weights.sum(axis=0), 1e-7, utils.maxfloat)
preds_weighted_masked = preds * masked_weights.dimshuffle(0, 1, 'x')
cumulative_distribution = preds_weighted_masked.sum(axis=0) / tot_masked_weights.dimshuffle(0, 'x')
# loss
l1_loss = weights.sum()
else:
# calculate the weighted average for each of these experts
weights = generate_information_weight_matrix(expert_predictions, average_distribution) # = (NUM_EXPERTS, NUM_VALIDATIONS, 600)
weight_matrix = theano.shared((mask_matrix[:,:,None]*weights).astype('float32'))
pdf = utils.cdf_to_pdf(expert_predictions)
x_log = np.log(pdf)
x_log[pdf<=0] = np.log(eps)
# Compute the mean
X_log = theano.shared(x_log.astype('float32')) # source predictions = (NUM_EXPERTS, NUM_VALIDATIONS, 600)
X_log_i = X_log.take(ind, axis=1)
w_i = weight_matrix.take(ind, axis=1)
W = params[:NUM_EXPERTS]
w_i = w_i * T.nnet.softmax(W.dimshuffle('x',0)).dimshuffle(1, 0, 'x')
#the different predictions, are the experts
geom_av_log = T.sum(X_log_i * w_i, axis=0) / (T.sum(w_i, axis=0) + eps)
geom_av_log = geom_av_log - T.max(geom_av_log,axis=-1).dimshuffle(0,'x') # stabilizes rounding errors?
geom_av = T.exp(geom_av_log)
geom_pdf = geom_av/T.sum(geom_av,axis=-1).dimshuffle(0,'x')
l1_loss = 0
cumulative_distribution = T.cumsum(geom_pdf, axis=-1)
if not do_optimization:
ind.set_value(range(NUM_VALIDATIONS))
f_eval = theano.function([], cumulative_distribution)
cumulative_distribution = f_eval()
return cumulative_distribution[0]
else:
# convert to theano_values (for regularization)
t_valid = theano.shared(targets.astype('float32')) # targets = (NUM_VALIDATIONS, 600)
t_train = theano.shared(targets.astype('float32')) # targets = (NUM_VALIDATIONS, 600)
CRPS_train = T.mean((cumulative_distribution - t_train.take(ind, axis=0))**2) + C * l1_loss
CRPS_valid = T.mean((cumulative_distribution - t_valid.take(ind, axis=0))**2)
iter_optimize = theano.function([], CRPS_train, on_unused_input="ignore", updates=lasagne.updates.adam(CRPS_train, [params], 1.0))
f_val = theano.function([], CRPS_valid)
#.........这里部分代码省略.........
开发者ID:317070,项目名称:kaggle-heart,代码行数:101,代码来源:merge_predictions.py
注:本文中的utils.cdf_to_pdf函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论