This article collects typical usage examples of the Python function sklearn.utils.extmath.cartesian. If you are unsure what cartesian does, how to call it, or what real-world uses look like, the curated examples below should help.
Twenty code examples of the cartesian function are shown below, sorted by popularity by default.
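As a quick orientation before the examples: cartesian takes a sequence of 1-D arrays and returns every combination of one element from each, with the last input varying fastest (C order). A minimal sketch, with the output written out for illustration:

import numpy as np
from sklearn.utils.extmath import cartesian

# Every (a, b) pair; the last array cycles fastest
print(cartesian((np.array([1, 2]), np.array([10, 20, 30]))))
# -> [[ 1 10]
#     [ 1 20]
#     [ 1 30]
#     [ 2 10]
#     [ 2 20]
#     [ 2 30]]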
Example 1: test_cartesian
def test_cartesian():
    # Check if cartesian product delivers the right results
    axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
    true_out = np.array(
        [
            [1, 4, 6],
            [1, 4, 7],
            [1, 5, 6],
            [1, 5, 7],
            [2, 4, 6],
            [2, 4, 7],
            [2, 5, 6],
            [2, 5, 7],
            [3, 4, 6],
            [3, 4, 7],
            [3, 5, 6],
            [3, 5, 7],
        ]
    )
    out = cartesian(axes)
    assert_array_equal(true_out, out)

    # check single axis
    x = np.arange(3)
    assert_array_equal(x[:, np.newaxis], cartesian((x,)))
Author: arvindchari88 | Project: newGitTest | Lines: 28 | Source file: test_extmath.py
Example 2: combine_args
def combine_args(**argarrs):  # argarrs maps each argument name to an array of candidate values
    # Get all permutations of the arguments. Returns a pandas DataFrame with
    # the argument names as the columns and the cartesian product of all their
    # possible values as the rows.
    # Note that this can't handle None values (at least not yet).
    arg_keys = list(argarrs.keys())  # list() so the keys can be indexed below (Python 3)
    if len(arg_keys) == 0:
        raise ValueError("Must be at least one keyword argument (if you don't"
                         " want to train multiple models just use lists with"
                         " single entries)")
    arg_tup = ()
    str_lens = []
    type_list = []
    M = 1
    for key in arg_keys:
        str_vals = [str(entry) for entry in argarrs[key]]
        str_lens.extend([len(entry) for entry in str_vals])
        type_list.append(argarrs[key].dtype)
        M *= len(argarrs[key])
        arg_tup += (str_vals,)
    max_str_lens = max(str_lens)
    # Build the product on string representations, then cast each column back
    # to its original dtype ('U' rather than 'S' so Python 3 str comparison works).
    all_arg_combos = np.zeros((M, len(arg_keys)), dtype='U{0:d}'.format(max_str_lens))
    all_arg_combos = pd.DataFrame(cartesian(arg_tup, all_arg_combos), columns=arg_keys)
    for i, currtype in enumerate(type_list):
        if currtype == np.bool_:
            all_arg_combos[arg_keys[i]] = (all_arg_combos[arg_keys[i]] == 'True')
        else:
            all_arg_combos[arg_keys[i]] = all_arg_combos[arg_keys[i]].astype(currtype)
    return all_arg_combos
Author: AkiraKane | Project: dataprojects | Lines: 28 | Source file: train_streaming_funcs.py
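For reference, a minimal sketch of how combine_args might be called; the keyword names and values here are hypothetical, not from the original project:

import numpy as np

# Hypothetical hyperparameter grid: yields a 4-row DataFrame with every
# (alpha, n_hidden) pair, columns cast back to float and int respectively.
grid = combine_args(alpha=np.array([0.1, 0.5]),
                    n_hidden=np.array([16, 32]))
print(grid)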
Example 3: compute_reward
def compute_reward(grid_map, cell_list, passenger_list, rew):
    """
    Compute the reward matrix.

    Args:
        grid_map (list): list containing the grid structure;
        cell_list (list): list of non-wall cells;
        passenger_list (list): list of passenger cells;
        rew (tuple): rewards obtained in goal states.

    Returns:
        The reward matrix.
    """
    g = np.array(grid_map)
    c = np.array(cell_list)
    n_states = len(cell_list) * 2 ** len(passenger_list)
    r = np.zeros((n_states, 4, n_states))
    directions = [[-1, 0], [1, 0], [0, -1], [0, 1]]
    # Every on/off combination of the passengers, e.g. [[0 0], [0 1], [1 0], [1 1]]
    passenger_states = cartesian([[0, 1]] * len(passenger_list))
    for goal in np.argwhere(g == 'G'):
        for a in range(len(directions)):
            prev_state = goal - directions[a]
            # row-wise membership test: is prev_state a non-wall cell?
            if (c == prev_state).all(axis=1).any():
                for i in range(len(passenger_states)):
                    i_idx = np.where((c == prev_state).all(axis=1))[0] + len(
                        cell_list) * i
                    j_idx = np.where((c == goal).all(axis=1))[0] + len(
                        cell_list) * i
                    r[i_idx, a, j_idx] = rew[np.sum(passenger_states[i])]

    return r
Author: ronald-xie | Project: mushroom | Lines: 34 | Source file: taxi.py
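The cartesian([[0, 1]] * len(passenger_list)) call above is what enumerates every pickup/no-pickup combination. Standalone, assuming two passengers:

from sklearn.utils.extmath import cartesian

# One row per on/off combination of 2 passengers
print(cartesian([[0, 1]] * 2))
# -> [[0 0]
#     [0 1]
#     [1 0]
#     [1 1]]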
Example 4: _initialize_event_orders
def _initialize_event_orders(self, timestamps, order_type):
    symb_matrix = mth.cartesian([np.array(timestamps), self.event_matrix.columns.values])
    symb_matrix = symb_matrix.reshape(len(timestamps), len(self.event_matrix.columns.values), 2)
    order_timestamps = symb_matrix[~np.isnan(self.event_matrix.values), 0]
    order_dataframe = pd.DataFrame(symb_matrix[~np.isnan(self.event_matrix.values), 1], columns=['Symbol'])
    order_dataframe['Buy'] = order_type
    return (order_dataframe, order_timestamps)
Author: jasongforbes | Project: ComputationalInvesting | Lines: 7 | Source file: SimpleEventToMarketOrderConverter.py
Example 5: calc_cartesian_group_assignment_p
def calc_cartesian_group_assignment_p(group_assignment_p_list):
    cart = np.array(
        [[np.prod(values) for values in cartesian(line)]
         for line in zip(*group_assignment_p_list)])
    return cart
Author: hyptrails | Project: mixedtrails-notebook | Lines: 7 | Source file: common.py
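A small illustration of what this computes, assuming each input array holds one row of group-assignment probabilities per observation (the numbers are made up):

import numpy as np
from sklearn.utils.extmath import cartesian

p_a = np.array([[0.5, 0.5]])  # P(group 0), P(group 1) from source A
p_b = np.array([[0.2, 0.8]])  # the same observation, from source B
# Joint probability of every (A-group, B-group) pair, per observation:
print(calc_cartesian_group_assignment_p([p_a, p_b]))
# -> [[0.1 0.4 0.1 0.4]]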
Example 6: _set_state_combinations_if_necessary
def _set_state_combinations_if_necessary(self):
    """Get centroids"""
    # If we import sklearn at the top of the file then auto doc fails.
    if (self.state_combinations is None or
            self.state_combinations.shape[1] != len(self.model)):
        from sklearn.utils.extmath import cartesian
        centroids = [model['states'] for model in self.model]
        self.state_combinations = cartesian(centroids)
Author: nilmtk | Project: nilmtk | Lines: 8 | Source file: combinatorial_optimisation.py
Example 7: setUp
def setUp(self):
    self.useLocal = False
    if self.useLocal:
        self.tempdir = tempdir = '.'
    else:
        self.tempdir = tempdir = mkdtemp(prefix='patty-analytics')

    self.drivemapLas = os.path.join(tempdir, 'testDriveMap.las')
    self.sourcelas = os.path.join(tempdir, 'testSource.las')
    self.footprint_csv = os.path.join(tempdir, 'testFootprint.csv')
    self.foutlas = os.path.join(tempdir, 'testOutput.las')

    self.min = -10
    self.max = 10
    self.num_rows = 1000

    # Create plane with a pyramid
    dm_pct = 0.5
    dm_rows = np.round(self.num_rows * dm_pct)
    dm_min = self.min * dm_pct
    dm_max = self.max * dm_pct

    delta = dm_max / dm_rows
    shape_side = dm_max - dm_min

    dm_offset = [0, 0, 0]
    self.dense_obj_offset = [3, 2, -(1 + shape_side / 2)]

    # make drivemap
    plane_row = np.linspace(
        start=self.min, stop=self.max, num=self.num_rows)
    plane_points = cartesian((plane_row, plane_row, [0]))

    shape_points, footprint = make_tri_pyramid_with_base(
        shape_side, delta, dm_offset)
    np.savetxt(self.footprint_csv, footprint, fmt='%.3f', delimiter=',')

    dm_points = np.vstack([plane_points, shape_points])
    plane_grid = np.zeros((dm_points.shape[0], 6), dtype=np.float32)
    plane_grid[:, 0:3] = dm_points

    self.drivemap_pc = pcl.PointCloudXYZRGB(plane_grid)
    self.drivemap_pc = downsample_voxel(self.drivemap_pc,
                                        voxel_size=delta * 20)
    # utils.set_registration(self.drivemap_pc)
    utils.save(self.drivemap_pc, self.drivemapLas)

    # Create a simple pyramid
    dense_grid = np.zeros((shape_points.shape[0], 6), dtype=np.float32)
    dense_grid[:, 0:3] = shape_points + self.dense_obj_offset

    self.source_pc = pcl.PointCloudXYZRGB(dense_grid)
    self.source_pc = downsample_voxel(self.source_pc, voxel_size=delta * 5)
    utils.save(self.source_pc, self.sourcelas)
Author: NLeSC | Project: PattyAnalytics | Lines: 55 | Source file: test_registration.py
Example 8: interpolate_image
def interpolate_image(image_data, zoom_factor):
    X = np.arange(image_data.shape[0])
    Y = np.arange(image_data.shape[1])
    rgi = RegularGridInterpolator((X, Y), image_data)
    grid_x, grid_y = (np.linspace(0, len(X) - 1, zoom_factor * len(X)),
                      np.linspace(0, len(Y) - 1, zoom_factor * len(Y)))
    # Evaluate the interpolator on every (x, y) pair of the finer grid
    return rgi(cartesian([grid_x, grid_y])).reshape(grid_x.shape[0], grid_y.shape[0])
Author: hookerlab | Project: huntington-with-pbr28 | Lines: 11 | Source file: hd_classifier.py
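A usage sketch, assuming the function above and its imports (numpy, scipy's RegularGridInterpolator, and cartesian) are in scope:

import numpy as np

# Upsample a 4x4 grayscale image by a factor of 2 -> 8x8
image = np.arange(16, dtype=float).reshape(4, 4)
zoomed = interpolate_image(image, zoom_factor=2)
print(zoomed.shape)  # (8, 8)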
Example 9: voxel2voxels_in_volume
def voxel2voxels_in_volume(x, y, z, stepX, stepY, stepZ):
    """
    Returns a numpy array with all the voxels in the volume corresponding to representative (x, y, z).

    Here we assume that the representative is the upper, left, front pixel of a
    (stepX, stepY, stepZ) sized volume.
    """
    # This is what Andrew originally used. Probably not fully correct, but practical.
    # We could also just return slices and let numpy do its tiling magic...
    # This should be hidden in an up/down sampler object
    return cartesian((np.arange(x, x + stepX),
                      np.arange(y, y + stepY),
                      np.arange(z, z + stepZ)))
Author: strawlab | Project: braincode | Lines: 12 | Source file: hierarchical.py
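For instance, the 2x2x2 block whose representative voxel is (0, 0, 0):

print(voxel2voxels_in_volume(0, 0, 0, 2, 2, 2))
# -> the 8 coordinates (0,0,0), (0,0,1), (0,1,0), ..., (1,1,1), one per row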
Example 10: test_cgauss_likelihood
def test_cgauss_likelihood():
    mu = np.array([0], dtype='float')
    sigma = np.array([2], dtype='float')
    x = np.linspace(-1, 2, 2)
    lapse = np.array([0], dtype='float')
    parameters = cartesian((mu, sigma, lapse, x))
    proportionMethod = PsiMarginal.pf(parameters, psyfun='cGauss')
    samples = np.random.normal(mu, sigma, (200000, 1))
    proportionSamples = np.empty([2, ])
    # cdf is p(X<=x); compute it through sampling to check the likelihood
    proportionSamples[0] = np.mean(samples <= x[0])
    proportionSamples[1] = np.mean(samples <= x[1])
    np.testing.assert_almost_equal(proportionSamples, proportionMethod, decimal=2)
Author: NNiehof | Project: Psi-staircase | Lines: 12 | Source file: test_psi.py
Example 11: stateact_to_feature
def stateact_to_feature(self, state, act, onlyindex=True):
    zedaind = []
    for nm, xs in sorted(self.feature_tiles.items()):
        val = None
        if nm == 'speedx':
            val = state.getSpeedX()
        elif nm == 'trackpos':
            val = state.getTrackPos()
        elif nm == 'angle':
            val = state.getAngle()

        inds = []
        if val is not None:
            # one of the scalar features above
            for i in range(len(xs) - 1):
                if xs[i][0] <= val < xs[i + 1][1]:
                    inds.append(i)
            zedaind.append(inds)
        elif nm == 'track':
            # remaining are track positions, lets get them
            tracks = np.array(state.getTrack()) / 200.
            sensors = []
            sensors.append(tracks[3])  # -40 degrees
            sensors.append((tracks[4] + tracks[5] + tracks[6]) / 3.)
            sensors.append((tracks[8] + tracks[9] + tracks[10]) / 3.)  # 0 degrees
            sensors.append((tracks[12] + tracks[13] + tracks[14]) / 3.)
            sensors.append(tracks[15])
            if self.arguments.show_sensors:
                print(sensors)
            for val in sensors:
                for i in range(len(xs) - 1):
                    if xs[i] <= val <= xs[i + 1]:
                        inds.append(i)
                        break
            zedaind.append(inds)
        else:
            assert False

    zedaind.append([act])
    assert len(zedaind) == len(self.w.shape), 'inds %s, w %s' % (str(zedaind), str(self.w.shape))
    if onlyindex:
        return tuple(zedaind)
    else:
        ft = np.zeros_like(self.w)
        for tot in cartesian(zedaind):
            ft[tuple(tot)] = 1
        return ft
Author: scientist1642 | Project: bot-ironman | Lines: 52 | Source file: ironmandriver.py
Example 12: optimize
def optimize(self):
    best_sharpe_ratio = 0
    best_allocation = []
    num_symbols = len(self.portfolio.get_symbols())
    # linspace needs an integer sample count, e.g. stepsize 0.1 -> 11 steps
    steps = numpy.linspace(0, 1, int(round(1 / self.stepsize)) + 1)
    allocations = cartesian([steps] * num_symbols)
    # keep only allocations whose weights sum to 1
    legal_allocations = allocations[numpy.where(allocations.sum(1) == 1)]
    for allocation in legal_allocations:
        sharpe = self.portfolio.simulate(allocation)[2]
        if sharpe > best_sharpe_ratio:
            best_sharpe_ratio = sharpe
            best_allocation = allocation
    return (best_allocation, best_sharpe_ratio)
Author: jasongforbes | Project: ComputationalInvesting | Lines: 13 | Source file: BruteOptimizer.py
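One caveat worth noting: allocations.sum(1) == 1 relies on exact floating-point equality, which can silently drop valid rows for step sizes that are not exactly representable. A more defensive filter (a sketch, not the original author's code):

import numpy as np
from sklearn.utils.extmath import cartesian

steps = np.linspace(0, 1, 11)          # step size 0.1
allocations = cartesian([steps] * 3)   # every 3-asset grid point
legal = allocations[np.isclose(allocations.sum(axis=1), 1.0)]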
Example 13: train
def train(self, metergroup, num_states_dict={}, **load_kwargs):
    """Train using 1D CO. Places the learnt model in the `model` attribute.

    Parameters
    ----------
    metergroup : a nilmtk.MeterGroup object

    Notes
    -----
    * only uses first chunk for each meter (TODO: handle all chunks).
    """
    if self.model:
        raise RuntimeError(
            "This implementation of Combinatorial Optimisation"
            " does not support multiple calls to `train`.")

    num_meters = len(metergroup.meters)
    if num_meters > 12:
        max_num_clusters = 2
    else:
        max_num_clusters = 3

    for i, meter in enumerate(metergroup.submeters().meters):
        print("Training model for submeter '{}'".format(meter))
        for chunk in meter.power_series(**load_kwargs):
            num_total_states = num_states_dict.get(meter)
            if num_total_states is not None:
                num_on_states = num_total_states - 1
            else:
                num_on_states = None
            states = cluster(chunk, max_num_clusters, num_on_states)
            self.model.append({
                'states': states,
                'training_metadata': meter})
            break  # TODO handle multiple chunks per appliance

    # Get centroids
    # If we import sklearn at the top of the file then auto doc fails.
    from sklearn.utils.extmath import cartesian
    centroids = [model['states'] for model in self.model]
    self.state_combinations = cartesian(centroids)
    # self.state_combinations is a 2D array: each column is a chan and
    # each row is a possible combination of power demand values, e.g.
    # [[0, 0, 0, 0], [0, 0, 0, 100], [0, 0, 50, 0], [0, 0, 50, 100], ...]

    print("Done training!")
Author: Kriechi | Project: nilmtk | Lines: 49 | Source file: combinatorial_optimisation.py
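The comment above can be reproduced in isolation: with two appliances whose learnt states are [0, 100] W and [0, 50] W, cartesian yields one row per joint on/off combination (a toy illustration, not nilmtk output):

import numpy as np
from sklearn.utils.extmath import cartesian

centroids = [np.array([0, 100]), np.array([0, 50])]
print(cartesian(centroids))
# -> [[  0   0]
#     [  0  50]
#     [100   0]
#     [100  50]]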
Example 14: spread_points_in_hypercube
def spread_points_in_hypercube(point_count, dimension_count):  # TODO rename points_spread_in_hypercube
    """
    Place points in a unit hypercube such that the minimum distance between
    points is approximately maximal. Euclidean distance is used.

    .. note:: Current implementation simply puts the points in a hypergrid

    Parameters
    ----------
    point_count : int
        Number of points to pick
    dimension_count : int
        Number of dimensions of the hypercube

    Returns
    -------
    np.array(shape=(point_count, dimension_count))
        Points spread approximately optimally across the hypercube.

    Raises
    ------
    ValueError
        When ``point_count < 0 or dimension_count < 1``

    Notes
    -----
    The exact solution to this problem is known for only a few `n`.

    References
    ----------
    .. [1] http://stackoverflow.com/a/2723764/1031434
    """
    # Current implementation simply puts points in a grid
    if point_count < 0:
        raise ValueError("point_count must be at least 0")
    if dimension_count < 1:
        raise ValueError("dimension_count must be at least 1")
    if point_count == 0:
        return np.empty(shape=(0, dimension_count))
    # int() because np.ceil returns a float and linspace needs an integer count
    side_count = int(np.ceil(point_count ** (1 / dimension_count)))  # points per side
    points = np.linspace(0, 1, side_count)
    points = cartesian([points] * dimension_count)
    return np.random.permutation(points)[:point_count]  # XXX permutation is unnecessary
Author: timdiels | Project: chicken_turtle_util | Lines: 45 | Source file: algorithms.py
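A quick usage sketch of the function above:

pts = spread_points_in_hypercube(point_count=5, dimension_count=2)
print(pts.shape)  # (5, 2): 5 points drawn from a 3x3 grid over [0, 1]^2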
Example 15: cartesian_prod_dicts_lists
def cartesian_prod_dicts_lists(the_dict):
    # takes a dictionary and produces a dictionary of the cartesian product of the input
    if not isinstance(the_dict, ordDict):
        warnings.warn('An ordered dict was not used. Thus if this function is called again'
                      ' with the same dict it might not produce the same results.')
    from sklearn.utils.extmath import cartesian

    stim_list = tuple([list(the_dict[key_name]) for key_name in the_dict])

    # cartesian has the last column change the fastest, thus is like c-indexing
    stim_cart_array = cartesian(stim_list)

    cart_dict = ordDict()
    # load up the vectors associated with keys to cart_dict
    for key_num, key_name in enumerate(the_dict):
        cart_dict[key_name] = stim_cart_array[:, key_num]

    return cart_dict
Author: deanpospisil | Project: v4cnn | Lines: 19 | Source file: d_misc.py
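A usage sketch, assuming ordDict is collections.OrderedDict as the warning suggests:

from collections import OrderedDict as ordDict

d = ordDict([('x', [1, 2]), ('y', [10, 20, 30])])
cart = cartesian_prod_dicts_lists(d)
print(cart['x'])  # [1 1 1 2 2 2]
print(cart['y'])  # [10 20 30 10 20 30]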
Example 16: generate_predictor_data
def generate_predictor_data(self):
    from sklearn.utils.extmath import cartesian
    ps = np.linspace(*self.train_p_range)
    Ts = np.linspace(*self.train_T_range)
    rhs = atanspace(*self.train_rh_range, scaling=2.5)
    data = cartesian([ps, Ts, rhs])
    # Remove some (for Innsbruck) unrealistic data
    remove = (
        # Lower atmosphere is rather warm
        ((data[:, 0] > 700) & (data[:, 1] < 230))
        # Middle atmosphere: neither too warm nor too cold
        # (parenthesized so the | does not escape the & — the original
        #  relied on operator precedence that did not match the comment)
        | ((data[:, 0] < 700) & (data[:, 0] > 400)
           & ((data[:, 1] > 300) | (data[:, 1] < 200)))
        # Upper atmosphere is rather cold
        | ((data[:, 0] < 400) & (data[:, 1] > 270))
    )
    data = data[~remove]
    # Calculate q
    data[:, 2] = data[:, 2] * qsat(p=data[:, 0], T=data[:, 1])
    return data
Author: chpolste | Project: MScAtmosphericSciences | Lines: 20 | Source file: fapgen.py
Example 17: __init__
def __init__(self, T, N, eta, tau0, kappa, lambda_init=np.asarray([])):
    """
    Arguments:
        T: Length of SNP sequence
        N: Total number of people in the population.
        eta: Hyperparameter for prior on haplotype weights pi
        tau0: A (positive) learning parameter that downweights early iterations
        kappa: Learning rate: exponential decay rate---should be between
            (0.5, 1.0] to guarantee asymptotic convergence.

    Note that if you pass the same data in every time and set kappa=0
    this class can also be used to do batch VB.
    """
    self._K = pow(2, T)
    self._T = T
    self._N = N

    # pi dist hyperparams
    self._eta = eta
    self._tau0 = tau0 + 1
    self._kappa = kappa

    # iteration counter, used for updating rho
    self._updatect = 0

    # Initialize the variational distribution q(pi|lambda)
    if lambda_init.shape == (self._K,):
        self._lambda = lambda_init
    else:
        # todo: not totally sure this is a sensible initialization
        self._lambda = np.random.gamma(10, 1. / 10, self._K)
    self._E_log_pi = dirichlet_expectation(self._lambda)
    self._exp_E_log_pi = np.exp(self._E_log_pi)

    # all theta values: one row per haplotype, entries in {0.01, 0.99}
    theta = cartesian(np.repeat(np.array([[0.01, 0.99]]), T, 0))
    self.logs_theta = np.zeros([self._K, self._T, 2])
    self.logs_theta[:, :, 0] = np.log(theta)
    self.logs_theta[:, :, 1] = np.log(1 - theta)
Author: vveitch | Project: genetics_code | Lines: 41 | Source file: single_level_fixed_haplotypes.py
Example 18: generate_state_combinations_all
def generate_state_combinations_all(self):
    mains = self.loc.elec.mains()
    from sklearn.utils.extmath import cartesian
    centroids = [model['states'] for model in self.co.model]
    state_combinations = cartesian(centroids)

    baseline = self.vampire_power
    if baseline is None:
        vampire_power = mains.vampire_power()
    else:
        vampire_power = baseline

    # Append the vampire power as an extra column before summing
    n_rows = state_combinations.shape[0]
    vampire_power_array = np.zeros((n_rows, 1)) + vampire_power
    state_combinations = np.hstack((state_combinations, vampire_power_array))
    summed_power_of_each_combination = np.sum(state_combinations, axis=1)

    self.vampire_power = vampire_power
    self.state_combinations = state_combinations
    self.summed_power_of_each_combination = summed_power_of_each_combination
    return vampire_power, state_combinations, summed_power_of_each_combination
Author: t7reyeslua | Project: NILM-Loc | Lines: 21 | Source file: ground_truth.py
Example 19: constructTensor
def constructTensor(med_file, diag_file):
    diag_med_comb = diag_cross_med(med_file, diag_file)
    # create index map for subject_id, icdcode, and med_name
    patDict = createIndexMap(diag_med_comb.subject_id)
    medDict = createIndexMap(np.hstack(diag_med_comb.med_name))
    diagDict = createIndexMap(np.hstack(diag_med_comb.code))

    tensorIdx = np.array([[0, 0, 0]])
    tensorVal = np.array([[0]])
    for i in range(diag_med_comb.shape[0]):  # xrange in the original (Python 2)
        curDiag = [diagDict[x] for x in diag_med_comb.iloc[i, 0]]
        curMed = [medDict[x] for x in diag_med_comb.iloc[i, 1]]
        curPatId = patDict[diag_med_comb.iloc[i, 2]]
        # one (diagnosis, medication) index pair per combination
        dmCombo = extmath.cartesian((curDiag, curMed))
        tensorIdx = np.append(tensorIdx, np.column_stack((np.repeat(curPatId, dmCombo.shape[0]), dmCombo)), axis=0)
        tensorVal = np.append(tensorVal, np.ones((dmCombo.shape[0], 1), dtype=int), axis=0)
    tensorIdx = np.delete(tensorIdx, (0), axis=0)
    tensorVal = np.delete(tensorVal, (0), axis=0)
    tenX = sptensor.sptensor(tensorIdx, tensorVal, np.array([len(patDict), len(diagDict), len(medDict)]))
    axisDict = {0: patDict, 1: diagDict, 2: medDict}
    return tenX, axisDict
Author: mrthat | Project: npb_phenotyping | Lines: 23 | Source file: createMimicTensor.py
Example 20: symmetry_score
def symmetry_score(transformation, left, right, stepz=100, ignore_value=0):
    """Counts how many elements in reflected img2 are equal in img1."""
    sizex, sizey, sizez = left.shape
    score = 0
    for zstart in range(0, sizez, stepz):
        # Generate original coordinates for this z-slab
        coords = cartesian((np.arange(sizex),
                            np.arange(sizey),
                            np.arange(zstart, min(sizez, zstart + stepz))))
        # Reflect coordinates
        reflected_coords = transform_coords(transformation, coords)
        # Find valid transformations
        valid_coords = ((reflected_coords >= 0) &
                        (reflected_coords < (sizex, sizey, sizez))).all(axis=1)
        coords = coords[valid_coords]
        reflected_coords = reflected_coords[valid_coords]
        # print('There were %d of %d reflected points out of boundaries' %
        #       ((~valid_coords).sum(), len(valid_coords)))
        # Compute score
        equal = left[tuple(coords.T)] == right[tuple(reflected_coords.T)]
        valid = ((left[tuple(coords.T)] != ignore_value) &
                 (right[tuple(reflected_coords.T)] != ignore_value))
        score += np.sum(equal & valid)
    return score
Author: strawlab | Project: braincode | Lines: 24 | Source file: symmetry.py
Note: the sklearn.utils.extmath.cartesian examples in this article were compiled from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not republish without permission.