This article collects typical usage examples of the numpy.moveaxis function in Python. If you have been wondering what moveaxis does, how to call it, or want to see it used in real code, the hand-picked examples below should help.
Twenty code examples of the moveaxis function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
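As a quick refresher before the examples: np.moveaxis(a, source, destination) returns a view of the array with the listed axes moved to new positions while the remaining axes keep their original order. A minimal sketch (shapes chosen only for illustration):

import numpy as np

a = np.zeros((3, 4, 5))
# Move the last axis to the front: (3, 4, 5) -> (5, 3, 4)
print(np.moveaxis(a, -1, 0).shape)
# Several axes can be moved at once: (3, 4, 5) -> (5, 4, 3)
print(np.moveaxis(a, [0, 1, 2], [2, 1, 0]).shape)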
Example 1: load_and_subsample
def load_and_subsample(raw_img_path, substep, low_freq_percent):
    """
    Loads and subsamples an MR image in Analyze format

    Parameters
    ------------
    raw_img_path : str
        The path to the MR image
    substep : int
        The substep to use when subsampling image slices
    low_freq_percent : float
        The percentage of low frequency data to retain when subsampling slices

    Returns
    ------------
    tuple
        A triple containing the following ordered numpy arrays:
        1. The subsampled MR image (datatype `np.float32`)
        2. The k-space representation of the subsampled MR image (datatype `np.complex128`)
        3. The original MR image (datatype `np.float32`)
    """
    original_img = load_image_data(analyze_img_path=raw_img_path)
    subsampled_img, subsampled_k = subsample(
        analyze_img_data=original_img,
        substep=substep,
        low_freq_percent=low_freq_percent)

    original_img = np.moveaxis(original_img, -1, 0)
    subsampled_img = np.moveaxis(subsampled_img, -1, 0)
    subsampled_k = np.moveaxis(subsampled_k, -1, 0)

    return subsampled_img, subsampled_k, original_img

Developer: snowbhr06, Project: MRI-Reconstruction, Lines of code: 33, Source file: test_net.py
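Here the Analyze volume is assumed to be stored with the slice index last, so np.moveaxis(img, -1, 0) makes the slice index the leading axis before the arrays are returned. A small sketch of the same reordering on a dummy volume (the shape is purely illustrative, not taken from the project):

import numpy as np

volume = np.random.rand(256, 256, 36).astype(np.float32)  # (height, width, n_slices), illustrative
slices_first = np.moveaxis(volume, -1, 0)                  # (36, 256, 256): slice index leads
print(slices_first.shape)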
Example 2: get_float_tensor_from_cntk_convolutional_weight_parameter
def get_float_tensor_from_cntk_convolutional_weight_parameter(tensorParameter):
    """Returns an ELL.FloatTensor from a trainable parameter
       Note that ELL's ordering is row, column, channel.
       4D parameters (e.g. those that represent convolutional weights) are
       stacked vertically in the row dimension.
       CNTK has them in filter, channel, row, column order.
    """
    tensorShape = tensorParameter.shape
    tensorValue = tensorParameter.value

    if (len(tensorShape) == 4):
        orderedWeights = np.moveaxis(tensorValue, 1, -1)
        orderedWeights = orderedWeights.ravel().astype(np.float).reshape(
            tensorShape[0] * tensorShape[2], tensorShape[3], tensorShape[1])
    elif (len(tensorShape) == 3):
        orderedWeights = np.moveaxis(tensorValue, 0, -1)
        orderedWeights = orderedWeights.ravel().astype(np.float).reshape(
            tensorShape[1], tensorShape[2], tensorShape[0])
    elif (len(tensorShape) == 2):
        orderedWeights = np.moveaxis(tensorValue, 0, -1)
        orderedWeights = orderedWeights.ravel().astype(
            np.float).reshape(tensorShape[1], tensorShape[0], 1)
    else:
        orderedWeights = tensorValue.ravel().astype(
            np.float).reshape(1, 1, tensorValue.size)

    return ELL.FloatTensor(orderedWeights)

Developer: openube, Project: ELL, Lines of code: 25, Source file: cntk_to_ell.py
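For the 4D branch, np.moveaxis(tensorValue, 1, -1) turns CNTK's (filter, channel, row, column) layout into (filter, row, column, channel) before the reshape stacks the filters along the row dimension. A sketch with made-up sizes (64 filters, 3 channels, 5x5 kernels):

import numpy as np

w = np.zeros((64, 3, 5, 5))        # (filter, channel, row, column), illustrative
w = np.moveaxis(w, 1, -1)          # (filter, row, column, channel) -> (64, 5, 5, 3)
stacked = w.reshape(64 * 5, 5, 3)  # filters stacked vertically in the row dimension
print(stacked.shape)               # (320, 5, 3)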
Example 3: apply_transform
def apply_transform(matrix, image, params):
    """
    Apply a transformation to an image.

    The origin of transformation is at the top left corner of the image.

    The matrix is interpreted such that a point (x, y) on the original image
    is moved to transform * (x, y) in the generated image.
    Mathematically speaking, that means that the matrix is a transformation
    from the transformed image space to the original image space.

    Parameters:
      matrix: A homogeneous 3 by 3 matrix representing the transformation to apply.
      image:  The image to transform.
      params: The transform parameters (see TransformParameters)
    """
    if params.channel_axis != 2:
        image = np.moveaxis(image, params.channel_axis, 2)

    output = cv2.warpAffine(
        image,
        matrix[:2, :],
        dsize=(image.shape[1], image.shape[0]),
        flags=params.cvInterpolation(),
        borderMode=params.cvBorderMode(),
        borderValue=params.cval,
    )

    if params.channel_axis != 2:
        output = np.moveaxis(output, 2, params.channel_axis)
    return output

Developer: saanasum, Project: keras-retinanet, Lines of code: 29, Source file: image.py
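cv2.warpAffine expects the channel axis last, so the helper temporarily moves a channels-first image to HxWxC and moves the result back afterwards. Because np.moveaxis returns a view, the axis shuffling itself copies no pixel data. Sketched on a dummy array (shape illustrative):

import numpy as np

image = np.zeros((3, 480, 640))  # channels-first input, illustrative
hwc = np.moveaxis(image, 0, 2)   # (480, 640, 3), the layout OpenCV expects
chw = np.moveaxis(hwc, 2, 0)     # back to (3, 480, 640)
print(hwc.shape, chw.shape)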
Example 4: calc
def calc(self, pars, elo, xlo, ylo, ehi, xhi, yhi):
    etrue_centers = self.true_energy.log_centers
    if self.use_psf:
        # Convolve the spatial model * exposure by the PSF in etrue
        spatial = np.zeros((self.dim_Etrue, self.dim_x, self.dim_y))
        a = self.spatial_model.calc(pars[self._spatial_pars], self.xx_lo.ravel(), self.xx_hi.ravel(),
                                    self.yy_lo.ravel(), self.yy_hi.ravel()).reshape(self.xx_lo.shape)
        for ind_E in range(self.dim_Etrue):
            spatial[ind_E, :, :] = self._fftconvolve(a * self.exposure.data[ind_E, :, :],
                                                     self.psf.data[ind_E, :, :] /
                                                     (self.psf.data[ind_E, :, :].sum()), mode='same')
            # Avoid NaN values for the true-energy bins requested by the user for which the
            # PSF is not defined. The interpolation returns NaN outside its range, and summing
            # over all true-energy bins to get the expected counts in a reconstructed-energy
            # bin would then give NaN; bins with an undefined PSF should simply not contribute.
            spatial[np.isnan(spatial)] = 0
    else:
        spatial_2d = self.spatial_model.calc(pars[self._spatial_pars], self.xx_lo.ravel(), self.xx_hi.ravel(),
                                             self.yy_lo.ravel(), self.yy_hi.ravel()).reshape(self.xx_lo.shape)
        spatial = np.tile(spatial_2d, (len(etrue_centers), 1, 1))

    # Calculate the spectral model in etrue
    spectral_1d = self.spectral_model.calc(pars[self._spectral_pars], etrue_centers)
    spectral = spectral_1d.reshape(len(etrue_centers), 1, 1) * np.ones_like(self.xx_lo)

    # Convolve by the energy resolution
    etrue_band = self.true_energy.bands
    for ireco in range(self.dim_Ereco):
        self.convolve_edisp[:, :, :, ireco] = np.moveaxis(spatial, 0, -1) * np.moveaxis(spectral, 0, -1) * \
            self.edisp[:, ireco] * etrue_band

    # Integration in etrue
    model = np.moveaxis(np.sum(self.convolve_edisp, axis=2), -1, 0)
    if not self.select_region:
        return model.ravel()
    else:
        return model[self.index_selected_region].ravel()

Developer: mirca, Project: gammapy, Lines of code: 35, Source file: sherpa_.py
Example 5: get_float_tensor_from_cntk_dense_weight_parameter
def get_float_tensor_from_cntk_dense_weight_parameter(tensorParameter):
    """Returns an ELL.FloatTensor from a trainable parameter
       Note that ELL's ordering is row, column, channel.
       CNTK has them in channel, row, column, filter order.
       4D parameters are converted to ELL Tensor by stacking vertically in the row dimension.
    """
    tensorShape = tensorParameter.shape
    tensorValue = tensorParameter.value
    # orderedWeights = tensorValue

    if (len(tensorShape) == 4):
        orderedWeights = tensorValue
        orderedWeights = np.moveaxis(orderedWeights, 0, -1)
        orderedWeights = np.moveaxis(orderedWeights, 2, 0)
        orderedWeights = orderedWeights.ravel().astype(np.float).reshape(
            tensorShape[3] * tensorShape[1], tensorShape[2], tensorShape[0])
    elif (len(tensorShape) == 3):
        orderedWeights = np.moveaxis(tensorValue, 0, -1)
        orderedWeights = orderedWeights.ravel().astype(np.float).reshape(
            tensorShape[1], tensorShape[2], tensorShape[0])
    elif (len(tensorShape) == 2):
        orderedWeights = np.moveaxis(tensorValue, 0, -1)
        orderedWeights = orderedWeights.ravel().astype(
            np.float).reshape(tensorShape[1], 1, tensorShape[0])
    else:
        orderedWeights = tensorValue.ravel().astype(
            np.float).reshape(1, 1, tensorValue.size)

    return ELL.FloatTensor(orderedWeights)

Developer: openube, Project: ELL, Lines of code: 29, Source file: cntk_to_ell.py
Example 6: test_exceptions
def test_exceptions(self):
    # test axis must be in bounds
    for ndim in [1, 2, 3]:
        a = np.ones((1,)*ndim)
        np.concatenate((a, a), axis=0)  # OK
        assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
        assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))

    # Scalars cannot be concatenated
    assert_raises(ValueError, concatenate, (0,))
    assert_raises(ValueError, concatenate, (np.array(0),))

    # test shapes must match except for concatenation axis
    a = np.ones((1, 2, 3))
    b = np.ones((2, 2, 3))
    axis = list(range(3))
    for i in range(3):
        np.concatenate((a, b), axis=axis[0])  # OK
        assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1])
        assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
        a = np.moveaxis(a, -1, 0)
        b = np.moveaxis(b, -1, 0)
        axis.append(axis.pop(0))

    # No arrays to concatenate raises ValueError
    assert_raises(ValueError, concatenate, ())

Developer: ales-erjavec, Project: numpy, Lines of code: 26, Source file: test_shape_base.py
Example 7: coral_numpy
def coral_numpy(source, target):
    n_channels = source.shape[-1]

    source = np.moveaxis(source, -1, 0)  # HxWxC -> CxHxW
    target = np.moveaxis(target, -1, 0)  # HxWxC -> CxHxW

    source_flatten = source.reshape(n_channels, source.shape[1]*source.shape[2])
    target_flatten = target.reshape(n_channels, target.shape[1]*target.shape[2])

    source_flatten_mean = source_flatten.mean(axis=1, keepdims=True)
    source_flatten_std = source_flatten.std(axis=1, keepdims=True)
    source_flatten_norm = (source_flatten - source_flatten_mean) / source_flatten_std

    target_flatten_mean = target_flatten.mean(axis=1, keepdims=True)
    target_flatten_std = target_flatten.std(axis=1, keepdims=True)
    target_flatten_norm = (target_flatten - target_flatten_mean) / target_flatten_std

    source_flatten_cov_eye = source_flatten_norm.dot(source_flatten_norm.T) + np.eye(n_channels)
    target_flatten_cov_eye = target_flatten_norm.dot(target_flatten_norm.T) + np.eye(n_channels)

    source_flatten_norm_transfer = matSqrt_numpy(target_flatten_cov_eye).dot(
        np.linalg.inv(matSqrt_numpy(source_flatten_cov_eye))).dot(source_flatten_norm)
    source_flatten_transfer = source_flatten_norm_transfer * target_flatten_std + target_flatten_mean

    coraled = source_flatten_transfer.reshape(source.shape)
    coraled = np.moveaxis(coraled, 0, -1)  # CxHxW -> HxWxC
    return coraled

Developer: calvinlcchen, Project: WCT-TF, Lines of code: 27, Source file: coral.py
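Moving the channel axis to the front lets the per-channel statistics and the CxC covariance matrices be computed with ordinary 2D operations; the final np.moveaxis(coraled, 0, -1) restores the HxWxC layout. A toy shape check (sizes illustrative; matSqrt_numpy is the project's own matrix square-root helper and is not redefined here):

import numpy as np

source = np.random.rand(32, 48, 3)  # HxWxC, illustrative
chw = np.moveaxis(source, -1, 0)    # (3, 32, 48)
flat = chw.reshape(3, -1)           # (3, 1536): one row per channel
cov = flat @ flat.T                 # (3, 3) channel covariance (unnormalized)
print(chw.shape, flat.shape, cov.shape)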
Example 8: test_pick
def test_pick():
    group = icosahedral.Pyritohedral()
    full = group.group

    from pycomplex.math import linalg
    N = 128
    points = np.moveaxis(np.indices((N, N)).astype(np.float), 0, -1) / (N - 1) * 2 - 1
    z = np.sqrt(np.clip(1 - linalg.dot(points, points), 0, 1))
    points = np.concatenate([points, z[..., None]], axis=-1)

    element_idx, sub_idx, quotient_idx, bary = group.pick(points.reshape(-1, 3))

    if False:
        col = bary
    else:
        col = np.array([
            sub_idx.astype(np.float) / sub_idx.max(),
            sub_idx * 0,
            quotient_idx.astype(np.float) / quotient_idx.max()
        ]).T

    plt.figure()
    img = np.flip(np.moveaxis(col.reshape(N, N, 3), 0, 1), axis=0)
    # img = (img * 255).astype(np.uint8)
    plt.imshow(img)
    plt.show()

Developer: EelcoHoogendoorn, Project: Escheresque, Lines of code: 30, Source file: test_icosahedral.py
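np.indices((N, N)) has shape (2, N, N); moving axis 0 to the end produces an (N, N, 2) grid of pixel coordinate pairs, which the test then rescales to [-1, 1] before lifting the points onto the sphere. The coordinate-grid trick in isolation:

import numpy as np

N = 4
grid = np.moveaxis(np.indices((N, N)).astype(float), 0, -1)  # (N, N, 2) coordinate pairs
points = grid / (N - 1) * 2 - 1                              # rescale to [-1, 1]
print(points.shape, points.min(), points.max())              # (4, 4, 2) -1.0 1.0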
Example 9: make_step
def make_step(self, signals, dt, rng):
    if self.conv.dimensions > 2:
        # note: we raise the error here, rather than earlier, because
        # other backends might support different convolutions
        raise NotImplementedError("Convolution > 2D not supported")

    W = signals[self.W]
    X = signals[self.X]
    Y = signals[self.Y]
    pad = self.conv.padding.upper()
    stride = self.conv.strides

    X = X.reshape(self.conv.input_shape.shape)
    Y = Y.reshape(self.conv.output_shape.shape)

    if not self.conv.channels_last:
        X = np.moveaxis(X, 0, -1)
        Y = np.moveaxis(Y, 0, -1)

    if self.conv.dimensions == 1:
        # add extra dimension to make it a 2D convolution
        X = X[None, :, :]
        W = W[None, :, :, :]
        Y = Y[None, :, :]
        stride = (1,) + stride

    # add empty batch dimension
    X = X[None, ...]

    def step_conv():
        Y[...] += conv2d.conv2d(X, W, pad=pad, stride=stride)[0]

    return step_conv

Developer: nengo, Project: nengo, Lines of code: 33, Source file: transforms.py
Example 10: test_pick
def test_pick():
    group = Cyclic(2)
    complex = MultiComplex.generate(group, 6)

    from pycomplex.math import linalg
    N = 1024
    points = np.moveaxis(np.indices((N, N)).astype(np.float), 0, -1) / (N - 1) * 2 - 1
    z = np.sqrt(np.clip(1 - linalg.dot(points, points), 0, 1))
    points = np.concatenate([points, z[..., None]], axis=-1)

    element_idx, sub_idx, quotient_idx, triangle_idx, bary = complex[-1].pick(points.reshape(-1, 3))
    print(bary.min(), bary.max())

    if True:
        col = bary
    else:
        col = np.array([
            sub_idx.astype(np.float) / sub_idx.max(),
            sub_idx * 0,
            quotient_idx.astype(np.float) / quotient_idx.max()
        ]).T

    plt.figure()
    img = np.flip(np.moveaxis(col.reshape(N, N, 3), 0, 1), axis=0)
    plt.imshow(img)
    plt.show()

Developer: EelcoHoogendoorn, Project: Escheresque, Lines of code: 28, Source file: test_multicomplex.py
Example 11: weighed_arithmetic_mean_numpy
def weighed_arithmetic_mean_numpy(data, weights=None):
    """
    Calculate the weighted mean of an array/list using numpy
    """
    # Not weighted
    if weights is None: return arithmetic_mean_numpy(data)

    import numpy as np

    # Get the number of dimensions
    ndim_data = len(data.shape)
    ndim_weights = len(weights.shape)

    #weights = np.array(weights).flatten() / float(sum(weights))
    #return np.dot(np.array(data), weights)

    if ndim_weights > 1:
        weights = np.copy(weights)
        divisors = np.sum(weights, axis=-1)
        norm_weights = np.moveaxis(weights, -1, 0)  # move last to first axis
        #print("1", norm_weights.shape)
        # Loop over the leading axis and normalize each slice
        for index in range(norm_weights.shape[0]): norm_weights[index] /= divisors
        #print(norm_weights.shape)
        norm_weights = np.moveaxis(norm_weights, 0, 1)
        #print("2", norm_weights.shape)
    else: norm_weights = weights / float(np.sum(weights))

    return np.dot(data, norm_weights)

Developer: SKIRT, Project: PTS, Lines of code: 34, Source file: numbers.py
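The two moveaxis calls exist only to make the division broadcast: with the last axis moved to the front, each slice of weights can be divided by its per-element sum. For 2D weights the same normalization can be written with keepdims and no axis shuffling; a hedged equivalence check (note the original moves the axis back with np.moveaxis(..., 0, 1), which coincides with moving it back to the end only in the 2D case):

import numpy as np

weights = np.random.rand(5, 3)                              # illustrative 2D weight map
norm_a = weights / np.sum(weights, axis=-1, keepdims=True)  # broadcasting via keepdims
norm_b = np.moveaxis(np.moveaxis(weights, -1, 0) / np.sum(weights, axis=-1), 0, -1)
print(np.allclose(norm_a, norm_b))                          # True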
Example 12: load_data
def load_data(dirp, nb_classes):
    # load the dataset as X_train and as a copy the X_val
    X_train = pickle.load(open(os.path.join(dirp, 'X_train.pkl'), "rb"))
    y_train = pickle.load(open(os.path.join(dirp, 'y_train.pkl'), "rb"))
    X_val = pickle.load(open(os.path.join(dirp, 'X_val.pkl'), "rb"))
    y_val = pickle.load(open(os.path.join(dirp, 'y_val.pkl'), "rb"))

    if len(X_train.shape) > 3:
        X_train = np.moveaxis(X_train, 3, 1)
        X_val = np.moveaxis(X_val, 3, 1)
    else:
        X_train = np.expand_dims(X_train, 1)
        X_val = np.expand_dims(X_val, 1)

    print('Xtrain :', X_train.shape)
    print('X_train min max :', X_train.min(), X_train.max())

    # labels to categorical vectors
    # uniquelbls = np.unique(y_train)
    # nb_classes = int(uniquelbls.shape[0])
    print('number of classes :', int(nb_classes))
    # zbn = np.min(uniquelbls)  # zero based numbering
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_val = np_utils.to_categorical(y_val, nb_classes)

    return (X_train, y_train), (X_val, y_val)

Developer: skconsulting, Project: ild, Lines of code: 28, Source file: ild_helpers.py
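Here np.moveaxis(X_train, 3, 1) converts image batches from NHWC (samples, height, width, channels) to the channels-first NCHW layout that a Theano-ordered Keras backend expects, which is what this project appears to target. The single-axis move on a dummy batch:

import numpy as np

batch = np.zeros((128, 32, 32, 3))  # NHWC, illustrative
nchw = np.moveaxis(batch, 3, 1)     # (128, 3, 32, 32), channels-first
print(nchw.shape)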
Example 13: scattering_matrix
def scattering_matrix(vp1, vs1, rho1, vp2, vs2, rho2, theta1=0):
    """
    Full Zoeppritz solution, considered the definitive solution.
    Calculates the angle dependent p-wave reflectivity of an interface
    between two mediums.

    Originally written by: Wes Hamlyn, vectorized by Agile.

    Returns the complex reflectivity.

    Args:
        vp1 (float): The upper P-wave velocity.
        vs1 (float): The upper S-wave velocity.
        rho1 (float): The upper layer's density.
        vp2 (float): The lower P-wave velocity.
        vs2 (float): The lower S-wave velocity.
        rho2 (float): The lower layer's density.
        theta1 (ndarray): The incidence angle; float or 1D array length n.

    Returns:
        ndarray. The exact Zoeppritz solution for all modes at the interface.
            A 4x4 array representing the scattering matrix at the incident
            angle theta1.
    """
    theta1 = np.radians(theta1).astype(complex) * np.ones_like(vp1)
    p = np.sin(theta1) / vp1     # Ray parameter.
    theta2 = np.arcsin(p * vp2)  # Trans. angle of P-wave.
    phi1 = np.arcsin(p * vs1)    # Refl. angle of converted S-wave.
    phi2 = np.arcsin(p * vs2)    # Trans. angle of converted S-wave.

    # Matrix form of Zoeppritz equations... M & N are matrices.
    M = np.array([[-np.sin(theta1), -np.cos(phi1), np.sin(theta2), np.cos(phi2)],
                  [np.cos(theta1), -np.sin(phi1), np.cos(theta2), -np.sin(phi2)],
                  [2 * rho1 * vs1 * np.sin(phi1) * np.cos(theta1),
                   rho1 * vs1 * (1 - 2 * np.sin(phi1) ** 2),
                   2 * rho2 * vs2 * np.sin(phi2) * np.cos(theta2),
                   rho2 * vs2 * (1 - 2 * np.sin(phi2) ** 2)],
                  [-rho1 * vp1 * (1 - 2 * np.sin(phi1) ** 2),
                   rho1 * vs1 * np.sin(2 * phi1),
                   rho2 * vp2 * (1 - 2 * np.sin(phi2) ** 2),
                   -rho2 * vs2 * np.sin(2 * phi2)]])

    N = np.array([[np.sin(theta1), np.cos(phi1), -np.sin(theta2), -np.cos(phi2)],
                  [np.cos(theta1), -np.sin(phi1), np.cos(theta2), -np.sin(phi2)],
                  [2 * rho1 * vs1 * np.sin(phi1) * np.cos(theta1),
                   rho1 * vs1 * (1 - 2 * np.sin(phi1) ** 2),
                   2 * rho2 * vs2 * np.sin(phi2) * np.cos(theta2),
                   rho2 * vs2 * (1 - 2 * np.sin(phi2) ** 2)],
                  [rho1 * vp1 * (1 - 2 * np.sin(phi1) ** 2),
                   -rho1 * vs1 * np.sin(2 * phi1),
                   -rho2 * vp2 * (1 - 2 * np.sin(phi2) ** 2),
                   rho2 * vs2 * np.sin(2 * phi2)]])

    M_ = np.moveaxis(np.squeeze(M), [0, 1], [-2, -1])
    A = np.linalg.inv(M_)
    N_ = np.moveaxis(np.squeeze(N), [0, 1], [-2, -1])
    Z_ = np.matmul(A, N_)

    return np.transpose(Z_, axes=list(range(Z_.ndim - 2)) + [-1, -2])

Developer: agile-geoscience, Project: bruges, Lines of code: 59, Source file: reflection.py
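Building M and N from nested lists puts the 4x4 structure in the first two axes and any angle dimension after them; np.moveaxis(np.squeeze(M), [0, 1], [-2, -1]) moves the matrix rows and columns to the last two axes so that np.linalg.inv and np.matmul broadcast over the incidence angles. The same pattern on a small made-up stack:

import numpy as np

theta = np.linspace(0.0, 0.6, 5)                 # 5 angles, illustrative
M = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])  # shape (2, 2, 5): matrix axes first
M_ = np.moveaxis(M, [0, 1], [-2, -1])            # shape (5, 2, 2): a stack of 2x2 matrices
inv = np.linalg.inv(M_)                          # inverts each matrix in the stack
print(M_.shape, inv.shape)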
Example 14: test_convinc_2d
def test_convinc_2d(
        channels_last, stride0, stride1, kernel0, kernel1, padding, rng):
    correlate2d = pytest.importorskip("scipy.signal").correlate2d

    shape0 = 16
    shape1 = 17
    in_channels = 32
    out_channels = 64
    x_shape = (shape0, shape1, in_channels) if channels_last else (
        in_channels, shape0, shape1)

    x = Signal(rng.randn(*x_shape))
    w = Signal(rng.randn(kernel0, kernel1, in_channels, out_channels))
    conv = Convolution(out_channels,
                       x_shape,
                       kernel_size=(kernel0, kernel1),
                       strides=(stride0, stride1),
                       padding=padding,
                       channels_last=channels_last)
    y = Signal(np.zeros(conv.output_shape.shape))

    signals = {sig: np.array(sig.initial_value) for sig in (x, w, y)}
    step = ConvInc(w, x, y, conv).make_step(signals, None, None)

    step()

    x0 = x.initial_value
    if not channels_last:
        x0 = np.moveaxis(x0, 0, -1)
    if padding == "same":
        strides = np.asarray([stride0, stride1])
        padding = np.ceil(np.asarray([shape0, shape1]) / strides)
        padding = np.maximum(
            (padding - 1) * strides + (kernel0, kernel1) - (shape0, shape1),
            0).astype(np.int64)
        x0 = np.pad(x0, [
            (padding[0] // 2, padding[0] - padding[0] // 2),
            (padding[1] // 2, padding[1] - padding[1] // 2),
            (0, 0),
        ], "constant")

    y0 = np.stack([
        np.sum([
            correlate2d(x0[..., j], w.initial_value[..., j, i], mode="valid")
            for j in range(in_channels)
        ], axis=0) for i in range(out_channels)
    ], axis=-1)
    y0 = y0[::stride0, ::stride1, :]
    if not channels_last:
        y0 = np.moveaxis(y0, -1, 0)

    assert np.allclose(signals[y], y0)

Developer: nengo, Project: nengo, Lines of code: 55, Source file: test_transforms.py
Example 15: __init__
def __init__(self):
    train = scipy.io.loadmat('/home/roliveira/.keras/datasets/svhn/train_32x32.mat')
    test = scipy.io.loadmat('/home/roliveira/.keras/datasets/svhn/test_32x32.mat')

    self.X_train = np.moveaxis(train['X'], [0, 1, 2, 3], [2, 3, 1, 0])
    self.y_train = train['y'].reshape(-1)
    self.y_train[self.y_train == 10] = 0

    self.X_test = np.moveaxis(test['X'], [0, 1, 2, 3], [2, 3, 1, 0])
    self.y_test = test['y']
    self.y_test[self.y_test == 10] = 0

Developer: ramon-oliveira, Project: deepstats, Lines of code: 11, Source file: dataloader.py
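The list form of moveaxis handles several axes in one call: SVHN's .mat files store images as (height, width, channels, n_samples), and mapping [0, 1, 2, 3] to [2, 3, 1, 0] yields the (n_samples, channels, height, width) layout used for training. The same mapping, with an equivalent transpose spelling (array sizes illustrative):

import numpy as np

X = np.zeros((32, 32, 3, 1000))                 # (H, W, C, N), the assumed SVHN layout
a = np.moveaxis(X, [0, 1, 2, 3], [2, 3, 1, 0])  # (N, C, H, W)
b = X.transpose(3, 2, 0, 1)                     # equivalent spelling
print(a.shape, a.shape == b.shape)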
Example 16: run_same
def run_same(self, batch, input_support, channels, filters, kernel_support,
             corr, strides_down, strides_up, padding, extra_pad_end,
             channel_separable, data_format, activation, use_bias):
    assert channels == filters == 1

    # Create input array.
    input_shape = (batch, 1) + input_support
    inputs = np.arange(np.prod(input_shape))
    inputs = inputs.reshape(input_shape).astype(np.float32)
    if data_format != "channels_first":
        tf_inputs = tf.constant(np.moveaxis(inputs, 1, -1))
    else:
        tf_inputs = tf.constant(inputs)

    # Create kernel array. This is an identity kernel, so the outputs should
    # be equal to the inputs except for up- and downsampling.
    tf_kernel = parameterizers.StaticParameterizer(
        initializers.IdentityInitializer())

    # Run SignalConv* layer.
    layer_class = {
        3: signal_conv.SignalConv1D,
        4: signal_conv.SignalConv2D,
        5: signal_conv.SignalConv3D,
    }[inputs.ndim]
    layer = layer_class(
        1, kernel_support, corr=corr, strides_down=strides_down,
        strides_up=strides_up, padding=padding, extra_pad_end=extra_pad_end,
        channel_separable=channel_separable, data_format=data_format,
        activation=activation, use_bias=use_bias,
        kernel_parameterizer=tf_kernel)
    tf_outputs = layer(tf_inputs)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        outputs = sess.run(tf_outputs)

    # Check that SignalConv* computes the correct output size.
    predicted_shape = layer.compute_output_shape(tf_inputs.shape)
    self.assertEqual(outputs.shape, tuple(predicted_shape.as_list()))

    # If not using channels_first, convert back to it to compare to input.
    if data_format != "channels_first":
        outputs = np.moveaxis(outputs, -1, 1)

    # Upsample and then downsample inputs.
    expected = inputs
    if not all(s == 1 for s in strides_up):
        expected = self.numpy_upsample(expected, strides_up, extra_pad_end)
    slices = (slice(None), slice(None))
    slices += tuple(slice(None, None, s) for s in strides_down)
    expected = expected[slices]

    self.assertAllClose(expected, outputs, rtol=0, atol=1e-3)

Developer: michaelshiyu, Project: compression, Lines of code: 53, Source file: signal_conv_test.py
Example 17: gen
def gen(self, snr, x_axis=0, ivar_precision=.05, structure_shape=(1, )):
    '''
    generate data from full PC basis, and noisify according to snr
    '''
    if x_axis < 0:
        raise ValueError('x axis index must be positive')

    # since in this case we're using all PCs to construct fake data
    q = self.n

    self.x_axis = x_axis

    # if SNR is a single number, just return a single spectrum
    if not hasattr(snr, '__len__'):
        snr = snr * np.ones_like(self.x)
        fulldata_shape = (self.n, )
        coeffs_shape = (q, )
    # if SNR is given as a map (i.e., has an incompatible shape to self.x),
    # then add a dimension where specified in x_axis to make shapes compatible
    elif self.n not in snr.shape:
        # define higher-dimensional data structure shape
        # that delimits separate measurements
        structure_shape = snr.shape
        snr = np.expand_dims(snr, x_axis)
        snr = np.repeat(snr, self.n, axis=x_axis)
        fulldata_shape = snr.shape
        coeffs_shape = tuple_insert(structure_shape, x_axis, q)
    else:
        structure_shape = tuple_delete(snr.shape, x_axis)
        fulldata_shape = snr.shape
        coeffs_shape = tuple_insert(structure_shape, x_axis, q)

    self.snr = snr
    self.A0 = np.random.randn(*coeffs_shape)

    # generate centered data, and then add mean
    self.obs0_ctrd = np.moveaxis(
        (np.moveaxis(self.A0, x_axis, -1) @ self.E_full.T), -1, x_axis)
    self.obs0 = np.moveaxis(
        np.moveaxis(self.obs0_ctrd, x_axis, -1) + self.M, -1, x_axis)

    obs_noise = self.obs0 * np.random.randn(*fulldata_shape) / snr
    spectrophotometric_noise = np.moveaxis(
        np.random.multivariate_normal(
            np.zeros(self.n), self.K_inst.covariance_, structure_shape),
        -1, x_axis)

    self.obs = self.obs0 + obs_noise + spectrophotometric_noise
    self.ivar0 = (snr / self.obs)**2.
    self.ivar = (self.ivar0 * (1. + ivar_precision *
                               np.random.randn(*self.ivar0.shape))).clip(min=0.)

Developer: zpace, Project: stellarmass_pca, Lines of code: 51, Source file: linalg.py
Example 18: __call__
def __call__(self, l, flam, ivar=None, axis=0, *args, **kwargs):
    if len(l.shape) > 1:
        raise ValueError('wavelength array (l) must be 1D')

    flam[flam == 0.] = eps

    if ivar is None:
        ivar = np.ones_like(flam)
    ivar[ivar == 0.] = eps

    # rearrange axes to make broadcasting work
    ivar = np.moveaxis(ivar, axis, -1)
    flam = np.moveaxis(flam, axis, -1)

    return getattr(self, self._func)(l, flam, ivar, axis, *args, **kwargs)

Developer: zpace, Project: stellarmass_pca, Lines of code: 14, Source file: indices.py
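Moving the caller-supplied spectral axis to the end means the downstream arithmetic can broadcast flam and ivar against 1D quantities defined along wavelength, no matter where that axis sat in the input cube. A sketch with an assumed (wavelength, y, x) cube:

import numpy as np

flam = np.random.rand(500, 10, 20)    # (wavelength, y, x), illustrative
weights = np.random.rand(500)         # some 1D quantity defined along wavelength
flam_last = np.moveaxis(flam, 0, -1)  # (10, 20, 500)
print((flam_last * weights).shape)    # broadcasts over the trailing wavelength axis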
Example 19: run_resample
def run_resample(self, src_data, interp_method='nearest', fill_value=np.nan, nprocs=1, print_msg=True):
    """Run interpolation operation for input 2D/3D data
    Parameters: src_data      : 2D/3D np.array, source data to be geocoded
                interp_method : string, nearest | linear
                fill_value    : NaN or number
                nprocs        : int, number of processes to be used
                print_msg     : bool
    Returns:    geo_data      : 2D/3D np.array
    """
    # use pyresample
    if self.processor == 'pyresample':
        if len(src_data.shape) == 3:
            src_data = np.moveaxis(src_data, 0, -1)

        if src_data.dtype == np.bool_:
            fill_value = False
            print('restrict fill value to False for bool type source data')

        # resample source data into target data
        geo_data = self.run_pyresample(src_data=src_data,
                                       interp_method=interp_method,
                                       fill_value=fill_value,
                                       nprocs=nprocs,
                                       radius=None,
                                       print_msg=True)

        if len(geo_data.shape) == 3:
            geo_data = np.moveaxis(geo_data, -1, 0)

    # use scipy.interpolate.RegularGridInterpolator
    else:
        if print_msg:
            print('resampling using scipy.interpolate.RegularGridInterpolator ...')
        if len(src_data.shape) == 3:
            geo_data = np.empty((src_data.shape[0], self.length, self.width), src_data.dtype)
            prog_bar = ptime.progressBar(maxValue=src_data.shape[0])
            for i in range(src_data.shape[0]):
                geo_data[i, :, :] = self.run_regular_grid_interpolator(src_data=src_data[i, :, :],
                                                                       interp_method=interp_method,
                                                                       fill_value=fill_value,
                                                                       print_msg=True)
                prog_bar.update(i+1)
            prog_bar.close()
        else:
            geo_data = self.run_regular_grid_interpolator(src_data=src_data,
                                                          interp_method=interp_method,
                                                          fill_value=fill_value,
                                                          print_msg=True)

    return geo_data

Developer: hfattahi, Project: PySAR, Lines of code: 49, Source file: resample.py
Example 20: run_valid
def run_valid(self, batch, input_support, channels, filters, kernel_support,
              corr, strides_down, strides_up, padding, extra_pad_end,
              channel_separable, data_format, activation, use_bias):
    assert padding == "valid"

    # Create input array.
    inputs = np.random.randint(32, size=(batch, channels) + input_support)
    inputs = inputs.astype(np.float32)
    if data_format != "channels_first":
        tf_inputs = tf.constant(np.moveaxis(inputs, 1, -1))
    else:
        tf_inputs = tf.constant(inputs)

    # Create kernel array.
    kernel = np.random.randint(16, size=kernel_support + (channels, filters))
    kernel = kernel.astype(np.float32)
    tf_kernel = parameterizers.StaticParameterizer(
        tf.constant_initializer(kernel))

    # Run SignalConv* layer.
    layer_class = {
        3: signal_conv.SignalConv1D,
        4: signal_conv.SignalConv2D,
        5: signal_conv.SignalConv3D,
    }[inputs.ndim]
    layer = layer_class(
        filters, kernel_support, corr=corr, strides_down=strides_down,
        strides_up=strides_up, padding="valid", extra_pad_end=extra_pad_end,
        channel_separable=channel_separable, data_format=data_format,
        activation=activation, use_bias=use_bias,
        kernel_parameterizer=tf_kernel)
    tf_outputs = layer(tf_inputs)
    with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        outputs = sess.run(tf_outputs)

    # Check that SignalConv* computes the correct output size.
    predicted_shape = layer.compute_output_shape(tf_inputs.shape)
    self.assertEqual(outputs.shape, tuple(predicted_shape.as_list()))

    # If not using channels_first, convert back to it to compare to SciPy.
    if data_format != "channels_first":
        outputs = np.moveaxis(outputs, -1, 1)

    # Compute the equivalent result using SciPy and compare.
    expected = self.scipy_convolve_valid(
        corr, inputs, kernel, strides_down, strides_up, extra_pad_end,
        channel_separable)
    self.assertAllClose(expected, outputs, rtol=0, atol=1e-3)

Developer: michaelshiyu, Project: compression, Lines of code: 49, Source file: signal_conv_test.py
Note: the numpy.moveaxis examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.