This page collects typical usage examples of Python's numpy.common_type function. If you have been wondering exactly how common_type is used and what working code that calls it looks like, the hand-picked examples below should help.
Shown below are 20 code examples of the common_type function, sorted by popularity by default. You can upvote the examples you like or find helpful; your ratings help the system recommend better Python code samples.
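As a quick orientation before the examples (a minimal sketch, not taken from any of the projects below): numpy.common_type inspects its array arguments and returns a single scalar type, always an inexact (floating or complex) one, that can represent all of them.

import numpy as np

print(np.common_type(np.arange(3, dtype=np.int32)))      # <class 'numpy.float64'>
print(np.common_type(np.arange(3, dtype=np.float32)))    # <class 'numpy.float32'>
print(np.common_type(np.array([1.0]), np.array([1j])))   # <class 'numpy.complex128'>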
Example 1: test_df_arith_2d_array_collike_broadcasts

def test_df_arith_2d_array_collike_broadcasts(self,
                                              all_arithmetic_operators):
    # GH#23000
    opname = all_arithmetic_operators

    arr = np.arange(6).reshape(3, 2)
    df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])

    collike = arr[:, [1]]  # shape --> (nrows, 1)
    assert collike.shape == (df.shape[0], 1)

    exvals = {True: getattr(df[True], opname)(collike.squeeze()),
              False: getattr(df[False], opname)(collike.squeeze())}

    dtype = None
    if opname in ['__rmod__', '__rfloordiv__']:
        # Series ops may return mixed int/float dtypes in cases where
        # the DataFrame op will return all-float.  So we upcast `expected`.
        dtype = np.common_type(*[x.values for x in exvals.values()])

    expected = pd.DataFrame(exvals, columns=df.columns, index=df.index,
                            dtype=dtype)

    result = getattr(df, opname)(collike)
    tm.assert_frame_equal(result, expected)

Author: bkandel, Project: pandas, Lines: 25, Source: test_arithmetic.py
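A minimal sketch (not part of the pandas test) of the dtype resolution the upcast branch relies on: when one column's Series op yields ints and another's yields floats, common_type picks a float type wide enough for both, matching the all-float DataFrame result.

import numpy as np

ints = np.array([1, 2, 3])           # e.g. an all-int __rmod__ result
floats = np.array([0.5, 1.5, 2.5])   # an all-float result on another column
print(np.common_type(ints, floats))  # <class 'numpy.float64'>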
Example 2: minmax_normalize

def minmax_normalize(samples, out=None):
    """Min-max normalization of a function evaluated on the unit sphere.

    Normalizes samples to ``(samples - min(samples)) / (max(samples) -
    min(samples))`` for each unit sphere.

    Parameters
    ----------
    samples : ndarray (..., N)
        N samples on a unit sphere for each point, stored along the last axis
        of the array.
    out : ndarray (..., N), optional
        An array to store the normalized samples.

    Returns
    -------
    out : ndarray (..., N)
        Normalized samples.
    """
    if out is None:
        dtype = np.common_type(np.empty(0, 'float32'), samples)
        out = np.array(samples, dtype=dtype, copy=True)
    else:
        out[:] = samples

    sample_mins = np.min(samples, -1)[..., None]
    sample_maxes = np.max(samples, -1)[..., None]
    out -= sample_mins
    out /= (sample_maxes - sample_mins)
    return out

Author: gauvinalexandre, Project: dipy, Lines: 31, Source: odf.py
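The np.empty(0, 'float32') trick above deserves a note: mixing an empty float32 array into common_type guarantees an inexact result dtype, so integer samples are upcast before the in-place division. A standalone sketch with hypothetical data (not from dipy):

import numpy as np

samples = np.array([0, 5, 10], dtype=np.int64)
dtype = np.common_type(np.empty(0, 'float32'), samples)   # float64, not int64
out = np.array(samples, dtype=dtype)
out -= out.min()
out /= out.max()
print(out)   # [0.  0.5 1. ]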
Example 3: poly_outer_product

def poly_outer_product(left, right):
    left, right = numpy.asarray(left), numpy.asarray(right)
    nleft, nright = left.ndim-1, right.ndim-1
    pshape = (left.shape[1:] if not nright
              else right.shape[1:] if not nleft
              else (max(left.shape[1:]) + max(right.shape[1:]) - 1,) * (nleft + nright))
    outer = numpy.zeros((left.shape[0], right.shape[0], *pshape),
                        dtype=numpy.common_type(left, right))
    a = slice(None)
    outer[(a, a, *map(slice, left.shape[1:] + right.shape[1:]))] = (
        left[(a, None) + (a,)*nleft + (None,)*nright] *
        right[(None, a) + (None,)*nleft + (a,)*nright])
    return types.frozenarray(outer.reshape(left.shape[0] * right.shape[0],
                                           *pshape), copy=False)

Author: CVerhoosel, Project: nutils, Lines: 8, Source: numeric.py
Example 4: _get_shared_type_and_fill_value

def _get_shared_type_and_fill_value(data1, data2, fill1=None, fill2=None):
    """
    Figure out a shared type that can be used when adding or subtracting
    the two data sets given (accounting for possible overflow).
    Also returns a fill value that can be used.
    """
    # figure out the shared type
    type_to_return = data1.dtype
    changed_type = False
    if data1.dtype is not data2.dtype:
        type_to_return = np.common_type(data1, data2)
        changed_type = True

    # make sure we're using a type that can hold negative values
    if type_to_return in DiffInfoObject.POSITIVE_UPCASTS:
        type_to_return = DiffInfoObject.POSITIVE_UPCASTS[type_to_return]
        changed_type = True

    # upcast the type if we think we'll need more space for subtracting
    if type_to_return in DiffInfoObject.DATATYPE_UPCASTS:
        type_to_return = DiffInfoObject.DATATYPE_UPCASTS[type_to_return]
        changed_type = True

    if changed_type:
        LOG.debug('To prevent overflow, difference data will be upcast from ('
                  + str(data1.dtype) + '/' + str(data2.dtype) + ') to: '
                  + str(type_to_return))

    # figure out the fill value
    fill_value_to_return = None

    # if both of the old fill values exist and are the same, use them
    if (fill1 is not None) and (fill1 == fill2):
        fill_value_to_return = fill1
        if changed_type:
            fill_value_to_return = type_to_return(fill_value_to_return)
    else:
        # if we're looking at float or complex data, use a nan
        if (np.issubdtype(type_to_return, np.floating) or
                np.issubdtype(type_to_return, np.complexfloating)):
            fill_value_to_return = np.nan
        # if we're looking at int data, use the minimum value
        elif np.issubdtype(type_to_return, np.signedinteger):
            fill_value_to_return = np.iinfo(type_to_return).min
        # if we're looking at unsigned data, use the maximum value
        elif ((type_to_return is np.uint8) or
              (type_to_return is np.uint16) or
              (type_to_return is np.uint32) or
              (type_to_return is np.uint64)):
            fill_value_to_return = np.iinfo(type_to_return).max

    return type_to_return, fill_value_to_return

Author: adesmet-ssec, Project: uwglance, Lines: 57, Source: data.py
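A standalone sketch of the upcasting idea (the real POSITIVE_UPCASTS and DATATYPE_UPCASTS tables live in uwglance and are not shown here; the single-entry mapping below is a hypothetical stand-in):

import numpy as np

POSITIVE_UPCASTS = {np.dtype(np.uint8): np.int16}   # hypothetical entry
a = np.array([0], dtype=np.uint8)
b = np.array([250], dtype=np.uint8)
shared = POSITIVE_UPCASTS.get(a.dtype, a.dtype)     # int16 can hold a - b
print(a.astype(shared) - b.astype(shared))          # [-250], no wraparound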
Example 5: _normalize_scalar_dtype

def _normalize_scalar_dtype(s, arrs):
    # cast python scalars to an appropriate numpy dtype
    if isinstance(s, (int, float, complex)):
        ndarrs = [_a for _a in arrs if hasattr(_a, 'dtype')]
        flt_arrs = [_a for _a in ndarrs if _a.dtype.kind in 'fc']
        int_arrs = [_a for _a in ndarrs if _a.dtype.kind in 'i']
        if flt_arrs and isinstance(s, (int, float, complex)):
            s = np.asarray(s).astype(np.common_type(*flt_arrs))
        elif int_arrs and isinstance(s, (int, )):
            s = np.asarray(s).astype(max([_a.dtype for _a in int_arrs]))
    return s

Author: KristoforMaynard, Project: Viscid, Lines: 11, Source: necalc.py
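A sketch of why this normalization matters (hypothetical arrays, not from Viscid): a bare Python float would otherwise be promoted to float64, silently widening a float32 pipeline.

import numpy as np

arrs = [np.zeros(3, dtype=np.float32), np.ones(3, dtype=np.float32)]
s = np.asarray(2.5).astype(np.common_type(*arrs))
print(s.dtype)               # float32 -- stays in the narrow type
print((arrs[0] + s).dtype)   # float32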
Example 6: as_series

def as_series(alist, trim=True):
    """Return arguments as a list of 1-d arrays.

    The return type will always be an array of double, complex double, or
    object.

    Parameters
    ----------
    [a1, a2,...] : list of array_like
        The arrays must have no more than one dimension when converted.
    trim : boolean
        When True, trailing zeros are removed from the inputs.
        When False, the inputs are passed through intact.

    Returns
    -------
    [a1, a2,...] : list of 1d-arrays
        A copy of the input data as a list of 1-d arrays.

    Raises
    ------
    ValueError
        Raised when an input cannot be converted to a 1-d array or the
        resulting array is empty.
    """
    arrays = [np.array(a, ndmin=1, copy=0) for a in alist]
    if min([a.size for a in arrays]) == 0:
        raise ValueError("Coefficient array is empty")
    if max([a.ndim for a in arrays]) > 1:
        raise ValueError("Coefficient array is not 1-d")
    if trim:
        arrays = [trimseq(a) for a in arrays]

    if any([a.dtype == np.dtype(object) for a in arrays]):
        ret = []
        for a in arrays:
            if a.dtype != np.dtype(object):
                tmp = np.empty(len(a), dtype=np.dtype(object))
                tmp[:] = a[:]
                ret.append(tmp)
            else:
                ret.append(a.copy())
    else:
        try:
            dtype = np.common_type(*arrays)
        except Exception:
            raise ValueError("Coefficient arrays have no common type")
        ret = [np.array(a, copy=1, dtype=dtype) for a in arrays]
    return ret

Author: NirBenTalLab, Project: proorigami-cde-package, Lines: 50, Source: polyutils.py
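The public counterpart of this helper can be tried directly (assuming a reasonably recent NumPy, where it lives in numpy.polynomial.polyutils):

import numpy as np
from numpy.polynomial import polyutils as pu

a = np.array([1, 2, 3])        # int coefficients
b = np.array([1.0, 0.0])       # float; trailing zero trimmed by trim=True
print(pu.as_series([a, b]))    # [array([1., 2., 3.]), array([1.])]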
Example 7: nulp_diff

def nulp_diff(x, y, dtype=None):
    """For each item in x and y, return the number of representable floating
    point values between them.

    Parameters
    ----------
    x : array_like
        first input array
    y : array_like
        second input array

    Returns
    -------
    nulp : array_like
        number of representable floating point numbers between each item in x
        and y.

    Examples
    --------
    # By definition, epsilon is the smallest number such that 1 + eps != 1, so
    # there should be exactly one ULP between 1 and 1 + eps
    >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps)
    1.0
    """
    import numpy as np
    if dtype:
        x = np.array(x, dtype=dtype)
        y = np.array(y, dtype=dtype)
    else:
        x = np.array(x)
        y = np.array(y)

    t = np.common_type(x, y)
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        raise NotImplementedError("_nulp not implemented for complex array")

    x = np.array(x, dtype=t)
    y = np.array(y, dtype=t)

    if not x.shape == y.shape:
        raise ValueError("x and y do not have the same shape: %s - %s" %
                         (x.shape, y.shape))

    def _diff(rx, ry, vdt):
        diff = np.array(rx - ry, dtype=vdt)
        return np.abs(diff)

    rx = integer_repr(x)
    ry = integer_repr(y)
    return _diff(rx, ry, t)

Author: EmployInsight, Project: numpy, Lines: 50, Source: utils.py
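A usage sketch of the same ULP idea through NumPy's public testing helper (assert_array_max_ulp builds on nulp_diff):

import numpy as np
from numpy.testing import assert_array_max_ulp

x = np.float64(1.0)
y = x + np.finfo(np.float64).eps       # exactly one representable float away
assert_array_max_ulp(x, y, maxulp=1)   # passes; maxulp=0 would raise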
Example 8: matrixmultiply

def matrixmultiply(a, b):
    if len(b.shape) == 1:
        b_is_vector = True
        b = b[:, newaxis]
    else:
        b_is_vector = False
    assert_(a.shape[1] == b.shape[0])
    c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
    for i in range(a.shape[0]):
        for j in range(b.shape[1]):
            s = 0
            for k in range(a.shape[1]):
                s += a[i, k] * b[k, j]
            c[i, j] = s
    if b_is_vector:
        c = c.reshape((a.shape[0],))
    return c

Author: 123jefferson, Project: MiniBloq-Sparki, Lines: 17, Source: test_fblas.py
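The reference loop above should agree with np.dot; a quick standalone check of the dtype logic (common_type of a float32/int64 mix is float64):

import numpy as np

a = np.array([[1., 2.], [3., 4.]], dtype=np.float32)
b = np.array([1, 1], dtype=np.int64)
ct = np.common_type(a, b)
print(ct)                        # <class 'numpy.float64'>
print(np.dot(a, b.astype(ct)))   # [3. 7.]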
Example 9: dot_generalized

def dot_generalized(a, b):
    a = asarray(a)
    if a.ndim >= 3:
        if a.ndim == b.ndim:
            # matrix x matrix
            new_shape = a.shape[:-1] + b.shape[-1:]
        elif a.ndim == b.ndim + 1:
            # matrix x vector
            new_shape = a.shape[:-1]
        else:
            raise ValueError("Not implemented...")
        r = np.empty(new_shape, dtype=np.common_type(a, b))
        for c in itertools.product(*map(range, a.shape[:-2])):
            r[c] = dot(a[c], b[c])
        return r
    else:
        return dot(a, b)

Author: Prastaruszek, Project: numpy, Lines: 17, Source: test_linalg.py
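A sketch of what the helper computes for stacked input (np.matmul broadcasts over the leading axes in the same way):

import numpy as np

a = np.random.rand(4, 3, 3).astype(np.float32)
b = np.random.rand(4, 3, 3)                      # float64
r = np.matmul(a, b)                              # loops dot over axis 0
print(r.shape, r.dtype)                          # (4, 3, 3) float64
print(np.common_type(a, b))                      # <class 'numpy.float64'>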
Example 10: vq

def vq(obs, code_book):
    """Vector Quantization: assign feature sets to codes in a code book.

    Vector quantization determines which code in the code book best represents
    an observation of a target. The features of each observation are compared
    to each code in the book, and the observation is assigned the code closest
    to it. The observations are contained in the obs array. These features
    should be "whitened," or normalized by the standard deviation of all the
    features before being quantized. The code book can be created using the
    kmeans algorithm or something similar.

    :Parameters:
        obs : ndarray
            Each row of the array is an observation. The columns are the
            "features" seen during each observation. The features must be
            whitened first using the whiten function or something equivalent.
        code_book : ndarray
            The code book is usually generated using the kmeans algorithm.
            Each row of the array holds a different code, and the columns are
            the features of the code.
            ::

                                #  f0   f1   f2   f3
                code_book = [[ 1.,  2.,  3.,  4.],  #c0
                             [ 1.,  2.,  3.,  4.],  #c1
                             [ 1.,  2.,  3.,  4.]]  #c2

    :Returns:
        code : ndarray
            If obs is an NxM array, then a length N array is returned that
            holds the selected code book index for each observation.
        dist : ndarray
            The distortion (distance) between the observation and its nearest
            code.

    Notes
    -----
    This currently forces 32-bit math precision for speed. Anyone know
    of a situation where this undermines the accuracy of the algorithm?

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import vq
    >>> code_book = array([[1., 1., 1.],
    ...                    [2., 2., 2.]])
    >>> features = array([[1.9, 2.3, 1.7],
    ...                   [1.5, 2.5, 2.2],
    ...                   [0.8, 0.6, 1.7]])
    >>> vq(features, code_book)
    (array([1, 1, 0],'i'), array([ 0.43588989,  0.73484692,  0.83066239]))

    """
    try:
        import _vq
        ct = common_type(obs, code_book)
        c_obs = obs.astype(ct)
        c_code_book = code_book.astype(ct)
        if ct is single:
            results = _vq.vq(c_obs, c_code_book)
        elif ct is double:
            results = _vq.vq(c_obs, c_code_book)
        else:
            results = py_vq(obs, code_book)
    except ImportError:
        results = py_vq(obs, code_book)
    return results

Author: mbentz80, Project: jzigbeercp, Lines: 68, Source: vq.py
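The dispatch above hinges on what common_type returns; a standalone sketch of that check (float32 inputs take the compiled single-precision path):

import numpy as np

obs = np.array([[1.9, 2.3], [1.5, 2.5]], dtype=np.float32)
code_book = np.array([[1., 1.], [2., 2.]], dtype=np.float32)
ct = np.common_type(obs, code_book)
print(ct is np.single)    # True -> the _vq.vq fast path would be taken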
Example 11: diags

def diags(diagonals, offsets, shape=None, format=None, dtype=None):
    """
    Note: copied from scipy.sparse.construct

    Construct a sparse matrix from diagonals.

    .. versionadded:: 0.11

    Parameters
    ----------
    diagonals : sequence of array_like
        Sequence of arrays containing the matrix diagonals,
        corresponding to `offsets`.
    offsets : sequence of int
        Diagonals to set:
          - k = 0  the main diagonal
          - k > 0  the k-th upper diagonal
          - k < 0  the k-th lower diagonal
    shape : tuple of int, optional
        Shape of the result. If omitted, a square matrix large enough
        to contain the diagonals is returned.
    format : {"dia", "csr", "csc", "lil", ...}, optional
        Matrix format of the result. By default (format=None) an
        appropriate sparse matrix format is returned. This choice is
        subject to change.
    dtype : dtype, optional
        Data type of the matrix.

    See Also
    --------
    spdiags : construct matrix from diagonals

    Notes
    -----
    This function differs from `spdiags` in the way it handles
    off-diagonals.

    The result from `diags` is the sparse equivalent of::

        np.diag(diagonals[0], offsets[0])
        + ...
        + np.diag(diagonals[k], offsets[k])

    Repeated diagonal offsets are disallowed.

    Examples
    --------
    >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]]
    >>> diags(diagonals, [0, -1, 2]).todense()
    matrix([[ 1.,  0.,  1.,  0.],
            [ 1.,  2.,  0.,  2.],
            [ 0.,  2.,  3.,  0.],
            [ 0.,  0.,  3.,  4.]])

    Broadcasting of scalars is supported (but shape needs to be
    specified):

    >>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).todense()
    matrix([[-2.,  1.,  0.,  0.],
            [ 1., -2.,  1.,  0.],
            [ 0.,  1., -2.,  1.],
            [ 0.,  0.,  1., -2.]])

    If only one diagonal is wanted (as in `numpy.diag`), the following
    works as well:

    >>> diags([1, 2, 3], 1).todense()
    matrix([[ 0.,  1.,  0.,  0.],
            [ 0.,  0.,  2.,  0.],
            [ 0.,  0.,  0.,  3.],
            [ 0.,  0.,  0.,  0.]])
    """
    # if offsets is not a sequence, assume that there's only one diagonal
    try:
        iter(offsets)
    except TypeError:
        # now check that there's actually only one diagonal
        try:
            iter(diagonals[0])
        except TypeError:
            diagonals = [np.atleast_1d(diagonals)]
        else:
            raise ValueError("Different number of diagonals and offsets.")
    else:
        diagonals = list(map(np.atleast_1d, diagonals))
    offsets = np.atleast_1d(offsets)

    # Basic check
    if len(diagonals) != len(offsets):
        raise ValueError("Different number of diagonals and offsets.")

    # Determine shape, if omitted
    if shape is None:
        m = len(diagonals[0]) + abs(int(offsets[0]))
        shape = (m, m)

    # Determine data type, if omitted
    if dtype is None:
        dtype = np.common_type(*diagonals)
    # ......... remainder of the code omitted .........

Author: ismaelresp, Project: PyEMMA, Lines: 101, Source: numeric.py
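The dtype rule at the end can be observed on the SciPy original (assuming scipy is installed): with dtype omitted, one complex diagonal makes the whole matrix complex.

import scipy.sparse as sp

m = sp.diags([[1, 2, 3], [1j, 2j]], [0, 1], shape=(3, 3))
print(m.dtype)    # complex128, i.e. np.common_type(*diagonals)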
Example 12: vq

def vq(obs, code_book):
    """
    Assign codes from a code book to observations.

    Assigns a code from a code book to each observation. Each
    observation vector in the 'M' by 'N' `obs` array is compared with the
    centroids in the code book and assigned the code of the closest
    centroid.

    The features in `obs` should have unit variance, which can be
    achieved by passing them through the whiten function. The code
    book can be created with the k-means algorithm or a different
    encoding algorithm.

    Parameters
    ----------
    obs : ndarray
        Each row of the 'M' x 'N' array is an observation. The columns are
        the "features" seen during each observation. The features must be
        whitened first using the whiten function or something equivalent.
    code_book : ndarray
        The code book is usually generated using the k-means algorithm.
        Each row of the array holds a different code, and the columns are
        the features of the code.

         >>> #              f0    f1    f2   f3
         >>> code_book = [
         ...             [  1.,   2.,   3.,   4.],  #c0
         ...             [  1.,   2.,   3.,   4.],  #c1
         ...             [  1.,   2.,   3.,   4.]]  #c2

    Returns
    -------
    code : ndarray
        A length M array holding the code book index for each observation.
    dist : ndarray
        The distortion (distance) between the observation and its nearest
        code.

    Notes
    -----
    This currently forces 32-bit math precision for speed. Anyone know
    of a situation where this undermines the accuracy of the algorithm?

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import vq
    >>> code_book = array([[1., 1., 1.],
    ...                    [2., 2., 2.]])
    >>> features = array([[1.9, 2.3, 1.7],
    ...                   [1.5, 2.5, 2.2],
    ...                   [0.8, 0.6, 1.7]])
    >>> vq(features, code_book)
    (array([1, 1, 0],'i'), array([ 0.43588989,  0.73484692,  0.83066239]))

    """
    try:
        from . import _vq
        ct = common_type(obs, code_book)
        c_obs = obs.astype(ct)
        c_code_book = code_book.astype(ct)
        if ct is single:
            results = _vq.vq(c_obs, c_code_book)
        elif ct is double:
            results = _vq.vq(c_obs, c_code_book)
        else:
            results = py_vq(obs, code_book)
    except ImportError:
        results = py_vq(obs, code_book)
    return results

Author: b-t-g, Project: Sim, Lines: 71, Source: vq.py
Example 13: prepare_for_fortran

def prepare_for_fortran(overwrite, *args):
    """Convert arrays to Fortran format.

    This function takes a number of array objects in `args` and converts them
    to a format that can be directly passed to a Fortran function (Fortran
    contiguous NumPy array). If the arrays have different data types, the
    converted arrays are cast to a common compatible data type (one of NumPy's
    `float32`, `float64`, `complex64`, `complex128` data types).

    If `overwrite` is ``False``, a NumPy array that would already be in the
    correct format (Fortran contiguous, right data type) is nevertheless
    copied. (Hence, overwrite = True does not imply that acting on the
    converted array in the return values will overwrite the original array in
    all cases -- it only does so if the original array was already in the
    correct format; the other conversions require copying. In fact, that's the
    same behavior as in SciPy, it's just not explicitly stated there.)

    If an argument is ``None``, it is just passed through and not used to
    determine the proper data type.

    `prepare_for_fortran` returns a character indicating the proper
    data type in LAPACK style ('s', 'd', 'c', 'z') and a list of
    properly converted arrays.
    """
    # Make sure we have NumPy arrays
    mats = [None] * len(args)
    for i in range(len(args)):
        if args[i] is not None:
            arr = np.asanyarray(args[i])
            if not np.issubdtype(arr.dtype, np.number):
                raise ValueError("Argument cannot be interpreted "
                                 "as a numeric array")
            mats[i] = (arr, arr is not args[i] or overwrite)
        else:
            mats[i] = (None, True)

    # First figure out the common dtype.
    # Note: the return type of common_type is guaranteed to be a floating
    # point kind.
    dtype = np.common_type(*[arr for arr, ovwrt in mats if arr is not None])

    if dtype == np.float32:
        lapacktype = 's'
    elif dtype == np.float64:
        lapacktype = 'd'
    elif dtype == np.complex64:
        lapacktype = 'c'
    elif dtype == np.complex128:
        lapacktype = 'z'
    else:
        raise AssertionError("Unexpected data type from common_type")

    ret = [lapacktype]
    for npmat, ovwrt in mats:
        # Now make sure that the array is contiguous, and copy if necessary.
        if npmat is not None:
            if npmat.ndim == 2:
                if not npmat.flags["F_CONTIGUOUS"]:
                    npmat = np.asfortranarray(npmat, dtype=dtype)
                elif npmat.dtype != dtype:
                    npmat = npmat.astype(dtype)
                elif not ovwrt:
                    # Ugly here: copy always makes a C-array; there is no way
                    # to tell it to make a Fortran array.
                    npmat = np.asfortranarray(npmat.copy())
            elif npmat.ndim == 1:
                if not npmat.flags["C_CONTIGUOUS"]:
                    npmat = np.ascontiguousarray(npmat, dtype=dtype)
                elif npmat.dtype != dtype:
                    npmat = npmat.astype(dtype)
                elif not ovwrt:
                    npmat = np.asfortranarray(npmat.copy())
            else:
                raise ValueError("Dimensionality of array is not 1 or 2")

        ret.append(npmat)

    return tuple(ret)

Author: kohei0821, Project: kwant-1, Lines: 80, Source: fortran_helpers.py
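The dtype-to-LAPACK-letter mapping in the middle, as a standalone sketch (for float/complex inputs of standard widths, common_type returns exactly one of these four types):

import numpy as np

letter = {np.float32: 's', np.float64: 'd',
          np.complex64: 'c', np.complex128: 'z'}
a = np.zeros((2, 2), dtype=np.float32)
b = np.zeros((2, 2), dtype=np.complex64)
print(letter[np.common_type(a, b)])   # 'c' -- single-precision complex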
Example 14: arma_acovf

def arma_acovf(ar, ma, nobs=10, sigma2=1, dtype=None):
    """
    Theoretical autocovariance function of an ARMA process.

    Parameters
    ----------
    ar : array_like, 1d
        coefficient for autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficient for moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned acovf
    sigma2 : float
        Variance of the innovation term.

    Returns
    -------
    acovf : array
        autocovariance of ARMA process given by ar, ma

    See Also
    --------
    arma_acf
    acovf

    References
    ----------
    Brockwell, Peter J., and Richard A. Davis. 2009.
    Time Series: Theory and Methods. 2nd ed. 1991.
    New York, NY: Springer.
    """
    if dtype is None:
        dtype = np.common_type(np.array(ar), np.array(ma), np.array(sigma2))

    p = len(ar) - 1
    q = len(ma) - 1
    m = max(p, q) + 1

    if sigma2.real < 0:
        raise ValueError('Must have positive innovation variance.')

    # Short-circuit for trivial corner-case
    if p == q == 0:
        out = np.zeros(nobs, dtype=dtype)
        out[0] = sigma2
        return out

    # Get the moving average representation coefficients that we need
    ma_coeffs = arma2ma(ar, ma, lags=m)

    # Solve for the first m autocovariances via the linear system
    # described by (BD, eq. 3.3.8)
    A = np.zeros((m, m), dtype=dtype)
    b = np.zeros((m, 1), dtype=dtype)
    # We need a zero-right-padded version of ar params
    tmp_ar = np.zeros(m, dtype=dtype)
    tmp_ar[:p + 1] = ar
    for k in range(m):
        A[k, :(k + 1)] = tmp_ar[:(k + 1)][::-1]
        A[k, 1:m - k] += tmp_ar[(k + 1):m]
        b[k] = sigma2 * np.dot(ma[k:q + 1], ma_coeffs[:max((q + 1 - k), 0)])
    acovf = np.zeros(max(nobs, m), dtype=dtype)
    acovf[:m] = np.linalg.solve(A, b)[:, 0]

    # Iteratively apply (BD, eq. 3.3.9) to solve for remaining autocovariances
    if nobs > m:
        zi = signal.lfiltic([1], ar, acovf[:m:][::-1])
        acovf[m:] = signal.lfilter([1], ar, np.zeros(nobs - m, dtype=dtype),
                                   zi=zi)[0]

    return acovf[:nobs]

Author: ChadFulton, Project: statsmodels, Lines: 72, Source: arima_process.py
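Usage sketch (assuming statsmodels is installed; the expected values follow from the AR(1) formula gamma_k = sigma2 * phi**k / (1 - phi**2) with phi = 0.5):

import numpy as np
from statsmodels.tsa.arima_process import arma_acovf

acovf = arma_acovf(ar=np.array([1, -0.5]), ma=np.array([1.0]), nobs=4)
print(acovf)   # approximately [1.3333 0.6667 0.3333 0.1667]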
Example 15: vq

def vq(obs, code_book, check_finite=True):
    """
    Assign codes from a code book to observations.

    Assigns a code from a code book to each observation. Each
    observation vector in the 'M' by 'N' `obs` array is compared with the
    centroids in the code book and assigned the code of the closest
    centroid.

    The features in `obs` should have unit variance, which can be
    achieved by passing them through the whiten function. The code
    book can be created with the k-means algorithm or a different
    encoding algorithm.

    Parameters
    ----------
    obs : ndarray
        Each row of the 'M' x 'N' array is an observation. The columns are
        the "features" seen during each observation. The features must be
        whitened first using the whiten function or something equivalent.
    code_book : ndarray
        The code book is usually generated using the k-means algorithm.
        Each row of the array holds a different code, and the columns are
        the features of the code.

         >>> #              f0    f1    f2   f3
         >>> code_book = [
         ...             [  1.,   2.,   3.,   4.],  #c0
         ...             [  1.,   2.,   3.,   4.],  #c1
         ...             [  1.,   2.,   3.,   4.]]  #c2

    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True

    Returns
    -------
    code : ndarray
        A length M array holding the code book index for each observation.
    dist : ndarray
        The distortion (distance) between the observation and its nearest
        code.

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import vq
    >>> code_book = array([[1., 1., 1.],
    ...                    [2., 2., 2.]])
    >>> features = array([[1.9, 2.3, 1.7],
    ...                   [1.5, 2.5, 2.2],
    ...                   [0.8, 0.6, 1.7]])
    >>> vq(features, code_book)
    (array([1, 1, 0],'i'), array([ 0.43588989,  0.73484692,  0.83066239]))

    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    code_book = _asarray_validated(code_book, check_finite=check_finite)
    ct = common_type(obs, code_book)

    c_obs = obs.astype(ct, copy=False)

    if code_book.dtype != ct:
        c_code_book = code_book.astype(ct)
    else:
        c_code_book = code_book

    if ct in (single, double):
        results = _vq.vq(c_obs, c_code_book)
    else:
        results = py_vq(obs, code_book)
    return results

Author: gwang-cv, Project: scipy, Lines: 74, Source: vq.py
Example 16: as_series

def as_series(alist, trim=True):
    """
    Return argument as a list of 1-d arrays.

    The returned list contains array(s) of dtype double, complex double, or
    object. A 1-d argument of shape ``(N,)`` is parsed into ``N`` arrays of
    size one; a 2-d argument of shape ``(M,N)`` is parsed into ``M`` arrays
    of size ``N`` (i.e., is "parsed by row"); and a higher dimensional array
    raises a ValueError if it is not first reshaped into either a 1-d or 2-d
    array.

    Parameters
    ----------
    a : array_like
        A 1- or 2-d array_like
    trim : boolean, optional
        When True, trailing zeros are removed from the inputs.
        When False, the inputs are passed through intact.

    Returns
    -------
    [a1, a2,...] : list of 1-D arrays
        A copy of the input data as a list of 1-d arrays.

    Raises
    ------
    ValueError
        Raised when `as_series` cannot convert its input to 1-d arrays, or at
        least one of the resulting arrays is empty.

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> a = np.arange(4)
    >>> P.as_series(a)
    [array([ 0.]), array([ 1.]), array([ 2.]), array([ 3.])]
    >>> b = np.arange(6).reshape((2, 3))
    >>> P.as_series(b)
    [array([ 0.,  1.,  2.]), array([ 3.,  4.,  5.])]

    """
    arrays = [np.array(a, ndmin=1, copy=0) for a in alist]
    if min([a.size for a in arrays]) == 0:
        raise ValueError("Coefficient array is empty")
    if any([a.ndim != 1 for a in arrays]):
        raise ValueError("Coefficient array is not 1-d")
    if trim:
        arrays = [trimseq(a) for a in arrays]

    if any([a.dtype == np.dtype(object) for a in arrays]):
        ret = []
        for a in arrays:
            if a.dtype != np.dtype(object):
                tmp = np.empty(len(a), dtype=np.dtype(object))
                tmp[:] = a[:]
                ret.append(tmp)
            else:
                ret.append(a.copy())
    else:
        try:
            dtype = np.common_type(*arrays)
        except Exception:
            raise ValueError("Coefficient arrays have no common type")
        ret = [np.array(a, copy=1, dtype=dtype) for a in arrays]
    return ret

Author: beiko-lab, Project: gengis, Lines: 65, Source: polyutils.py
Example 17: unified_eigenproblem

def unified_eigenproblem(a, b=None, tol=1e6):
    """A helper routine for modes() that wraps eigenproblems.

    This routine wraps the regular and general eigenproblems that can arise
    in a unified way.

    Parameters
    ----------
    a : numpy array
        The matrix on the left hand side of a regular or generalized
        eigenvalue problem.
    b : numpy array or None
        The matrix on the right hand side of the generalized eigenvalue
        problem.
    tol : float
        The tolerance for separating eigenvalues with absolute value 1 from
        the rest.

    Returns
    -------
    ev : numpy array
        An array of eigenvalues (can contain NaNs and Infs, but those
        are not accessed in `modes()`). The number of eigenvalues equals
        twice the number of nonzero singular values of
        `h_hop` (so `2*h_cell.shape[0]` if `h_hop` is invertible).
    evanselect : numpy integer array
        Index array of right-decaying modes.
    propselect : numpy integer array
        Index array of propagating modes (both left and right).
    vec_gen(select) : function
        A function that computes the eigenvectors chosen by the array select.
    ord_schur(select) : function
        A function that computes the unitary matrix (corresponding to the
        right eigenvector space) of the (general) Schur decomposition
        reordered such that the eigenvalues chosen by the array select are in
        the top left block.
    """
    if b is None:
        eps = np.finfo(a.dtype).eps * tol
        t, z, ev = kla.schur(a)

        # Right-decaying modes.
        select = abs(ev) > 1 + eps
        # Propagating modes.
        propselect = abs(abs(ev) - 1) < eps

        vec_gen = lambda x: kla.evecs_from_schur(t, z, select=x)
        ord_schur = lambda x: kla.order_schur(x, t, z, calc_ev=False)[1]
    else:
        eps = np.finfo(np.common_type(a, b)).eps * tol
        s, t, z, alpha, beta = kla.gen_schur(a, b, calc_q=False)

        # Right-decaying modes.
        select = abs(alpha) > (1 + eps) * abs(beta)
        # Propagating modes.
        propselect = (abs(abs(alpha) - abs(beta)) < eps * abs(beta))

        with np.errstate(divide='ignore', invalid='ignore'):
            ev = alpha / beta
        # Note: the division is OK here, since we later only access
        # eigenvalues close to the unit circle.

        vec_gen = lambda x: kla.evecs_from_gen_schur(s, t, z=z, select=x)
        ord_schur = lambda x: kla.order_gen_schur(x, s, t, z=z,
                                                  calc_ev=False)[2]

    return ev, select, propselect, vec_gen, ord_schur

Author: gitter-badger, Project: kwant, Lines: 67, Source: leads.py
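The tolerance construction is the common_type-relevant piece; in isolation (hypothetical matrices, not from kwant):

import numpy as np

a = np.eye(2, dtype=np.float32)
b = np.eye(2, dtype=np.complex128)
eps = np.finfo(np.common_type(a, b)).eps * 1e6   # complex128 machine eps
print(eps)    # ~2.2e-10: the cutoff separating |ev| = 1 from the rest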
Example 18: make_proper_modes

def make_proper_modes(lmbdainv, psi, extract, tol=1e6):
    """
    Find, normalize and sort the propagating eigenmodes.

    Special care is taken of the case of degenerate k-values, where the
    numerically computed modes are typically a superposition of the real
    modes. In this case, also the proper (orthogonal) modes are computed.
    """
    vel_eps = np.finfo(psi.dtype).eps * tol

    nmodes = psi.shape[1]
    n = len(psi) // 2

    # Array for the velocities.
    velocities = np.empty(nmodes, dtype=float)

    # Calculate the full wave function in real space.
    full_psi = extract(psi, lmbdainv)

    # Find clusters of nearby eigenvalues. Since the eigenvalues occupy the
    # unit circle, special care has to be taken to not introduce a cut at
    # lambda = -1.
    eps = np.finfo(lmbdainv.dtype).eps * tol
    angles = np.angle(lmbdainv)
    sort_order = np.resize(np.argsort(angles), (2 * len(angles,)))
    boundaries = np.argwhere(np.abs(np.diff(lmbdainv[sort_order]))
                             > eps).flatten() + 1

    # Detect the singular case of all eigenvalues equal.
    if boundaries.shape == (0,) and len(angles):
        boundaries = np.array([0, len(angles)])

    for interval in zip(boundaries[:-1], boundaries[1:]):
        if interval[1] > boundaries[0] + len(angles):
            break

        indx = sort_order[interval[0]:interval[1]]

        # If there is a degenerate eigenvalue with several different
        # eigenvectors, the numerical routines return some arbitrary
        # overlap of the real, physical solutions. In order
        # to figure out the correct wave function, we need to
        # have the full, not the projected wave functions
        # (at least to our current knowledge).

        # Finding the true modes is done in two steps:

        # 1. The true transversal modes should be orthogonal to each other, as
        # they share the same Bloch momentum (note that transversal modes with
        # different Bloch momenta k1 and k2 need not be orthogonal, the full
        # modes are orthogonal because of the longitudinal dependence e^{i k1
        # x} and e^{i k2 x}). The modes with the same k are therefore
        # orthogonalized. Moreover for the velocity to have a proper value the
        # modes should also be normalized.

        q, r = la.qr(full_psi[:, indx], mode='economic')

        # If the eigenvectors were purely real up to this stage,
        # they will typically become complex after the rotation.
        if psi.dtype != np.common_type(psi, r):
            psi = psi.astype(np.common_type(psi, r))
        if full_psi.dtype != np.common_type(full_psi, q):
            full_psi = full_psi.astype(np.common_type(psi, q))

        full_psi[:, indx] = q
        psi[:, indx] = la.solve(r.T, psi[:, indx].T).T

        # 2. Moving infinitesimally away from the degeneracy
        # point, the modes should diagonalize the velocity
        # operator (i.e. when they are non-degenerate any more).
        # The modes are therefore rotated properly such that they
        # diagonalize the velocity operator.
        # Note that step 2. does not give a unique result if there are
        # two modes with the same velocity, or if the modes stay
        # degenerate even for a range of Bloch momenta (and hence
        # must have the same velocity). However, this does not matter,
        # since we are happy with any superposition in this case.

        vel_op = -1j * dot(psi[n:, indx].T.conj(), psi[:n, indx])
        vel_op = vel_op + vel_op.T.conj()
        vel_vals, rot = la.eigh(vel_op)

        # If the eigenvectors were purely real up to this stage,
        # they will typically become complex after the rotation.
        if psi.dtype != np.common_type(psi, rot):
            psi = psi.astype(np.common_type(psi, rot))
        if full_psi.dtype != np.common_type(full_psi, rot):
            full_psi = full_psi.astype(np.common_type(psi, rot))

        psi[:, indx] = dot(psi[:, indx], rot)
        full_psi[:, indx] = dot(full_psi[:, indx], rot)
        velocities[indx] = vel_vals

    if np.any(abs(velocities) < vel_eps):
        raise RuntimeError("Found a mode with zero or close to zero velocity.")
    if 2 * np.sum(velocities < 0) != len(velocities):
        raise RuntimeError("Numbers of left- and right-propagating "
                           "modes differ, possibly due to a numerical "
                           "instability.")
    # ......... remainder of the code omitted .........

Author: gitter-badger, Project: kwant, Lines: 101, Source: leads.py
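The recurring pattern above, isolated into a sketch: multiplying real modes by a complex rotation requires widening the dtype first, and common_type supplies exactly the widened type.

import numpy as np

psi = np.eye(2)                                  # real float64 modes
rot = np.array([[1, 1j], [1j, 1]]) / np.sqrt(2)  # complex rotation
if psi.dtype != np.common_type(psi, rot):
    psi = psi.astype(np.common_type(psi, rot))
psi = psi.dot(rot)
print(psi.dtype)    # complex128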
Example 19: setup_linsys

def setup_linsys(h_cell, h_hop, tol=1e6, stabilization=None):
    """Make an eigenvalue problem for eigenvectors of translation operator.

    Parameters
    ----------
    h_cell : numpy array with shape (n, n)
        Hamiltonian of a single lead unit cell.
    h_hop : numpy array with shape (n, m), m <= n
        Hopping Hamiltonian from a cell to the next one.
    tol : float
        Numbers are considered zero when they are smaller than `tol` times
        the machine precision.
    stabilization : sequence of 2 booleans or None
        Which steps of the eigenvalue problem stabilization to perform. If the
        value is `None`, then Kwant chooses the fastest (and least stable)
        algorithm that is expected to be sufficient. For any other value,
        Kwant forms the eigenvalue problem in the basis of the hopping singular
        values. The first element set to `True` forces Kwant to add an
        anti-Hermitian term to the cell Hamiltonian before inverting. If it is
        set to `False`, the extra term will only be added if the cell
        Hamiltonian isn't invertible. The second element set to `True` forces
        Kwant to solve a generalized eigenvalue problem, and not to reduce it
        to the regular one. If it is `False`, reduction to a regular problem
        is performed if possible.

    Returns
    -------
    linsys : namedtuple
        A named tuple containing `matrices`, a matrix pencil defining
        the eigenproblem, `v`, the hermitian conjugate of the last matrix in
        the hopping singular value decomposition, and functions for
        extracting the wave function in the unit cell from the wave function
        in the basis of the nonzero singular exponents of the hopping.

    Notes
    -----
    The lead problem with degenerate hopping is rather complicated, and the
    details of the algorithm will be published elsewhere.
    """
    n = h_cell.shape[0]
    m = h_hop.shape[1]

    if stabilization is not None:
        stabilization = list(stabilization)
    # ......... remainder of the code omitted .........