This article collects typical usage examples of Python's numpy.result_type function. If you are wondering what numpy.result_type does, how to call it, or what real-world uses look like, the curated code examples below should help.
The sections below present 20 code examples of result_type, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
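Before diving into the examples, here is a minimal sketch (not taken from any of the projects below; values and variable names are my own) of what numpy.result_type itself returns: the dtype that NumPy's promotion rules assign to the result of combining the given arrays, dtypes, or scalars.

import numpy as np

# Promotion between two dtypes
print(np.result_type(np.int8, np.int64))         # int64
print(np.result_type(np.float32, np.complex64))  # complex64

# Promotion between an array and a Python scalar
a = np.arange(3, dtype=np.int32)
print(np.result_type(a, 1.5))                    # float64

# Typical use: allocate an output buffer that can safely hold a combination of inputs
b = np.ones(3, dtype=np.float32)
out = np.empty(a.shape, dtype=np.result_type(a, b))
print(out.dtype)                                 # float64

Most of the examples below follow this last pattern: compute a promoted dtype first, then allocate or cast buffers with it.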
Example 1: ShiftConvNumbaFFT
def ShiftConvNumbaFFT(h, N, M, xdtype=np.complex_, powerof2=True):
    # implements Doppler filter:
    # y[n, p] = SUM_k (exp(2*pi*j*n*(k - (L-1))/N) * h[k]) * x[p - k]
    #         = SUM_k (exp(-2*pi*j*n*k/N) * s*[k]) * x[p - (L-1) + k]
    L = len(h)
    outlen = M + L - 1
    nfft = outlen
    if powerof2:
        nfft = pow2(nfft)

    dopplermat = np.exp(2*np.pi*1j*np.arange(N)[:, np.newaxis]*(np.arange(L) - (L - 1))/N)
    # cast to complex type with precision of h (astype returns a new array, so assign it back)
    dopplermat = dopplermat.astype(np.result_type(h.dtype, np.complex64))
    hbank = h*dopplermat
    # speed not critical here, just use numpy fft
    hbankpad = zero_pad(hbank, nfft)
    H = np.fft.fft(hbankpad) / nfft  # divide by nfft b/c FFTW's ifft does not do this

    xcdtype = np.result_type(xdtype, np.complex64)  # cast to complex type with precision of x
    xpad = pyfftw.n_byte_align(np.zeros(nfft, xcdtype), 16)
    X = pyfftw.n_byte_align(np.zeros(nfft, xcdtype), 16)
    xfft = pyfftw.FFTW(xpad, X, threads=_THREADS)

    ydtype = np.result_type(H.dtype, xcdtype)
    Y = pyfftw.n_byte_align_empty(H.shape, 16, ydtype)
    y = pyfftw.n_byte_align_empty(H.shape, 16, ydtype)
    ifft = pyfftw.FFTW(Y, y, direction='FFTW_BACKWARD', threads=_THREADS)

    xtype = numba.__getattribute__(str(np.dtype(xdtype)))

    #htype = numba.__getattribute__(str(H.dtype))
    #xctype = numba.__getattribute__(str(X.dtype))
    #ytype = numba.__getattribute__(str(Y.dtype))
    #@jit(argtypes=[htype[:, ::1], xctype[::1], ytype[:, ::1], xtype[::1]])
    #def fun(H, X, Y, x):
        #xpad[:M] = x
        #xfft.execute()  # input is xpad, output is X
        #Y[:, :] = H*X  # need expression optimized by numba but that writes into Y
        #ifft.execute()  # input is Y, output is y
        #yc = np.array(y)[:, :outlen]  # need a copy, which np.array provides
        #return yc

    #@dopplerbank_dec(h, N, M, nfft=nfft, H=H)
    #def shiftconv_numba_fft(x):
        #return fun(H, X, Y, x)

    #@jit(argtypes=[xtype[::1]])
    @jit
    def shiftconv_numba_fft(x):
        xpad[:M] = x
        xfft.execute()  # input is xpad, output is X
        Y[:, :] = X*H  # need expression optimized by numba but that writes into Y
        ifft.execute()  # input is Y, output is y
        yc = np.array(y[:, :outlen])  # need a copy, which np.array provides
        return yc

    shiftconv_numba_fft = dopplerbank_dec(h, N, M, nfft=nfft, H=H)(shiftconv_numba_fft)

    return shiftconv_numba_fft
Developer: ryanvolz, Project: echolect, Lines: 60, Source: dopplerbanks.py
Example 2: accum
def accum(self, key1, key2, value, weight=1):
    index1 = self._key1map[key1]
    index2 = self._key2map[key2]

    if self._m0 is None:
        self._m0 = np.zeros((self.chunk0size, self.chunk0size), dtype=np.result_type(weight))
        self._m1 = np.zeros((self.chunk0size, self.chunk0size), dtype=np.result_type(value, weight))
        self._m2 = np.zeros_like(self._m1)

    if index1 >= self._m0.shape[0]:
        self._m0 = np.concatenate((self._m0, np.zeros_like(self._m0)), axis=0)
        self._m1 = np.concatenate((self._m1, np.zeros_like(self._m1)), axis=0)
        self._m2 = np.concatenate((self._m2, np.zeros_like(self._m2)), axis=0)

    if index2 >= self._m0.shape[1]:
        self._m0 = np.concatenate((self._m0, np.zeros_like(self._m0)), axis=1)
        self._m1 = np.concatenate((self._m1, np.zeros_like(self._m1)), axis=1)
        self._m2 = np.concatenate((self._m2, np.zeros_like(self._m2)), axis=1)

    self._m0[index1, index2] += weight
    q = weight * value
    self._m1[index1, index2] += q
    q *= value
    self._m2[index1, index2] += q
    return self
Developer: pkgw, Project: pwkit, Lines: 25, Source: closures.py
Example 3: da_sub
def da_sub(daa, dab):
    """
    subtract 2 DataArrays as cleverly as possible:
      * keep the metadata of the first DA in the result
      * ensure the result has the right type so that no underflows happen
    returns (DataArray): the result of daa - dab
    """
    rt = numpy.result_type(daa, dab)  # dtype of result of daa-dab
    dt = None  # default is to let numpy decide
    if rt.kind == "f":
        # float should always be fine
        pass
    elif rt.kind in "iub":
        # underflow can happen (especially if unsigned)
        # find the worst-case value (could be improved, but would be longer)
        worse_val = int(daa.min()) - int(dab.max())
        dt = numpy.result_type(rt, numpy.min_scalar_type(worse_val))
    else:
        # subtracting such data is suspicious, but try anyway
        logging.warning("Subtraction on data of type %s unsupported", rt.name)

    res = numpy.subtract(daa, dab, dtype=dt)  # metadata is copied from daa
    logging.debug("type = %s, %s", res.dtype.name, daa.dtype.name)
    return res
Developer: delmic, Project: odemis, Lines: 26, Source: convert.py
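The promotion trick in da_sub, combining result_type with min_scalar_type so that an unsigned subtraction cannot wrap around, is worth isolating. Below is a minimal standalone sketch of the same idea using plain ndarrays instead of odemis DataArrays; the variable names and values are my own.

import numpy as np

a = np.array([10, 20], dtype=np.uint8)
b = np.array([30, 5], dtype=np.uint8)

rt = np.result_type(a, b)                         # uint8: a - b would wrap around
worst = int(a.min()) - int(b.max())               # most negative value the subtraction can produce
dt = np.result_type(rt, np.min_scalar_type(worst))  # widen to a signed type that can hold it

print(a - b)                         # [236  15]  -- wrapped around
print(np.subtract(a, b, dtype=dt))   # [-20  15]  -- safe signed result (int16 here)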
Example 4: _take_with_fill
def _take_with_fill(self, indices, fill_value=None):
    if fill_value is None:
        fill_value = self.dtype.na_value

    if indices.min() < -1:
        raise ValueError("Invalid value in 'indices'. Must be between -1 "
                         "and the length of the array.")

    if indices.max() >= len(self):
        raise IndexError("out of bounds value in 'indices'.")

    if len(self) == 0:
        # Empty... Allow taking only if all empty
        if (indices == -1).all():
            dtype = np.result_type(self.sp_values, fill_value)
            taken = np.empty_like(indices, dtype=dtype)
            taken.fill(fill_value)
            return taken
        else:
            raise IndexError('cannot do a non-empty take from an empty '
                             'axes.')

    sp_indexer = self.sp_index.lookup_array(indices)

    if self.sp_index.npoints == 0:
        # Avoid taking from the empty self.sp_values
        taken = np.full(sp_indexer.shape, fill_value=fill_value,
                        dtype=np.result_type(fill_value))
    else:
        taken = self.sp_values.take(sp_indexer)

        # sp_indexer may be -1 for two reasons
        # 1.) we took for an index of -1 (new)
        # 2.) we took a value that was self.fill_value (old)
        new_fill_indices = indices == -1
        old_fill_indices = (sp_indexer == -1) & ~new_fill_indices

        # Fill in two steps.
        # Old fill values
        # New fill values
        # potentially coercing to a new dtype at each stage.
        m0 = sp_indexer[old_fill_indices] < 0
        m1 = sp_indexer[new_fill_indices] < 0

        result_type = taken.dtype

        if m0.any():
            result_type = np.result_type(result_type, self.fill_value)
            taken = taken.astype(result_type)
            taken[old_fill_indices] = self.fill_value

        if m1.any():
            result_type = np.result_type(result_type, fill_value)
            taken = taken.astype(result_type)
            taken[new_fill_indices] = fill_value

    return taken
Developer: sechilds, Project: pandas, Lines: 58, Source: array.py
Example 5: NumbaFFTW
def NumbaFFTW(h, M, xdtype=np.complex_, powerof2=True):
    L = len(h)
    outlen = M + L - 1
    nfft = outlen
    if powerof2:
        nfft = pow2(nfft)

    outdtype = np.result_type(h.dtype, xdtype)
    fftdtype = np.result_type(outdtype, np.complex64)  # output is always complex, promote using smallest

    # speed not critical here, just use numpy fft
    # cast to outdtype so we use same type of fft as when transforming x
    hpad = zero_pad(h, nfft).astype(outdtype)
    if np.iscomplexobj(hpad):
        H = np.fft.fft(hpad)
    else:
        H = np.fft.rfft(hpad)
    H = (H / nfft).astype(fftdtype)  # divide by nfft b/c FFTW's ifft does not do this

    xpad = pyfftw.n_byte_align(np.zeros(nfft, outdtype), 16)  # outdtype so same type fft as h->H
    X = pyfftw.n_byte_align(np.zeros(len(H), fftdtype), 16)  # len(H) b/c rfft may be used
    xfft = pyfftw.FFTW(xpad, X, threads=_THREADS)

    y = pyfftw.n_byte_align_empty(nfft, 16, outdtype)
    ifft = pyfftw.FFTW(X, y, direction='FFTW_BACKWARD', threads=_THREADS)

    xtype = numba.__getattribute__(str(np.dtype(xdtype)))
    outtype = numba.__getattribute__(str(outdtype))
    ffttype = numba.__getattribute__(str(fftdtype))

    #@jit(restype=outtype[::1],
    #     argtypes=[outtype[::1], ffttype[::1], ffttype[::1], outtype[::1], xtype[::1]])
    #def filt(xpad, X, H, y, x):
        #xpad[:M] = x
        #xfft.execute()  # input in xpad, result in X
        #X[:] = H*X
        #ifft.execute()  # input in X, result in y
        #yc = y[:outlen].copy()
        #return yc

    #@filter_dec(h, M, nfft=nfft, H=H)
    #def numba_fftw(x):
        #return filt(xpad, X, H, y, x)

    #@jit(argtypes=[xtype[::1]])
    @jit
    def numba_fftw(x):
        xpad[:M] = x
        xfft.execute()  # input in xpad, result in X
        X[:] = H*X  # want expression that is optimized by numba but writes into X
        ifft.execute()  # input in X, result in y
        yc = y[:outlen].copy()
        return yc

    numba_fftw = filter_dec(h, M, nfft=nfft, H=H)(numba_fftw)

    return numba_fftw
Developer: ryanvolz, Project: echolect, Lines: 57, Source: filters.py
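Examples 1 and 5 both rely on the same idiom: promoting with np.complex64 ("the smallest complex type") produces a complex dtype while preserving the precision of the other operand. A quick illustration of that rule, independent of the echolect code:

import numpy as np

print(np.result_type(np.float32, np.complex64))    # complex64
print(np.result_type(np.float64, np.complex64))    # complex128
print(np.result_type(np.complex128, np.complex64)) # complex128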
Example 6: test_result_type
def test_result_type(self):
    self.check_promotion_cases(np.result_type)
    f64 = float64(0)
    c64 = complex64(0)
    ## Scalars do not coerce to complex if the value is real
    #assert_equal(np.result_type(c64, array([f64])), np.dtype(float64))
    # But they do if the value is complex
    assert_equal(np.result_type(complex64(3j), array([f64])),
                 np.dtype(complex128))
    # Scalars do coerce to complex even if the value is real
    # This is so "a+0j" can be reliably used to make something complex.
    assert_equal(np.result_type(c64, array([f64])), np.dtype(complex128))
Developer: bogdangherca, Project: numpy, Lines: 14, Source: test_numeric.py
Example 7: _convert_list
def _convert_list(self, value):
    """Convert a string into a typed numpy array.

    If it is not possible it returns a numpy string.
    """
    try:
        numpy_values = []
        values = value.split(" ")
        types = set([])
        for string_value in values:
            v = self._convert_scalar_value(string_value)
            numpy_values.append(v)
            types.add(v.dtype.type)

        result_type = numpy.result_type(*types)

        if issubclass(result_type.type, (numpy.string_, six.binary_type)):
            # use the raw data to create the result
            return numpy.string_(value)
        elif issubclass(result_type.type, (numpy.unicode_, six.text_type)):
            # use the raw data to create the result
            return numpy.unicode_(value)
        else:
            return numpy.array(numpy_values, dtype=result_type)
    except ValueError:
        return numpy.string_(value)
Developer: vallsv, Project: silx, Lines: 26, Source: fabioh5.py
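A detail worth noting from this example: result_type accepts any number of arguments, so it can fold a whole collection of scalar types into one common dtype before building the array. A minimal sketch of that usage outside silx (values and names are my own):

import numpy as np

values = [np.int8(1), np.float32(2.5), np.int32(7)]
common = np.result_type(*[v.dtype for v in values])
print(common)                           # float64 (int32 and float32 promote to float64)
print(np.array(values, dtype=common))   # [1.  2.5 7. ]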
Example 8: test_upcast
def test_upcast():
    a0 = csr_matrix([[np.pi, np.pi*1j], [3, 4]], dtype=complex)
    b0 = np.array([256+1j, 2**32], dtype=complex)

    for a_dtype in supported_dtypes:
        for b_dtype in supported_dtypes:
            msg = "(%r, %r)" % (a_dtype, b_dtype)

            if np.issubdtype(a_dtype, np.complexfloating):
                a = a0.copy().astype(a_dtype)
            else:
                a = a0.real.copy().astype(a_dtype)

            if np.issubdtype(b_dtype, np.complexfloating):
                b = b0.copy().astype(b_dtype)
            else:
                b = b0.real.copy().astype(b_dtype)

            if not (a_dtype == np.bool_ and b_dtype == np.bool_):
                c = np.zeros((2,), dtype=np.bool_)
                assert_raises(ValueError, _sparsetools.csr_matvec,
                              2, 2, a.indptr, a.indices, a.data, b, c)

            if ((np.issubdtype(a_dtype, np.complexfloating) and
                 not np.issubdtype(b_dtype, np.complexfloating)) or
                (not np.issubdtype(a_dtype, np.complexfloating) and
                 np.issubdtype(b_dtype, np.complexfloating))):
                c = np.zeros((2,), dtype=np.float64)
                assert_raises(ValueError, _sparsetools.csr_matvec,
                              2, 2, a.indptr, a.indices, a.data, b, c)

            c = np.zeros((2,), dtype=np.result_type(a_dtype, b_dtype))
            _sparsetools.csr_matvec(2, 2, a.indptr, a.indices, a.data, b, c)
            assert_allclose(c, np.dot(a.toarray(), b), err_msg=msg)
Developer: ElDeveloper, Project: scipy, Lines: 34, Source: test_sparsetools.py
Example 9: _normalize_vector_type
def _normalize_vector_type(self, dtype):
    """Normalize the """
    if self.__at_least_32bits:
        if numpy.issubdtype(dtype, numpy.signedinteger):
            dtype = numpy.result_type(dtype, numpy.uint32)
        if numpy.issubdtype(dtype, numpy.unsignedinteger):
            dtype = numpy.result_type(dtype, numpy.uint32)
        elif numpy.issubdtype(dtype, numpy.floating):
            dtype = numpy.result_type(dtype, numpy.float32)
        elif numpy.issubdtype(dtype, numpy.complexfloating):
            dtype = numpy.result_type(dtype, numpy.complex64)
    if self.__signed_type:
        if numpy.issubdtype(dtype, numpy.unsignedinteger):
            signed = numpy.dtype("%s%i" % ('i', dtype.itemsize))
            dtype = numpy.result_type(dtype, signed)
    return dtype
Developer: vallsv, Project: silx, Lines: 16, Source: fabioh5.py
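The pattern used here, widening a dtype to at least 32 bits by promoting it against a 32-bit type, is handy on its own. A small standalone sketch with dtype choices of my own (not silx API):

import numpy as np

# integers: promote to at least 32 bits
print(np.result_type(np.uint16, np.uint32))    # uint32
print(np.result_type(np.int8, np.uint32))      # int64 (a signed type wide enough for both)

# floats: promote to at least float32
print(np.result_type(np.float16, np.float32))  # float32
print(np.result_type(np.float64, np.float32))  # float64 (already wide enough)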
Example 10: matvec_transp
def matvec_transp(x):
    if x.shape != (nargout,):
        msg = 'Input has shape ' + str(x.shape)
        msg += ' instead of (%d,)' % self.nargout
        raise ValueError(msg)
    result_type = np.result_type(self.dtype, x.dtype)
    return np.zeros(nargin, dtype=result_type)
Developer: PythonOptimizers, Project: pykrylov, Lines: 7, Source: linop.py
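The pykrylov snippet uses result_type to pick the dtype of the operator's output from the operator's own dtype and the incoming vector's dtype. A minimal illustration of that rule outside pykrylov (names and values are my own):

import numpy as np

op_dtype = np.float32                       # dtype the linear operator was built with
x = np.zeros(4, dtype=np.complex64)         # incoming vector

out_dtype = np.result_type(op_dtype, x.dtype)
print(out_dtype)                            # complex64: a real operator applied to a complex vector
y = np.zeros(3, dtype=out_dtype)            # output buffer with the promoted dtype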
Example 11: rfft
def rfft(a):
    n = a.shape[-1]
    b = a.reshape(np.prod(a.shape[:-1]), n)
    fb = np.empty((b.shape[0], b.shape[1]//2 + 1), dtype=np.result_type(a, 0j))
    for i in range(b.shape[0]):
        fb[i] = myfft.rfft(b[i]) * n**-0.5
    return np.reshape(fb, list(a.shape[:-1]) + [fb.shape[-1]])
Developer: amaurea, Project: enutil, Lines: 7, Source: misc.py
Example 12: promote
def promote(*operands):
    """
    Take an arbitrary number of graph nodes and produce the promoted
    dtype by discarding all shape information and just looking at the
    measures.

    ::

        5, 5, | int   |
        2, 5, | int   |
        1, 3, | float |

    >>> promote(IntNode(1), Op(IntNode(2)))
    int
    >>> promote(FloatNode(1), Op(IntNode(2)))
    float
    """
    # Looks something like this...
    #   (ArrayNode, IntNode...) -> (dshape('2, int'), dshape('int'))
    #   (dshape('2, int', dshape('int')) -> (dshape('int', dshape('int'))
    #   (dshape('2, int', dshape('int')) -> (dtype('int', dtype('int'))
    types = (op.simple_type() for op in operands if op is not None)
    measures = (extract_measure(t) for t in types)
    dtypes = (to_numpy(m) for m in measures)

    promoted = np.result_type(*dtypes)
    datashape = CType.from_dtype(promoted)
    return datashape
Developer: bussiere, Project: blaze, Lines: 31, Source: coretypes.py
Example 13: _contract_plain
def _contract_plain(mydf, mos, coulG, phase, max_memory):
    cell = mydf.cell
    moiT, mojT, mokT, molT = mos
    nmoi, nmoj, nmok, nmol = [x.shape[0] for x in mos]
    ngrids = moiT.shape[1]
    wcoulG = coulG * (cell.vol/ngrids)
    dtype = numpy.result_type(phase, *mos)
    eri = numpy.empty((nmoi*nmoj, nmok*nmol), dtype=dtype)

    blksize = int(min(max(nmoi, nmok), (max_memory*1e6/16 - eri.size)/2/ngrids/max(nmoj, nmol)+1))
    assert blksize > 0
    buf0 = numpy.empty((blksize, max(nmoj, nmol), ngrids), dtype=dtype)
    buf1 = numpy.ndarray((blksize, nmoj, ngrids), dtype=dtype, buffer=buf0)
    buf2 = numpy.ndarray((blksize, nmol, ngrids), dtype=dtype, buffer=buf0)
    for p0, p1 in lib.prange(0, nmoi, blksize):
        mo_pairs = numpy.einsum('ig,jg->ijg', moiT[p0:p1].conj()*phase,
                                mojT, out=buf1[:p1-p0])
        mo_pairs_G = tools.fft(mo_pairs.reshape(-1, ngrids), mydf.mesh)
        mo_pairs = None
        mo_pairs_G *= wcoulG
        v = tools.ifft(mo_pairs_G, mydf.mesh)
        mo_pairs_G = None
        v *= phase.conj()
        if dtype == numpy.double:
            v = numpy.asarray(v.real, order='C')
        for q0, q1 in lib.prange(0, nmok, blksize):
            mo_pairs = numpy.einsum('ig,jg->ijg', mokT[q0:q1].conj(),
                                    molT, out=buf2[:q1-q0])
            eri[p0*nmoj:p1*nmoj, q0*nmol:q1*nmol] = lib.dot(v, mo_pairs.reshape(-1, ngrids).T)
        v = None
    return eri
Developer: chrinide, Project: pyscf, Lines: 31, Source: fft_ao2mo.py
Example 14: call
def call(self, args, axis=0, out=None, chunksize=1024 * 1024, **kwargs):
    """ axis is the axis to chop it off.

        if self.altreduce is set, the results will
        be reduced with altreduce and returned
        otherwise will be saved to out, then return out.
    """
    if self.altreduce is not None:
        ret = [None]
    else:
        if out is None:
            if self.outdtype is not None:
                dtype = self.outdtype
            else:
                try:
                    dtype = numpy.result_type(*[args[i] for i in self.ins] * 2)
                except:
                    dtype = None
            out = sharedmem.empty(
                    numpy.broadcast(*[args[i] for i in self.ins] * 2).shape,
                    dtype=dtype)
    if axis != 0:
        for i in self.ins:
            args[i] = numpy.rollaxis(args[i], axis)
        out = numpy.rollaxis(out, axis)
    size = numpy.max([len(args[i]) for i in self.ins])
    with sharedmem.MapReduce() as pool:
        def work(i):
            sl = slice(i, i+chunksize)
            myargs = args[:]
            for j in self.ins:
                try:
                    tmp = myargs[j][sl]
                    a, b, c = sl.indices(len(args[j]))
                    myargs[j] = tmp
                except Exception as e:
                    print(tmp)
                    print(j, e)
                    pass
            if b == a: return None
            rt = self.ufunc(*myargs, **kwargs)
            if self.altreduce is not None:
                return rt
            else:
                out[sl] = rt
        def reduce(rt):
            if self.altreduce is None:
                return
            if ret[0] is None:
                ret[0] = rt
            elif rt is not None:
                ret[0] = self.altreduce(ret[0], rt)
        pool.map(work, range(0, size, chunksize), reduce=reduce)
    if self.altreduce is None:
        if axis != 0:
            out = numpy.rollaxis(out, 0, axis + 1)
        return out
    else:
        return ret[0]
Developer: StevenLOL, Project: sharedmem, Lines: 60, Source: array.py
Example 15: _inequality
def _inequality(self, other, op, op_name, bad_scalar_msg):
    # Scalar other.
    if isscalarlike(other):
        if 0 == other and op_name in ('_le_', '_ge_'):
            raise NotImplementedError(" >= and <= don't work with 0.")
        elif op(0, other):
            warn(bad_scalar_msg, SparseEfficiencyWarning)
            other_arr = np.empty(self.shape, dtype=np.result_type(other))
            other_arr.fill(other)
            other_arr = self.__class__(other_arr)
            return self._binopt(other_arr, op_name)
        else:
            return self._scalar_binopt(other, op)
    # Dense other.
    elif isdense(other):
        return op(self.todense(), other)
    # Sparse other.
    elif isspmatrix(other):
        # TODO sparse broadcasting
        if self.shape != other.shape:
            raise ValueError("inconsistent shapes")
        elif self.format != other.format:
            other = other.asformat(self.format)
        if op_name not in ('_ge_', '_le_'):
            return self._binopt(other, op_name)

        warn("Comparing sparse matrices using >= and <= is inefficient, "
             "using <, >, or !=, instead.", SparseEfficiencyWarning)
        all_true = self.__class__(np.ones(self.shape))
        res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
        return all_true - res
    else:
        raise ValueError("Operands could not be compared.")
Developer: beyondmetis, Project: scipy, Lines: 33, Source: compressed.py
Example 16: covariance
def covariance(m, ddof=1.5):
    """
    A simplified version of numpy's 'cov' which allows ddof=1.5 and assumes
    variables to be in columns (rowvar=0).
    """
    # Handles complex arrays too
    m = np.asarray(m)
    dtype = np.result_type(m, np.float64)
    X = np.array(m, ndmin=2, dtype=dtype)

    if X.shape[0] != 1:
        X = X.T
    if X.shape[0] == 0:
        return np.array([]).reshape(0, 0)

    avg = np.mean(X, axis=1)

    # Determine the normalization
    fact = float(X.shape[1] - ddof)
    if fact <= 0:
        warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
        fact = 0.0

    X -= avg[:, None]
    X_T = X.T
    return (np.dot(X, X_T.conj())/fact).squeeze()
Developer: mshvartsman, Project: pyEPABC, Lines: 26, Source: EPABC.py
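One detail worth highlighting in covariance: np.result_type(m, np.float64) guarantees a floating-point working dtype even when the input is an integer array, while leaving complex input complex. A tiny sketch of that effect with sample values of my own:

import numpy as np

m_int = np.array([[1, 2], [3, 4]])
m_cplx = np.array([[1+2j, 3-1j]])

print(np.result_type(m_int, np.float64))   # float64    -> integer data is computed in float
print(np.result_type(m_cplx, np.float64))  # complex128 -> complex data stays complex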
Example 17: cast
def cast(arrays, dtype=None, order='c'):
    """
    Cast a list of arrays into the same data type.

    Parameters
    ----------
    arrays : sequence of array-like or None
        The list of arrays to be cast.
    dtype : numpy.dtype
        If specified, all arrays will be cast to this data type. Otherwise,
        the data type is inferred from the arrays.

    Example
    -------
    >>> cast([[1., 2.], None, np.array(2j)])
    (array([ 1.+0.j,  2.+0.j]), None, array(2j))
    """
    arrays = tuple(arrays)
    if dtype is None:
        arrays_ = [np.array(a, copy=False) for a in arrays if a is not None]
        dtype = np.result_type(*arrays_)
    result = (np.array(a, dtype=dtype, order=order, copy=False)
              if a is not None else None for a in arrays)
    return tuple(result)
Developer: ghisvail, Project: pyoperators, Lines: 25, Source: misc.py
Example 18: trisolve
def trisolve(dl, d, du, b, inplace=False):
    """
    The tridiagonal matrix (Thomas) algorithm for solving tridiagonal systems
    of equations:

        a_{i}x_{i-1} + b_{i}x_{i} + c_{i}x_{i+1} = y_{i}

    in matrix form:

        Mx = b

    TDMA is O(n), whereas standard Gaussian elimination is O(n^3).

    Arguments:
    -----------
        dl: (n - 1,) vector
            the lower diagonal of M
        d: (n,) vector
            the main diagonal of M
        du: (n - 1,) vector
            the upper diagonal of M
        b: (n,) vector
            the result of Mx
        inplace:
            if True, and if d and b are both float64 vectors, they will be
            modified in place (may be faster)

    Returns:
    -----------
        x: (n,) vector
            the solution to Mx = b

    References:
    -----------
    http://en.wikipedia.org/wiki/Tridiagonal_matrix_algorithm
    http://www.netlib.org/lapack/explore-html/d1/db3/dgtsv_8f.html
    """
    if (dl.shape[0] != du.shape[0] or (d.shape[0] != dl.shape[0] + 1)
            or d.shape[0] != b.shape[0]):
        raise ValueError('Invalid diagonal shapes')

    bshape_in = b.shape
    rtype = np.result_type(dl, d, du, b)

    if not inplace:
        # force a copy
        dl = np.array(dl, dtype=rtype, copy=True, order='F')
        d = np.array(d, dtype=rtype, copy=True, order='F')
        du = np.array(du, dtype=rtype, copy=True, order='F')
        b = np.array(b, dtype=rtype, copy=True, order='F')

    # this may also force copies if arrays have inconsistent types / incorrect
    # order
    dl, d, du, b = (np.array(v, dtype=rtype, copy=False, order='F')
                    for v in (dl, d, du, b))

    # use the LAPACK implementation
    _lapack_trisolve(dl, d, du, b, rtype)

    return b.reshape(bshape_in)
Developer: Palpatineli, Project: PyFNND, Lines: 60, Source: _tridiag_solvers.py
Example 19: use_hmm
def use_hmm(observations, state_count, symbol_count, maxit=1000, accuracy=-1, retries=10, dtype=numpy.float32):
    curr_A, curr_B, curr_pi = None, None, None
    curr_eps = None

    # try:
    #     importlib.import_module('spscicomp.hmm.kernel.opencl')
    #     kernel = spscicomp.hmm.kernel.opencl
    #     LOG.debug('OpenCL-Kernel used')
    # except:
    #     LOG.debug('OpenCL-Kernel not available')
    try:
        importlib.import_module('spscicomp.hmm.kernel.c')
        kernel = spscicomp.hmm.kernel.c
        if numpy.result_type(observations) != numpy.int16:
            LOG.debug('Observations data type was not int16, thus casting it for c-extension.')
            observations = numpy.array(observations, dtype=numpy.int16)
        LOG.debug('C-Kernel used')
    except:
        LOG.debug('C-Kernel not available')
        kernel = spscicomp.hmm.kernel.python
        LOG.debug('Python-Kernel used')

    for _ in range(0, retries):
        A = spscicomp.hmm.utility.generate_random_matrice(state_count, state_count)
        B = spscicomp.hmm.utility.generate_random_matrice(state_count, symbol_count)
        pi = spscicomp.hmm.utility.generate_random_array(state_count)
        A, B, pi, eps, it = spscicomp.hmm.algorithms.baum_welch_multiple(obs=observations, A=A, B=B, pi=pi,
                                                                         kernel=kernel,
                                                                         dtype=dtype, maxit=maxit, accuracy=accuracy)
        if curr_eps is None or curr_eps < eps:
            curr_A, curr_B, curr_pi, curr_eps = A, B, pi, eps

    return curr_A, curr_B, curr_pi
Developer: clonker, Project: spscicomp, Lines: 34, Source: use_hmm.py
Example 20: _coo_matmul
def _coo_matmul(sp_data, sp_row, sp_col, sp_shape, dn, transa, transb, transc,
                dtype=None):
    if dtype is None:
        dtype = numpy.result_type(sp_data.dtype, dn.dtype)

    A_data = sp_data
    if transa:
        A_row = sp_col
        A_col = sp_row
        A_shape = (sp_shape[1], sp_shape[0])
    else:
        A_row = sp_row
        A_col = sp_col
        A_shape = sp_shape
    if transb:
        B = dn.swapaxes(-1, -2)
    else:
        B = dn

    xp = cuda.get_array_module(A_data, B)
    if xp is numpy:
        C = _coo_matmul_cpu(A_data, A_row, A_col, A_shape, B, dtype)
    else:
        C = _coo_matmul_gpu(A_data, A_row, A_col, A_shape, B, dtype)

    if transc:
        C = C.swapaxes(-1, -2)
    return C
Developer: qipengzhang, Project: chainer, Lines: 28, Source: sparse_matmul.py
Note: the numpy.result_type examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow each project's License. Do not repost without permission.