This article collects and summarizes typical usage examples of the numpy.nditer function in Python. If you have been wondering what exactly nditer does, how to use it, or what real-world nditer code looks like, the hand-picked examples below may help.
The following 20 code examples of the nditer function are listed in order of popularity by default.
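Before the examples, here is a minimal self-contained sketch (not taken from any of the projects below) illustrating the two nditer patterns that recur throughout this page: read-only iteration while tracking multi_index, and element-wise in-place modification with op_flags=['readwrite'].

import numpy as np

a = np.arange(6).reshape(2, 3)

# Pattern 1: read-only iteration, tracking each element's index.
it = np.nditer(a, flags=['multi_index'])
while not it.finished:
    print(it.multi_index, it[0])
    it.iternext()

# Pattern 2: in-place modification through a writable iterator.
# (The context-manager form requires NumPy >= 1.15; a plain for-loop also works.)
with np.nditer(a, op_flags=['readwrite']) as it2:
    for x in it2:
        x[...] = 2 * x

print(a)  # [[ 0  2  4]
          #  [ 6  8 10]]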
Example 1: group_ref_color_atom_overlaps
def group_ref_color_atom_overlaps(results):
"""
Create a 3D masked array containing all overlap scores.
Parameters
----------
scores : array_like
2D array containing reference molecule color atom overlap results.
"""
# get maximum number of ref color atoms
# don't use `for result in it` because that gives an array of size 1
max_size = 0
it = np.nditer(results, flags=['multi_index', 'refs_ok'])
for _ in it:
max_size = max(max_size, len(results[it.multi_index]))
# build a masked array containing results
# don't use data[it.multi_index][:result.size] because that assigns
# to a view and not to data
data = np.ma.masked_all((results.shape[:2] + (max_size,)), dtype=float)
it = np.nditer(results, flags=['multi_index', 'refs_ok'])
for _ in it:
i, j = it.multi_index
result = results[i, j]
data[i, j, :result.size] = result
return data
Author: skearnes, Project: color-features, Lines: 26, Source: overlap.py
Example 2: test_iter_allocate_output_subtype
def test_iter_allocate_output_subtype():
    # Make sure that the subtype with priority wins
    # 2018-04-29: moved here from core.tests.test_nditer, given the
    # matrix specific shape test.

    # matrix vs ndarray
    a = np.matrix([[1, 2], [3, 4]])
    b = np.arange(4).reshape(2, 2).T
    i = np.nditer([a, b, None], [],
                  [['readonly'], ['readonly'], ['writeonly', 'allocate']])
    assert_(type(i.operands[2]) is np.matrix)
    assert_(type(i.operands[2]) is not np.ndarray)
    assert_equal(i.operands[2].shape, (2, 2))

    # matrix always wants things to be 2D
    b = np.arange(4).reshape(1, 2, 2)
    assert_raises(RuntimeError, np.nditer, [a, b, None], [],
                  [['readonly'], ['readonly'], ['writeonly', 'allocate']])
    # but if subtypes are disabled, the result can still work
    i = np.nditer([a, b, None], [],
                  [['readonly'], ['readonly'],
                   ['writeonly', 'allocate', 'no_subtype']])
    assert_(type(i.operands[2]) is np.ndarray)
    assert_(type(i.operands[2]) is not np.matrix)
    assert_equal(i.operands[2].shape, (1, 2, 2))
Author: chinaloryu, Project: numpy, Lines: 25, Source: test_interaction.py
Example 3: launch_boolean_query
def launch_boolean_query(self, query, num_results):
    doc_relevance_vector = np.zeros(len(self.doc_index.index))
    query_feature_vector = \
        helpers.create_doc_index(self.dictionary, helpers.docs2bows([query], self.dictionary)).index[0]
    iter_count = 0
    for doc_feature_vector in self.doc_index.index:
        if np.sum(query_feature_vector) > 0 and np.array_equal(
                np.where((query_feature_vector > 0) & (doc_feature_vector > 0)),
                np.where(query_feature_vector > 0)):
            doc_relevance_vector[iter_count] = 1
        iter_count += 1
    relevant_docs = np.where(doc_relevance_vector == 1)[0]
    if relevant_docs.size == 0:
        return []
    else:
        results_shown = 0
        for doc in np.nditer(relevant_docs):
            if results_shown < num_results:
                print('[ID: ' + str(doc + 1) + '] ' + self.corpus[doc])
                results_shown += 1
        ranking = []
        for doc in np.nditer(relevant_docs):
            ranking.append((doc, 1))
        return ranking
Author: ph1l337, Project: information-retrieval-system, Lines: 25, Source: searchablecorpus.py
Example 4: _form_slip_xyz_file_string
def _form_slip_xyz_file_string(self):
    _txt = ''
    for lon, lat, s in zip(np.nditer(self.lons),
                           np.nditer(self.lats),
                           np.nditer(self.slip)):
        _txt +='%f %f %f\n'%(lon, lat, s)
    return _txt
Author: zy31415, Project: viscojapan, Lines: 7, Source: plot_slip.py
Example 5: var_inner
def var_inner(self,var_v1,var_v2):
    v1=[]
    v2=[]
    for m1,m2 in zip(var_v1,var_v2):
        v1=v1+[x for x in np.nditer(m1, op_flags=['readwrite'])]
        v2=v2+[x for x in np.nditer(m2, op_flags=['readwrite'])]
    return np.inner(v1,v2)
Author: Vendea, Project: summer-research-2016, Lines: 7, Source: BFGS_NL.py
Example 6: descend_weights_numeric
def descend_weights_numeric(cost, weights, reg, learn, step):
"""
Gradient descent, for weights
cost - objective function, not requiring parameters, without regularisation
weights - their derivative will be approximated
reg - regularisation factor
learn - (negative) learning rate
step - step size for derivative
"""
derivative = []
for arr in weights:
der = zeros(arr.shape)
it = nditer(arr, flags=['multi_index'], op_flags=['readwrite'])
for value in it:
old_val = value.copy()
old_obj = cost()
value[...] += step
new_obj = cost()
value[...] = old_val
grad = (new_obj - old_obj)/step
grad = add_reg(old_val, grad, reg)
der[it.multi_index] = grad
derivative.append(der)
for n, arr in enumerate(weights):
der = derivative[n]
it = nditer(arr, flags=['multi_index'], op_flags=['readwrite'])
for value in it:
value[...] = descend(value[...], der[it.multi_index]*learn)
Author: guyemerson, Project: SentiMerge, Lines: 29, Source: latent.py
Example 7: search
def search(self, fn, top_n=10, sim_thresh=None):
"""
retrieval face from database,
return top_n similar faces' imgIDs, return None if failed
"""
if top_n > len(self.data):
top_n = len(self.data)
aligned_fn = send2align(fn)
aligned_arr = path2arr(aligned_fn)
if aligned_arr is None:
print "align none."
return None
deepIDfea = self.model.getID([aligned_arr])[0]
sims = [cosine_similarity(deepIDfea, item[1])[0][0] for item in self.data]
# print len(self.data), len(sims)
for i in range(len(sims)):
print sims[i], self.data[i][0]
sort_index = np.argsort(-np.array(sims))
result = []
if sim_thresh is None:
for index in np.nditer(sort_index):
cur_id = self.data[index][0].split("-")[0]
if cur_id not in result and len(result) < top_n:
result.append(cur_id)
return result
else:
for index in np.nditer(sort_index):
if sims[index] < sim_thresh:
break
cur_id = self.data[index][0].split("-")[0]
if cur_id not in result:
result.append(cur_id)
return result
Author: cyh24, Project: find-lost, Lines: 33, Source: face_align_client.py
Example 8: __init__
def __init__(self, maxResult=10, gridSpec=None, verbose=True):
    self.gridSpec = gridSpec
    self.maxResult = maxResult
    self.enableGrid = False
    self.verbose = verbose
    # Calculate exact grid
    self.grid = []
    gsTau = self.gridSpec[0]
    gsS = self.gridSpec[1]
    if len(gsTau) > 1 and len(gsS) > 1:
        self.enableGrid = True
        countTau = 5
        countS = 5
        if len(gsTau) > 2:
            countTau = int(gsTau[2])
        if len(gsS) > 2:
            countS = int(gsS[2])
        minTau = gsTau[0] - gsTau[1]
        maxTau = (gsTau[0] + gsTau[1]) * (1+ (1/ (2*countTau)))
        minS = gsS[0] - gsS[1]
        maxS = (gsS[0] + gsS[1]) * (1+ (1/ (2*countS)))
        tau = np.arange(minTau, maxTau, (gsTau[1] * 2.0) / countTau)
        S = np.arange(minS, maxS, (gsS[1] * 2.0) / countS)
        for t in np.nditer(tau):
            for s in np.nditer(S):
                self.grid.append( np.array([t, s]) )
        self.dTau = tau[1] - tau[0]
        self.dS = S[1] - S[0]
        self.bounds = [ [minTau, maxTau], [minS, maxS] ]
Author: FKlama, Project: hycud, Lines: 32, Source: Minimizer.py
Example 9: run
def run(self):
    # delta mu iteration
    for dmu in np.nditer(self.delta_mu):
        data = []
        self.mu[0] += dmu
        self.mu[1] = -self.mu[0]
        self.x_[1] = self.x_1
        self.x_[0] = 1 - self.x_1
        print(' mu = {:06.4f}:'.format(self.mu[0].item(0)))
        # temperature iteration
        for temp in np.nditer(self.temp):
            self.beta = np.float64(pow(self.bzc * temp, -1))
            # calculate
            self.__run()
            # push result into data
            data.append({'temp': temp.item(0), 'c': self.x_[1].item(0)})
            print(' T = {:06.3f}K, c = {:06.6f}, count = {}'.
                  format(temp.item(0), self.x_[1].item(0), self.count))
        print('\n')
        # save result to output
        self.output['Results'].append(
            {'mu': self.mu[0].item(0), 'data': data})
        self.mu[0] -= dmu
Author: TsumiNa, Project: CVM, Lines: 28, Source: tetraSquare.py
Example 10: calc
def calc(self, input):
"""
Calculates the network output for the given input
@param input A array of inputs [in1, in2,..]
@return lastNetResult
"""
lastNetResult = np.array(input)
# save each layer in/output for training
self.inputs = []
self.outputs = []
for i in range(len(self.layout) - 1):
# append bias
# self.outputFun(lastNetResult)
lastNetResult = np.hstack((lastNetResult, [1]))
self.inputs.append(lastNetResult)
# calc result
lastNetResult = np.dot(self.weights[i], lastNetResult)
if i == len(self.layout) - 2:
# different activation function for last layer
lastNetResult = np.array(list(map(
self.last_layer_transfer, np.nditer(lastNetResult))))
else:
# lastNetResult = self.layer_transfer(lastNetResult)
lastNetResult = np.array(list(map(
self.layer_transfer, np.nditer(lastNetResult))))
self.outputs.append(lastNetResult)
return lastNetResult
Author: dtbinh, Project: praktikum, Lines: 33, Source: multi_layer.py
Example 11: test_external_loop
def test_external_loop(self):
    from numpy import arange, nditer, array
    a = arange(24).reshape(2, 3, 4)
    import sys
    if '__pypy__' in sys.builtin_module_names:
        raises(NotImplementedError, nditer, a, flags=['external_loop'])
        skip('nditer external_loop not implemented')
    r = []
    n = 0
    for x in nditer(a, flags=['external_loop']):
        r.append(x)
        n += 1
    assert n == 1
    assert (array(r) == range(24)).all()
    r = []
    n = 0
    for x in nditer(a, flags=['external_loop'], order='F'):
        r.append(x)
        n += 1
    assert n == 12
    assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21], [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all()
    e = raises(ValueError, 'r[0][0] = 0')
    assert str(e.value) == 'assignment destination is read-only'
    r = []
    for x in nditer(a.T, flags=['external_loop'], order='F'):
        r.append(x)
    array_r = array(r)
    assert len(array_r.shape) == 2
    assert array_r.shape == (1,24)
    assert (array(r) == arange(24)).all()
Author: yuyichao, Project: pypy, Lines: 30, Source: test_nditer.py
Example 12: process
def process(self):
    # counts
    self.count += 1
    # calculate eta
    eta_sum = np.float64(0)
    dt_ = np.zeros((2, 2, 2, 2, 2, 2), np.float64)
    it = np.nditer(dt_, flags=['multi_index'])
    while not it.finished:
        i, j, k, l, m, n = it.multi_index
        dt_[i, j, k, l, m, n] = __eta_dt(self, i, j, k, l, m, n)
        eta_sum += dt_[i, j, k, l, m, n]
        it.iternext()

    ############################
    # normalization
    ############################
    self.checker = np.float64(0)
    # 4-body
    self.m41_ = np.zeros((2, 2, 2, 2), np.float64)
    # 3-body
    self.m31_ = np.zeros((2, 2, 2), np.float64)
    # pair
    self.m21_ = np.zeros((2, 2), np.float64)
    self.m22_ = np.zeros((2, 2), np.float64)
    m22_ = np.zeros((2, 2), np.float64)
    # point
    self.x_ = np.zeros((2), np.float64)
    it = np.nditer(dt_, flags=['multi_index'])
    while not it.finished:
        i, j, k, l, m, n = it.multi_index
        # print('self.zt_{} is: {}'.format(it.multi_index, self.zt_[i, j, k]))
        dt_[i, j, k, l, m, n] /= eta_sum
        self.checker += np.absolute(dt_[i, j, k, l, m, n] -
                                    self.dt_[i, j, k, l, m, n])
        # dt_
        self.dt_[i, j, k, l, m, n] = dt_[i, j, k, l, m, n]
        # m41_
        self.m41_[i, j, k, l] += self.dt_[i, j, k, l, m, n]
        # m31_
        self.m31_[i, m, k] += self.dt_[i, j, k, l, m, n]
        # m21_
        self.m21_[i, j] += self.dt_[i, j, k, l, m, n]
        # m22_
        self.m22_[j, n] += self.dt_[i, j, k, l, m, n]
        m22_[i, m] += self.dt_[i, j, k, l, m, n]
        # x_
        self.x_[i] += self.dt_[i, j, k, l, m, n]
        it.iternext()
Author: TsumiNa, Project: CVM, Lines: 60, Source: process.py
Example 13: buildDistanceMatrix
def buildDistanceMatrix(self):
    for head, ngrams in self.head_clusters.iteritems():
        word_indices = []
        stmt_indices = []
        priority_indices = []
        feature_words = []
        sections = []
        dm_w_rows = []
        dm_s_rows = []
        dm_p_rows = []
        for ngram in ngrams:
            word_indices.append(ngram[3][1])
            stmt_indices.append(ngram[3][0])
            priority_indices.append(ngram[1])
            feature_words.append(ngram[0])
            sections.append(ngram[-1])
        word_indices_clone = word_indices
        stmt_indices_clone = stmt_indices
        priority_indices_clone = priority_indices
        for word_index, stmt_index, priority_index in zip(word_indices, stmt_indices, priority_indices):
            dm_w_row = []
            dm_s_row = []
            dm_p_row = []
            for word_index_clone, stmt_index_clone, priority_index_clone in zip(word_indices_clone, stmt_indices_clone, priority_indices_clone):
                dm_w_row.append(fabs(((1 + word_index) * (1 + stmt_index)) - ((1 + word_index_clone) * (1 + stmt_index_clone))))
                dm_s_row.append(fabs((1 + stmt_index) - (1 + stmt_index_clone)))
                dm_p_row.append(fabs(float(priority_index) - float(priority_index_clone)))
            dm_w_rows.append(dm_w_row)
            dm_s_rows.append(dm_s_row)
            dm_p_rows.append(dm_p_row)
        dm_w = np.array(dm_w_rows)
        dm_s = np.array(dm_s_rows)
        dm_p = np.array(dm_p_rows)
        #print dm_w
        #print dm_s
        #print dm_p
        prox_mat = []
        for w_dist, s_dist, PI in zip(np.nditer(dm_w), np.nditer(dm_s), np.nditer(dm_p)):
            if PI == 0.0:
                proximity_score = ((w_dist + len(np.unique(dm_s) * s_dist)) / (dm_w.shape[0] * len(np.unique(dm_s))))
                prox_mat.append(proximity_score)
            else:
                proximity_score = ((w_dist + len(np.unique(dm_s) * s_dist)) / (dm_w.shape[0] * len(np.unique(dm_s)))) * log10(10 * PI)
                prox_mat.append(proximity_score)
        ps = np.array(prox_mat)
        ps = np.reshape(ps, dm_w.shape)
        #print ps
        for r, row in enumerate(ps):
            for i, ele in enumerate(row):
                if ele == min(row):
                    self.f2.writerow([feature_words[r], priority_indices[r], 1 - np.min(row), feature_words[i], sections[r]])
Author: arunenigma, Project: Thesis, Lines: 60, Source: proximity_finder.py
Example 14: test_external_loop
def test_external_loop(self):
    from numpy import arange, nditer, array
    a = arange(24).reshape(2, 3, 4)
    import sys
    r = []
    for x in nditer(a, flags=['external_loop']):
        r.append(x)
    assert len(r) == 1
    assert r[0].shape == (24,)
    assert (array(r) == range(24)).all()
    r = []
    for x in nditer(a, flags=['external_loop'], order='F'):
        r.append(x)
    assert len(r) == 12
    assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21],
                         [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23],
                         ]).all()
    e = raises(ValueError, 'r[0][0] = 0')
    assert str(e.value) == 'assignment destination is read-only'
    r = []
    for x in nditer(a.T, flags=['external_loop'], order='F'):
        r.append(x)
    array_r = array(r)
    assert len(array_r.shape) == 2
    assert array_r.shape == (1,24)
    assert (array(r) == arange(24)).all()
Author: Qointum, Project: pypy, Lines: 26, Source: test_nditer.py
Example 15: rvs
def rvs(self, loc=0, scale=1, size=1):
"""Random variates.
Parameters
----------
loc : float or np.ndarray
0-D or 1-D tensor.
scale : float or np.ndarray
0-D or 1-D tensor, with all elements constrained to
:math:`scale > 0`.
size : int
Number of random variable samples to return.
Returns
-------
np.ndarray
A np.ndarray of dimensions size x shape.
"""
if not isinstance(loc, np.ndarray):
loc = np.asarray(loc)
if not isinstance(scale, np.ndarray):
scale = np.asarray(scale)
if len(loc.shape) == 0:
return stats.norm.rvs(loc, scale, size=size)
x = []
for locidx, scaleidx in zip(np.nditer(loc), np.nditer(scale)):
x += [stats.norm.rvs(locidx, scaleidx, size=size)]
# Note this doesn't work for multi-dimensional sizes.
x = np.asarray(x).transpose()
return x
Author: blei-lab, Project: edward, Lines: 32, Source: distributions.py
Example 16: write_array
def write_array(fp, array, version=None):
"""
Write an array to an NPY file, including a header.
If the array is neither C-contiguous nor Fortran-contiguous AND the
file_like object is not a real file object, this function will have to
copy data in memory.
Parameters
----------
fp : file_like object
An open, writable file object, or similar object with a ``.write()``
method.
array : ndarray
The array to write to disk.
version : (int, int) or None, optional
The version number of the format. None means use the oldest supported
version that is able to store the data. Default: None
Raises
------
ValueError
If the array cannot be persisted.
Various other errors
If the array contains Python objects as part of its dtype, the
process of pickling them may raise various errors if the objects
are not picklable.
"""
_check_version(version)
used_ver = _write_array_header(fp, header_data_from_array_1_0(array),
version)
# this warning can be removed when 1.9 has aged enough
if version != (2, 0) and used_ver == (2, 0):
warnings.warn("Stored array in format 2.0. It can only be"
"read by NumPy >= 1.9", UserWarning)
# Set buffer size to 16 MiB to hide the Python loop overhead.
buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
if array.dtype.hasobject:
# We contain Python objects so we cannot write out the data directly.
# Instead, we will pickle it out with version 2 of the pickle protocol.
pickle.dump(array, fp, protocol=2)
elif array.flags.f_contiguous and not array.flags.c_contiguous:
if isfileobj(fp):
array.T.tofile(fp)
else:
for chunk in numpy.nditer(
array, flags=['external_loop', 'buffered', 'zerosize_ok'],
buffersize=buffersize, order='F'):
fp.write(chunk.tobytes('C'))
else:
if isfileobj(fp):
array.tofile(fp)
else:
for chunk in numpy.nditer(
array, flags=['external_loop', 'buffered', 'zerosize_ok'],
buffersize=buffersize, order='C'):
fp.write(chunk.tobytes('C'))
Author: Dimitris0mg, Project: numpy, Lines: 60, Source: format.py
Example 17: clustering
def clustering(self):
"""
Class the GSOM world into the right number of clusters, update cluster association dictionary as well as the
representative of each cluster.
"""
updated_representative = {}
for cell in np.nditer(self._world):
distance = {}
for key, value in self._representative.iteritems():
distance[key] = np.linalg.norm(cell - value)
belong_group = min(distance, key=distance.get)
self._association[belong_group].append(cell)
for key in self._representative.keys():
updated_representative[key] = np.mean(self._association[key], axis=0)
while self._representative != updated_representative:
self._representative = updated_representative
for cell in np.nditer(self._world):
distance = {}
for key, value in self._representative.iteritems():
distance[key] = np.linalg.norm(cell - value)
belong_group = min(distance, key=distance.get)
self._association[belong_group].append(cell)
for key in self._representative.keys():
updated_representative[key] = np.mean(self._association[key], axis=0)
Author: shaunrong, Project: Fooling-Larson, Lines: 25, Source: Clustering.py
Example 18: numerical_gradients
def numerical_gradients(self):
"""Compute numerical gradients of f with respect to self.Wh, self.bh, self.Ws, and self.bs
Returns approximation for df/dWh, df/dbh, df/dWs, df/dbs
"""
dWh, dbh, dWs, dbs = np.zeros_like(self.Wh), np.zeros_like(self.bh), np.zeros_like(self.Ws), np.zeros_like(self.bs)
Wh, bh, Ws, bs = self.Wh, self.bh, self.Ws, self.bs
step = 1e-5
# df/dWh
h = np.zeros_like(self.Wh)
it = np.nditer(Wh, flags=['multi_index'])
while not it.finished:
ix = it.multi_index
h[ix] = step
dWh[ix] = (self.forward_backward_prop(Wh+h, bh, Ws, bs).loss - self.forward_backward_prop(Wh-h, bh, Ws, bs).loss) / (2*step)
h[ix] = 0
it.iternext()
# df/dbh
h = np.zeros_like(self.bh)
it = np.nditer(bh, flags=['multi_index'])
while not it.finished:
ix = it.multi_index
h[ix] = step
dbh[ix] = (self.forward_backward_prop(Wh, bh+h, Ws, bs).loss - self.forward_backward_prop(Wh, bh-h, Ws, bs).loss) / (2*step)
h[ix] = 0
it.iternext()
# df/dWh
h = np.zeros_like(self.Ws)
it = np.nditer(Ws, flags=['multi_index'])
while not it.finished:
ix = it.multi_index
h[ix] = step
dWs[ix] = (self.forward_backward_prop(Wh, bh, Ws+h, bs).loss - self.forward_backward_prop(Wh, bh, Ws-h, bs).loss) / (2*step)
h[ix] = 0
it.iternext()
# df/dbs
h = np.zeros_like(self.bs)
it = np.nditer(bs, flags=['multi_index'])
while not it.finished:
ix = it.multi_index
h[ix] = step
dbs[ix] = (self.forward_backward_prop(Wh, bh, Ws, bs+h).loss - self.forward_backward_prop(Wh, bh, Ws, bs-h).loss) / (2*step)
h[ix] = 0
it.iternext()
return dWh, dbh, dWs, dbs
Author: ebanner, Project: ml, Lines: 60, Source: model.py
Example 19: test_itershape
def test_itershape(self):
    # Check that allocated outputs work with a specified shape
    from numpy import nditer, arange
    import sys
    if '__pypy__' in sys.builtin_module_names:
        skip("op_axes not totally supported yet")
    a = arange(6, dtype='i2').reshape(2,3)
    i = nditer([a, None], [], [['readonly'], ['writeonly','allocate']],
               op_axes=[[0,1,None], None],
               itershape=(-1,-1,4))
    assert i.operands[1].shape == (2,3,4)
    assert i.operands[1].strides, (24,8,2)
    i = nditer([a.T, None], [], [['readonly'], ['writeonly','allocate']],
               op_axes=[[0,1,None], None],
               itershape=(-1,-1,4))
    assert i.operands[1].shape, (3,2,4)
    assert i.operands[1].strides, (8,24,2)
    i = nditer([a.T, None], [], [['readonly'], ['writeonly','allocate']],
               order='F',
               op_axes=[[0,1,None], None],
               itershape=(-1,-1,4))
    assert i.operands[1].shape, (3,2,4)
    assert i.operands[1].strides, (2,6,12)
    # If we specify 1 in the itershape, it shouldn't allow broadcasting
    # of that dimension to a bigger value
    raises(ValueError, nditer, [a, None], [],
           [['readonly'], ['writeonly','allocate']],
           op_axes=[[0,1,None], None],
           itershape=(-1,1,4))
Author: Qointum, Project: pypy, Lines: 32, Source: test_nditer.py
Example 20: get_rmss
def get_rmss(fc):
    import numpy as np
    from python_gen import get_fn_sd
    if hasattr(fc[(0,)*fc.ndim], '__call__'): #for forecasts that are functions
        rmss=np.zeros(fc.shape)
        for i in range(fc.shape[1]): #loop over forecast lead times
            fc_temp=fc[:,i,...]
            fc_temp_iter=np.nditer(fc_temp, flags=['multi_index','refs_ok'])
            while not fc_temp_iter.finished: #loop over other indices
                ind=fc_temp_iter.multi_index
                rmss[ind]=get_fn_sd(fc_temp[ind])
                fc_temp_iter.iternext()
    else:
        fc_anom=np.zeros(fc.shape)
        for i in range(fc.shape[0]):
            fc_anom[i,...]=fc[i,...]-np.mean(fc[i,...]) #anomaly from the mean forecast over all forecast start times and ensemble members for each lead time.
        ens_mean=np.mean(fc_anom,axis=-1)
        rmss=np.zeros(fc.shape[:-1])
        rmss_iter=np.nditer(rmss, flags=['multi_index'])
        while not rmss_iter.finished:
            ind=rmss_iter.multi_index
            rmss[ind]=np.mean((fc_anom[ind]-ens_mean[ind])**2)
            rmss_iter.iternext()
        rmss=np.mean(rmss,axis=1)
        rmss=np.sqrt(rmss)
    return rmss
Author: edwinrobots, Project: forecastcombination, Lines: 31, Source: fc_verification.py
Note: The numpy.nditer examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. Please refer to each project's license before redistributing or reusing the code; do not reproduce without permission.