This article collects typical usage examples of the numpy.core.umath.minimum function in Python. If you have been wondering how the Python minimum function is used, how it works, or what it looks like in real code, the curated examples below may help.
A total of 9 code examples of the minimum function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
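Before turning to the project examples, here is a minimal usage sketch (not taken from any of the projects below) of what the function does: numpy.core.umath.minimum is the C-level ufunc that is re-exported publicly as numpy.minimum, returning the element-wise minimum of its two inputs with normal ufunc broadcasting. The arrays used here are purely illustrative.

import numpy as np
from numpy.core import umath as ncu  # the same module the test examples below import as ncu/umath

a = np.array([1.0, 5.0, 3.0])
b = np.array([4.0, 2.0, 3.0])

print(ncu.minimum(a, b))    # element-wise minimum: [1. 2. 3.]
print(ncu.minimum(a, 2.5))  # a scalar operand is broadcast: [1.  2.5 2.5]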
Example 1: test_old_wrap
def test_old_wrap(self):
    class with_wrap(object):
        def __array__(self):
            return np.zeros(1)

        def __array_wrap__(self, arr):
            r = with_wrap()
            r.arr = arr
            return r

    a = with_wrap()
    x = ncu.minimum(a, a)
    assert_equal(x.arr, np.zeros(1))
Developer: Fematich, Project: article_browser, Lines of code: 11, Source file: test_umath.py
Example 2: test_default_prepare
def test_default_prepare(self):
    class with_wrap(object):
        __array_priority__ = 10

        def __array__(self):
            return np.zeros(1)

        def __array_wrap__(self, arr, context):
            return arr

    a = with_wrap()
    x = ncu.minimum(a, a)
    assert_equal(x, np.zeros(1))
    assert_equal(type(x), np.ndarray)
Developer: Fematich, Project: article_browser, Lines of code: 11, Source file: test_umath.py
Example 3: check_old_wrap
def check_old_wrap(self):
    class with_wrap(object):
        def __array__(self):
            return zeros(1)

        def __array_wrap__(self, arr):
            r = with_wrap()
            r.arr = arr
            return r

    a = with_wrap()
    x = minimum(a, a)
    assert_equal(x.arr, zeros(1))
Developer: dinarabdullin, Project: Pymol-script-repo, Lines of code: 13, Source file: test_umath.py
Example 4: test_wrap
def test_wrap(self):
    class with_wrap(object):
        def __array__(self):
            return np.zeros(1)

        def __array_wrap__(self, arr, context):
            r = with_wrap()
            r.arr = arr
            r.context = context
            return r

    a = with_wrap()
    x = ncu.minimum(a, a)
    assert_equal(x.arr, np.zeros(1))
    func, args, i = x.context
    self.assertTrue(func is ncu.minimum)
    self.assertEqual(len(args), 2)
    assert_equal(args[0], a)
    assert_equal(args[1], a)
    self.assertEqual(i, 0)
Developer: Fematich, Project: article_browser, Lines of code: 18, Source file: test_umath.py
Example 5: test_wrap
def test_wrap(self):
    class with_wrap(object):
        def __array__(self):
            return zeros(1)

        def __array_wrap__(self, arr, context):
            r = with_wrap()
            r.arr = arr
            r.context = context
            return r

    a = with_wrap()
    x = minimum(a, a)
    assert_equal(x.arr, zeros(1))
    func, args, i = x.context
    self.failUnless(func is minimum)
    self.failUnlessEqual(len(args), 2)
    assert_equal(args[0], a)
    assert_equal(args[1], a)
    self.failUnlessEqual(i, 0)
Developer: radical-software, Project: radicalspam, Lines of code: 18, Source file: test_umath.py
Example 6: fit
def fit(self, X, y):
    labdict = {}
    if len(X[0].shape) == 1:
        ismatrix = 0
    else:
        ismatrix = 1
    xma = X.max()
    xmi = X.min()
    if xma < 0 or xma > 1 or xmi < 0 or xmi > 1:
        X = numpy.multiply(X - xmi, 1 / (xma - xmi))
    if len(self.neurons) == 0:
        ones = scipy.ones(X[0].shape)
        self.neurons.append(Neuron(numpy.concatenate((X[0], ones - X[0]), ismatrix), y[0]))
        startc = 1
        labdict[y[0].nonzero()[0].tostring()] = [0]
    else:
        startc = 0
    newlabel = 0
    import time
    time1 = time.time()
    ones = scipy.ones(X[0].shape)
    for i1, f1 in enumerate(X[startc:], startc):
        if i1 % 1000 == 0:
            print i1, X.shape[0], len(self.neurons), newlabel, "time ", time.time() - time1
            time1 = time.time()
        found = 0
        if scipy.sparse.issparse(f1):
            f1 = f1.todense()
        fc = numpy.concatenate((f1, ones - f1), ismatrix)
        activationn = [0] * len(self.neurons)
        activationi = [0] * len(self.neurons)
        ytring = y[i1].nonzero()[0].tostring()
        if ytring in labdict:
            fcs = fc.sum()
            for i2 in labdict[ytring]:
                minnfs = umath.minimum(self.neurons[i2].vc, fc).sum()
                activationi[i2] = minnfs / fcs
                activationn[i2] = minnfs / self.neurons[i2].vc.sum()
        if numpy.max(activationn) == 0:
            newlabel += 1
            self.neurons.append(Neuron(fc, y[i1]))
            labdict.setdefault(ytring, []).append(len(self.neurons) - 1)
            continue
        inds = numpy.argsort(activationn)
        indc = numpy.where(numpy.array(activationi)[inds[::-1]] > self.vigilance)[0]
        if indc.shape[0] == 0:
            self.neurons.append(Neuron(fc, y[i1]))
            labdict.setdefault(ytring, []).append(len(self.neurons) - 1)
            continue
        winner = inds[::-1][indc[0]]
        self.neurons[winner].vc = umath.minimum(self.neurons[winner].vc, fc)
        labadd = numpy.zeros(y[0].shape, dtype=y[0].dtype)
        labadd[y[i1].nonzero()] = 1
        self.neurons[winner].label += labadd
Developer: ChristianSch, Project: scikit-multilearn, Lines of code: 69, Source file: MLARAMfast.py
Example 7: predict_proba
def predict_proba(self, X):
    result = []
    if len(X) == 0:
        return
    if len(X[0].shape) == 1:
        ismatrix = 0
    else:
        ismatrix = 1
    xma = X.max()
    xmi = X.min()
    if xma < 0 or xma > 1 or xmi < 0 or xmi > 1:
        X = numpy.multiply(X - xmi, 1 / (xma - xmi))
    ones = scipy.ones(X[0].shape)
    n1s = [0] * len(self.neurons)
    allranks = []
    neuronsactivated = []
    allneu = numpy.vstack([n1.vc for n1 in self.neurons])
    allneusum = allneu.sum(1) + self.alpha
    import time
    time1 = time.time()
    for i1, f1 in enumerate(X):
        if self.debug == 1:
            print i1,
            if (i1 % 10) + 1 == 10:
                print i1, time.time() - time1
                time1 = time.time()
        if scipy.sparse.issparse(f1):
            f1 = f1.todense()
        fc = numpy.concatenate((f1, ones - f1), ismatrix)
        activity = (umath.minimum(fc, allneu).sum(1) / allneusum).squeeze().tolist()
        if ismatrix == 1:
            activity = activity[0]
        # be very fast
        sortedact = numpy.argsort(activity)[::-1]
        winner = sortedact[0]
        diff_act = activity[winner] - activity[sortedact[-1]]
        largest_activ = 1
        par_t = self.threshold
        for i in range(1, len(self.neurons)):
            activ_change = (activity[winner] - activity[sortedact[i]]) / activity[winner]
            if activ_change > par_t * diff_act:
                break
            largest_activ += 1
        rbsum = sum([activity[k] for k in sortedact[0:largest_activ]])
        rank = activity[winner] * self.neurons[winner].label
        actives = []
        activity_actives = []
        actives.append(winner)
        activity_actives.append(activity[winner])
        for i in range(1, largest_activ):
            rank += activity[sortedact[i]] * self.neurons[sortedact[i]].label
            actives.append(sortedact[i])
            activity_actives.append(activity[sortedact[i]])
        rank /= rbsum
        allranks.append(rank)
    return numpy.array(numpy.matrix(allranks))
Developer: ChristianSch, Project: scikit-multilearn, Lines of code: 74, Source file: MLARAMfast.py
Example 8: predict_proba
def predict_proba(self, X):
    """Predict probabilities of label assignments for X

    Parameters
    ----------
    X : numpy.ndarray or scipy.sparse.csc_matrix
        input features of shape :code:`(n_samples, n_features)`

    Returns
    -------
    array of arrays of float
        matrix with label assignment probabilities of shape
        :code:`(n_samples, n_labels)`
    """
    # FIXME: we should support dense matrices natively
    if isinstance(X, numpy.matrix):
        X = numpy.asarray(X)
    if issparse(X):
        if X.getnnz() == 0:
            return
    elif len(X) == 0:
        return

    is_matrix = int(len(X[0].shape) != 1)
    X = _normalize_input_space(X)

    all_ranks = []
    neuron_vectors = [n1.vc for n1 in self.neurons]
    if any(map(issparse, neuron_vectors)):
        all_neurons = scipy.sparse.vstack(neuron_vectors)
        # can't add a constant to a sparse matrix in scipy
        all_neurons_sum = all_neurons.sum(1).A
    else:
        all_neurons = numpy.vstack(neuron_vectors)
        all_neurons_sum = all_neurons.sum(1)

    all_neurons_sum += self._alpha

    for row_number, input_vector in enumerate(X):
        fc = _concatenate_with_negation(input_vector)

        if issparse(fc):
            activity = (fc.minimum(all_neurons).sum(1) / all_neurons_sum).squeeze().tolist()
        else:
            activity = (umath.minimum(fc, all_neurons).sum(1) / all_neurons_sum).squeeze().tolist()

        if is_matrix:
            activity = activity[0]

        # be very fast
        sorted_activity = numpy.argsort(activity)[::-1]
        winner = sorted_activity[0]
        activity_difference = activity[winner] - activity[sorted_activity[-1]]
        largest_activity = 1
        par_t = self.threshold
        for i in range(1, len(self.neurons)):
            activity_change = (activity[winner] - activity[sorted_activity[i]]) / activity[winner]
            if activity_change > par_t * activity_difference:
                break
            largest_activity += 1

        rbsum = sum([activity[k] for k in sorted_activity[0:largest_activity]])
        rank = activity[winner] * self.neurons[winner].label
        activated = []
        activity_among_activated = []
        activated.append(winner)
        activity_among_activated.append(activity[winner])

        for i in range(1, largest_activity):
            rank += activity[sorted_activity[i]] * self.neurons[
                sorted_activity[i]].label
            activated.append(sorted_activity[i])
            activity_among_activated.append(activity[sorted_activity[i]])

        rank /= rbsum
        all_ranks.append(rank)

    return numpy.array(numpy.matrix(all_ranks))
Developer: scikit-multilearn, Project: scikit-multilearn, Lines of code: 80, Source file: mlaram.py
Example 9: fit
def fit(self, X, y):
    """Fit classifier with training data

    Parameters
    ----------
    X : numpy.ndarray or scipy.sparse
        input features, can be a dense or sparse matrix of size
        :code:`(n_samples, n_features)`
    y : numpy.ndarray or scipy.sparse {0,1}
        binary indicator matrix with label assignments.

    Returns
    -------
    skmultilearn.MLARAMfast.MLARAM
        fitted instance of self
    """
    self._labels = []
    self._allneu = ""
    self._online = 1
    self._alpha = 0.0000000000001

    is_sparse_x = issparse(X)

    label_combination_to_class_map = {}
    # FIXME: we should support dense matrices natively
    if isinstance(X, numpy.matrix):
        X = numpy.asarray(X)
    if isinstance(y, numpy.matrix):
        y = numpy.asarray(y)

    is_more_dimensional = int(len(X[0].shape) != 1)
    X = _normalize_input_space(X)

    y_0 = _get_label_vector(y, 0)

    if len(self.neurons) == 0:
        neuron_vc = _concatenate_with_negation(X[0])
        self.neurons.append(Neuron(neuron_vc, y_0))
        start_index = 1
        label_combination_to_class_map[_get_label_combination_representation(y_0)] = [0]
    else:
        start_index = 0

    # denotes the class enumerator for label combinations
    last_used_label_combination_class_id = 0

    for row_no, input_vector in enumerate(X[start_index:], start_index):
        label_assignment_vector = _get_label_vector(y, row_no)

        fc = _concatenate_with_negation(input_vector)
        activationn = [0] * len(self.neurons)
        activationi = [0] * len(self.neurons)
        label_combination = _get_label_combination_representation(label_assignment_vector)

        if label_combination in label_combination_to_class_map:
            fcs = fc.sum()
            for class_number in label_combination_to_class_map[label_combination]:
                if issparse(self.neurons[class_number].vc):
                    minnfs = self.neurons[class_number].vc.minimum(fc).sum()
                else:
                    minnfs = umath.minimum(self.neurons[class_number].vc, fc).sum()
                activationi[class_number] = minnfs / fcs
                activationn[class_number] = minnfs / self.neurons[class_number].vc.sum()

        if numpy.max(activationn) == 0:
            last_used_label_combination_class_id += 1
            self.neurons.append(Neuron(fc, label_assignment_vector))
            label_combination_to_class_map.setdefault(label_combination, []).append(len(self.neurons) - 1)
            continue

        inds = numpy.argsort(activationn)
        indc = numpy.where(numpy.array(activationi)[inds[::-1]] > self.vigilance)[0]

        if indc.shape[0] == 0:
            self.neurons.append(Neuron(fc, label_assignment_vector))
            label_combination_to_class_map.setdefault(label_combination, []).append(len(self.neurons) - 1)
            continue

        winner = inds[::-1][indc[0]]
        if issparse(self.neurons[winner].vc):
            self.neurons[winner].vc = self.neurons[winner].vc.minimum(fc)
        else:
            self.neurons[winner].vc = umath.minimum(
                self.neurons[winner].vc, fc
            )

        # 1 if winner neuron won a given label 0 if not
        labels_won_indicator = numpy.zeros(y_0.shape, dtype=y_0.dtype)
        labels_won_indicator[label_assignment_vector.nonzero()] = 1
        self.neurons[winner].label += labels_won_indicator

    return self
Developer: scikit-multilearn, Project: scikit-multilearn, Lines of code: 94, Source file: mlaram.py
Note: The numpy.core.umath.minimum examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.