
Python numpy.multiply Function Code Examples


This article collects typical usage examples of Python's numpy.multiply function. If you have been wondering how to call multiply, what it does, or what real-world uses look like, the hand-picked examples below should help.



Twenty multiply code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python samples. Each snippet is excerpted from a larger project and assumes numpy has already been imported (as np or numpy, matching the snippet).
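
As a quick orientation before the examples: numpy.multiply computes the elementwise product of its inputs under NumPy's broadcasting rules, and an optional third argument receives the result in place. A minimal sketch (the arrays here are made up):

import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([10.0, 100.0])   # broadcast across the rows of a

print(np.multiply(a, b))      # equivalent to a * b
# [[ 10. 200.]
#  [ 30. 400.]]

out = np.empty_like(a)
np.multiply(a, 2.0, out)      # write the product into a preallocated array
print(out)
# [[2. 4.]
#  [6. 8.]]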

Example 1: reducer_pca

    def reducer_pca(self, region, matrixs):
        M = pd.DataFrame(matrixs)
        M[M == ''] = float('NaN')
        M = M.astype(float)
        M = M.transpose()
        (columns,rows)= np.shape(M)
        Mean = np.mean(M, axis=1).values
        C=np.zeros([columns,columns])  
        N=np.zeros([columns,columns])

        for i in range(rows):
            row = M.iloc[:,i] - Mean
            outer = np.outer(row,row)
            valid = np.isnan(outer) == False
            C[valid] = C[valid]+ outer[valid]
            N[valid] = N[valid]+ 1
            
        # N counts valid observations; guard the division so empty cells
        # become 0 instead of NaN (N is never NaN, so N > 0 is the real mask)
        valid_outer = N > 0
        cov = np.divide(C, N, out=np.zeros_like(C), where=valid_outer)
        cov = np.multiply(cov, valid_outer)
        U, D, V = np.linalg.svd(cov)
        cum_sum = np.cumsum(D[:])/np.sum(D)
        for i in range(len(cum_sum)):
            if cum_sum[i] >= 0.99:
                ind = i 
                break
        yield region, ind
Developer: wchuan | Project: UCSD_BigData | Lines: 27 | Source: pca_test.py
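
The tail of reducer_pca selects the smallest number of components whose singular values cover 99% of the variance. The same step in isolation, on synthetic data (the data and the 0.99 threshold are illustrative):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))
cov = np.cov(X, rowvar=False)           # dense covariance, no NaN masking needed

U, D, V = np.linalg.svd(cov)
cum_sum = np.cumsum(D) / np.sum(D)
ind = int(np.argmax(cum_sum >= 0.99))   # first index reaching the threshold
print(ind)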


Example 2: std

def std(f):
    x = np.array(range(len(f)))
    # normalize; we do not prefer attributes with many values
    x = x / x.mean()
    xf = np.multiply(f, x)
    x2f = np.multiply(f, np.power(x, 2))
    return np.sqrt((np.sum(x2f) - np.power(np.sum(xf), 2) / np.sum(f)) / (np.sum(f) - 1))
Developer: Micseb | Project: orange3 | Lines: 7 | Source: owmpr.py
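
std above is a frequency-weighted sample standard deviation: with frequencies f at normalized positions x it evaluates sqrt((Σx²f − (Σxf)²/Σf) / (Σf − 1)). A quick sanity check against the equivalent two-pass form (the frequencies are made up):

import numpy as np

f = np.array([4.0, 1.0, 3.0, 2.0])   # hypothetical frequencies
x = np.arange(len(f), dtype=float)
x = x / x.mean()                     # same normalization as in std()

mean = np.sum(np.multiply(f, x)) / np.sum(f)
var = np.sum(np.multiply(f, (x - mean) ** 2)) / (np.sum(f) - 1)
print(np.sqrt(var))                  # agrees with std(f)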


Example 3: generate_RI_text_fast

def generate_RI_text_fast(N, RI_letters, cluster_sz, ordered, text_name, alph=alphabet):
	text_vector = np.zeros((1, N))
	text = utils.load_text(text_name)
	cluster = ''
	vector = np.ones((1,N))
	for char_num in range(len(text)):
		cluster = cluster + text[char_num]
		if len(cluster) < cluster_sz:
			continue
		elif len(cluster) > cluster_sz:
			prev_letter = cluster[0]
			prev_letter_idx = alphabet.find(prev_letter)
			inverse = np.roll(RI_letters[prev_letter_idx,:], cluster_sz-1)
			vector = np.multiply(vector, inverse)
			vector = np.roll(vector, 1)
			letter = text[char_num]
			letter_idx = alphabet.find(letter)
			vector = np.multiply(vector, RI_letters[letter_idx,:])
			cluster = cluster[1:]
		else: # len(cluster) == cluster_sz, happens once
			letters = list(cluster)
			for letter in letters:
				vector = np.roll(vector,1)
				letter_idx = alphabet.find(letter)
				vector = np.multiply(vector, RI_letters[letter_idx,:])
		text_vector += vector
	return text_vector
Developer: crazydreamer | Project: random_indexing_language_detection | Lines: 27 | Source: random_idx_iv.py


Example 4: run_sim

def run_sim(R_star, transit_duration, bodies):
    """Run 3-body sim and convert results to TTV + TDV values in [minutes]"""

    # Run 3-body sim for one full orbit of the outermost moon
    loop(bodies, orbit_duration)
    

    # Move resulting data from lists to numpy arrays
    ttv_array = numpy.array(ttv_list)
    tdv_array = numpy.array(tdv_list)

    # Zeropoint correction
    middle_point =  numpy.amin(ttv_array) + numpy.amax(ttv_array)
    ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
    ttv_array = numpy.divide(ttv_array, 1000)  # km/s

    # Compensate for barycenter offset of planet at start of simulation:
    planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
    stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
    ttv_array = numpy.divide(ttv_array, stretch_factor)

    # Convert to time units, TTV
    ttv_array = numpy.divide(ttv_array, R_star)
    ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24)  # minutes

    # Convert to time units, TDV
    oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60  # m/sec
    newspeed = oldspeed - numpy.amax(tdv_array)
    difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
    conversion_factor = difference / numpy.amax(tdv_array)
    tdv_array = numpy.multiply(tdv_array, conversion_factor)

    return ttv_array, tdv_array
Developer: hippke | Project: TTV-TDV-exomoons | Lines: 35 | Source: system_16.py


Example 5: estimateMethylatedFractions

    def estimateMethylatedFractions(self, pos, meanVector, modMeanVector, maskPos):

        maskPos = np.array(maskPos)
        L = len(maskPos)
        if L == 0:
            res = self.bootstrap(pos, meanVector[self.post], modMeanVector[self.post])
        else:
            est = np.zeros(L)
            low = np.zeros(L)
            upp = np.zeros(L)
            res = np.zeros(3)
            wts = np.zeros(L)

            # for offset in maskPos:
            for count in range(L):
                offset = maskPos[count]
                mu0 = meanVector[self.post + offset]
                mu1 = modMeanVector[self.post + offset]
                if mu1 > mu0:
                    k = self.bootstrap((pos + offset), mu0, mu1)
                    wts[count] = k[0] * (mu1 - mu0)
                    est[count] = k[0]
                    low[count] = k[1]
                    upp[count] = k[2]

            if sum(wts) > 1e-3:
                wts = wts / sum(wts)
                res[0] = np.multiply(est, wts).sum()
                res[1] = np.multiply(low, wts).sum()
                res[2] = np.multiply(upp, wts).sum()

        print(str(res))
        return res
Developer: jgurtowski | Project: kineticsTools | Lines: 33 | Source: kineticForReprocessing.py


Example 6: resolve_collision

def resolve_collision(m):
    # Calculate relative velocity
    rv = numpy.subtract(m.b.velocity, m.a.velocity)

    # Calculate relative velocity in terms of the normal direction
    velocity_along_normal = numpy.dot(rv, m.normal)

    # Do not resolve if velocities are separating
    if velocity_along_normal > 0:
        # print("Separating:", velocity_along_normal)
        # print("  Normal:  ", m.normal)
        # print("  Vel:     ", m.b.velocity, m.a.velocity)
        return False

    # Calculate restitution
    e = min(m.a.restitution, m.b.restitution)

    # Calculate impulse scalar
    j = -(1 + e) * velocity_along_normal
    j /= 1 / m.a.mass + 1 / m.b.mass

    # Apply impulse
    impulse = numpy.multiply(j, m.normal)

    # print("Before: ", m.a.velocity, m.b.velocity)
    m.a.velocity = numpy.subtract(m.a.velocity,
                                  numpy.multiply(1 / m.a.mass, impulse))
    m.b.velocity = numpy.add(m.b.velocity,
                             numpy.multiply(1 / m.b.mass, impulse))
    # print("After:  ", m.a.velocity, m.b.velocity)
    # print("  Normal:  ", m.normal)

    return True
Developer: mwreuter | Project: arcade | Lines: 33 | Source: physics_engine_2d.py


Example 7: determine_likeliest

def determine_likeliest(genotypes,num_regions,rsid_info,rsid_order,sample,result_queue):
	#q initial value is uniform across all geos
	q = [float(1)/float(num_regions)] * num_regions
	g = []
	f = []

	valid = set(['A','T','G','C'])

	#set up genotype and frequency vectors
	for ind,v in enumerate(genotypes):
		rsid = rsid_order[ind]
		ref_allele = rsid_info[rsid]["allele"]

		if v[0] in valid and v[1] in valid:
			matches = 0
			for i in v:
				if i == ref_allele:
					matches += 1

			g.append(matches)
			f.append(rsid_info[rsid]["freqs"])

	q = np.array(q)
	g = np.array(g)
	f = np.array(f)

	q_n_1 = q

	e = .01
	l_n = -1.0 * sys.maxsize
	l_n_1 = compute_likelihood(g,f,q)

	c = 0
	while l_n_1 - l_n > e:
		c += 1
		q = q_n_1
		q_n_1 = np.zeros(len(q))  # array, so += below accumulates elementwise

		for i,g_v in enumerate(g):
			a_denom = np.dot(q,f[i])
			b_denom = np.dot(q,1.0 - f[i])

			a = np.multiply(f[i],q) / a_denom
			b = np.multiply(1.0 - f[i],q) / b_denom

			q_n_1 += float(g_v) * a
			q_n_1 += float(2 - g_v) * b

		q_n_1 = (float(1)/float(2*len(g))) * q_n_1
		l_n = l_n_1
		l_n_1 = compute_likelihood(g,f,q_n_1)

	print "Sample: %s, Iterations: %d, Likelihood: %f" % (sample,c,l_n_1)


	result_string = [str(i) for i in q_n_1]

	result_queue.put("%s|%s\n" % (sample,"|".join(result_string)))
	
	return
Developer: Recombine | Project: phosphorus-public | Lines: 60 | Source: predict_ancestry.py


Example 8: poly_centroid

def poly_centroid(P):
	X = P[:,0]
	Y = P[:,1]
	# both coordinates weight the same signed edge cross product
	# (x_i*y_{i+1} - x_{i+1}*y_i); swapping x and y in that product
	# would flip the sign of the returned y-coordinate
	cross = np.multiply(X, np.roll(Y, -1)) - np.multiply(np.roll(X, -1), Y)
	return 1/6.0/poly_area(P) * np.asarray([
		np.dot(X + np.roll(X, -1), cross),
		np.dot(Y + np.roll(Y, -1), cross)
	])
Developer: acrylic-origami | Project: DoubleRainbow | Lines: 7 | Source: index.py
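
poly_centroid is the standard shoelace centroid. A quick check on the unit square, assuming poly_area is the matching signed shoelace area (it is not shown above, so a plausible version is reproduced here):

import numpy as np

def poly_area(P):
    # signed shoelace area; assumed counterpart of the poly_area used above
    X, Y = P[:, 0], P[:, 1]
    return 0.5 * np.sum(np.multiply(X, np.roll(Y, -1)) - np.multiply(np.roll(X, -1), Y))

square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
print(poly_area(square))        # 1.0
print(poly_centroid(square))    # [0.5 0.5], with poly_centroid as defined above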


Example 9: die

def die(first_noun, second_noun, trans_verb):
    """Vectorize a sentence with 'noun die noun verb' = (sub, obj)."""
    noun_model = space.words.polyglot_model()
    noun_space = noun_model[0]

    die_vector = compose.train.die_cat_stored()
    ver_vector = compose.train.verb(trans_verb, noun_model)

    fst_vector = noun_space[first_noun]
    snd_vector = noun_space[second_noun]

    par_vector_sub = kron(
        csr_matrix(snd_vector), csr_matrix(ver_vector))
    par_vector_obj = kron(
        csr_matrix(snd_vector), numpy.transpose(csr_matrix(ver_vector)))

    par_vector_sub = kron(
        numpy.transpose(csr_matrix(fst_vector)), csr_matrix(par_vector_sub))
    par_vector_obj = kron(
        numpy.transpose(csr_matrix(fst_vector)), csr_matrix(par_vector_obj))

    vector_sub = numpy.multiply(csr_matrix(die_vector), par_vector_sub)
    vector_obj = numpy.multiply(csr_matrix(die_vector), par_vector_obj)

    return (vector_sub.toarray().flatten(), vector_obj.toarray().flatten())
Developer: V1ncam | Project: lola1516 | Lines: 25 | Source: sentence.py


Example 10: _get_H

    def _get_H(self, debug=False):
        """
        returns H_t as defined in algorithm 2
        
        Reference:
        https://en.wikipedia.org/wiki/Limited-memory_BFGS
        http://www.ccms.or.kr/data/pdfpaper/jcms21_1/21_1_117.pdf
        https://homes.cs.washington.edu/~galen/files/quasi-newton-notes.pdf
        """
        I = np.identity(len(self.w))
        
        if min(len(self.s), len(self.y)) == 0:
                print "Warning: No second order information used!"
                return I
            
        assert len(self.s) > 0, "s cannot be empty."
        assert len(self.s) == len(self.y), "s and y must have same length"
        assert self.s[0].shape == self.y[0].shape, \
            "s and y must have same shape"
        assert abs(self.y[-1]).sum() != 0, "latest y entry cannot be 0!"
        assert 1/np.inner(self.y[-1], self.s[-1]) != 0, "!"

        I = np.identity(len(self.s[0]))
        H = np.dot((np.inner(self.s[-1], self.y[-1]) / np.inner(self.y[-1],
                   self.y[-1])), I)

        for (s_j, y_j) in zip(self.s, self.y):
            rho = 1.0/np.inner(y_j, s_j)
            V = I - np.multiply(rho, np.outer(s_j, y_j))
            H = (V).dot(H).dot(V.T)
            H += np.multiply(rho, np.outer(s_j, s_j))

        return H
Developer: heidekrueger | Project: CaseStudiesMachineLearning | Lines: 33 | Source: SGD.py


Example 11: normalize_layout

def normalize_layout(l):
    """Make sure all the spots in a layout are where you can click.

    Returns a copy of the layout with all spot coordinates are
    normalized to within (0.0, 0.98).

    """
    xs = []
    ys = []
    ks = []
    for (k, (x, y)) in l.items():
        xs.append(x)
        ys.append(y)
        ks.append(k)
    minx = np.min(xs)
    maxx = np.max(xs)
    if maxx == minx:
        # numpy float division yields inf rather than raising
        # ZeroDivisionError, so guard with an explicit check
        xnorm = np.array([0.5] * len(xs))
    else:
        xco = 0.98 / (maxx - minx)
        xnorm = np.multiply(np.subtract(xs, [minx] * len(xs)), xco)
    miny = np.min(ys)
    maxy = np.max(ys)
    if maxy == miny:
        ynorm = np.array([0.5] * len(ys))
    else:
        yco = 0.98 / (maxy - miny)
        ynorm = np.multiply(np.subtract(ys, [miny] * len(ys)), yco)
    return dict(zip(ks, zip(map(float, xnorm), map(float, ynorm))))
Developer: LogicalDash | Project: LiSE | Lines: 29 | Source: board.py
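
The core of normalize_layout is a min-max rescale into [0, 0.98]; the same operation in isolation (coordinates made up):

import numpy as np

xs = [3.0, 7.0, 11.0]
minx, maxx = np.min(xs), np.max(xs)
xnorm = np.multiply(np.subtract(xs, minx), 0.98 / (maxx - minx))
print(xnorm)                    # [0.   0.49 0.98]

Note that np.subtract broadcasts the scalar minx, so the [minx] * len(xs) list in the original is not strictly needed.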


Example 12: adaBoostTrainDecisionStump

 def adaBoostTrainDecisionStump(self,dataArr,classLabels,numInt=40):
     weakDecisionStumpArr = []
     m = np.shape(dataArr)[0]
     weight = np.mat(np.ones((m,1))/m)     # initialize the data weights; normally each sample starts at 1/n
     aggressionClassEst = np.mat(np.zeros((m,1)))
     for i in range(numInt): # classEst == class estimation
         bestStump,error,classEst = self.buildStump(dataArr,classLabels,weight) # D is a vector of the data's weight
         # print("D: ",weight.T)
         alpha = float(0.5 * np.log((1.0 - error)/max(error , 1e-16)))   # alpha is the weight of the weak classifier
         bestStump['alpha'] = alpha
         weakDecisionStumpArr.append(bestStump)
         exponent = np.multiply(-1* alpha * np.mat(classLabels).T , classEst) # calculate the exponent [- alpha * Y * Gm(X)]
         print("classEst :",classEst.T)
         weight = np.multiply(weight,np.exp(exponent)) # update the weight of the data, w_m = e^[- alpha * Y * Gm(X)]
         weight = weight/weight.sum()  # D.sum() == Z_m (Normalized Factor) which makes sure the D_(m+1) can be a probability distribution
         # give every estimated class vector (the classified result of the weak classifier) a weight
         aggressionClassEst += alpha*classEst
         print("aggression classEst: ",aggressionClassEst.T)
         # aggressionClassError = np.multiply(np.sign(aggressionClassEst) != np.mat(classLabels).T, np.ones((m,1)))
         # errorRate = aggressionClassError.sum()/m
         errorRate = (np.sign(aggressionClassEst) != np.mat(classLabels).T).sum()/m # calculate the misclassification rate
         # errorRate = np.dot((np.sign(aggressionClassEst) != np.mat(classLabels).T).T,np.ones((m,1)))/m
         print("total error: ",errorRate,"\n")
         if errorRate == 0:
             break
     return weakDecisionStumpArr
Developer: MichaelLinn | Project: MachineLearningDemo | Lines: 26 | Source: adaBoost.py
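
The heart of the loop is the AdaBoost re-weighting w ← w · exp(−α · y · G(x)) / Z with α = ½ ln((1 − ε)/ε). One toy iteration with made-up labels, where the weak learner gets one of five samples wrong:

import numpy as np

y = np.array([1, 1, -1, -1, 1])
pred = np.array([1, 1, -1, 1, 1])   # hypothetical weak learner, wrong on sample 3
w = np.ones(5) / 5

error = w[pred != y].sum()                     # weighted error = 0.2
alpha = 0.5 * np.log((1 - error) / error)      # ~0.693
w = np.multiply(w, np.exp(-alpha * y * pred))  # up-weight the mistake
w /= w.sum()
print(w)    # the misclassified sample now carries weight 0.5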


Example 13: bin_maker

def bin_maker(bin_size,F_matrix,summed=None):
    """
    Calculate the conditional usage as a function of the flow on the link according to bin_size
    """
    bin_max = np.ceil(max(F_matrix[:,0])/bin_size)*bin_size # round up to nearest bin_size
    nbins = int(bin_max/bin_size) # number of bins (np.zeros and np.linspace need an int)
    bin_means = np.linspace(.5*bin_size,bin_max-(.5*bin_size),nbins) # mean values of bins

    H_temp = []
    H = np.zeros((nbins,4)) # [#nodes, mean usage, min usage, max usage]
    for b in range(int(nbins)):
        for t in range(lapse):
            if b*bin_size <= F_matrix[t,0] < (b+1)*bin_size:
                H_temp.append(F_matrix[t,1])
        if len(H_temp)>0:
            H[b,0] = len(H_temp)
            H[b,1] = sum(H_temp)/len(H_temp)
            H[b,2] = min(H_temp)
            H[b,3] = max(H_temp)
        else: # no data in the bin
            H[b,0] = 0
            H[b,1] = 0
            H[b,2] = 0
            H[b,3] = 0
        H_temp=[]

    if summed:
        part_sum = np.multiply(bin_means,bin_size)
        bin_sum = sum(np.multiply(H[:,1],part_sum))
        return np.array([bin_means,H[:,1]]),bin_sum
    else:
        return bin_means,H
Developer: asadashfaq | Project: FlowcolouringA | Lines: 32 | Source: usage_old.py


Example 14: assign_weights

	def assign_weights(self,network,matTargetNeurons):
		numInput = self.dicProperties["IODim"]
		numNodesReservoir = self.dicProperties["ReservoirDim"]
		numInhib = int(numInput*numNodesReservoir*self.dicProperties["InhibFrac"]) # np.random.randint needs an int count
		nRowLength = len(matTargetNeurons[0])
		numInhibPerRow = int(np.floor(nRowLength*self.dicProperties["InhibFrac"]))
		if self.dicProperties["Distribution"] == "Betweenness":
			if self.lstBetweenness == []:
				self.lstBetweenness = betwCentrality(network)[0].a
			rMaxBetw = self.lstBetweenness.max()
			rMinBetw = self.lstBetweenness.min()
			rMaxWeight = self.dicProperties["Max"]
			rMinWeight = self.dicProperties["Min"]
			for i in range(self.dicProperties["IODim"]):
				self.lstBetweenness = np.multiply(np.add(self.lstBetweenness,-rMinBetw+rMinWeight*rMaxBetw/(rMaxWeight-rMinWeight)),(rMaxWeight-rMinWeight)/rMaxBetw)
				self.__matConnect[i,matTargetNeurons[i]] = self.lstBetweenness[matTargetNeurons[i]] # does not take duplicate indices into account... never mind
			# generate the necessary inhibitory connections
			lstNonZero = np.nonzero(self.__matConnect)
			lstInhib = np.random.randint(0,len(lstNonZero),numInhib)
			self.__matConnect[lstInhib] = -self.__matConnect[lstInhib]
			rFactor = (self.dicProperties["Max"]-self.dicProperties["Min"])/(rMaxBetw-rMinBetw) # between 0 and Max-Min
			self.__matConnect = np.add(np.multiply(self.__matConnect,rFactor),self.dicProperties["Min"]) # between Min and Max
		elif self.dicProperties["Distribution"] == "Gaussian":
			for i in range(self.dicProperties["IODim"]):
				self.__matConnect[i,matTargetNeurons[i,:numInhibPerRow]] = -np.random.normal(self.dicProperties["MeanInhib"],self.dicProperties["VarInhib"],numInhibPerRow)
				self.__matConnect[i,matTargetNeurons[i,numInhibPerRow:]] = np.random.normal(self.dicProperties["MeanExc"],self.dicProperties["VarExc"],nRowLength-numInhibPerRow)
		elif self.dicProperties["Distribution"] == "Lognormal":
			for i in range(self.dicProperties["IODim"]):
				self.__matConnect[i,matTargetNeurons[i][:numInhibPerRow]] = -np.random.lognormal(self.dicProperties["LocationInhib"],self.dicProperties["ScaleInhib"],numInhibPerRow)
				self.__matConnect[i,matTargetNeurons[i][numInhibPerRow:]] = np.random.lognormal(self.dicProperties["LocationExc"],self.dicProperties["ScaleExc"],nRowLength-numInhibPerRow)
		else:
			pass # I don't know what to do for the degree correlations yet
Developer: Silmathoron | Project: ResCompPackage | Lines: 32 | Source: InputConnect.py


Example 15: trans_param_to_current_array

 def trans_param_to_current_array(self, quantity_dict, trans_param,
                                  model='LIF', mcnc_grouping=None,
                                  std=None):
     quantity_array = quantity_dict['quantity_array']
     quantity_rate_array = np.abs(np.gradient(quantity_array)) / DT
     if model == 'LIF':
         current_array = trans_param[0] * quantity_array +\
             trans_param[1] * quantity_rate_array + trans_param[2]
         if std is not None:
             std = 0 if std < 0 else std
             current_array += np.random.normal(
                 loc=0., scale=std, size=quantity_array.shape)
     if model == 'Lesniak':
         trans_param = np.tile(trans_param, (4, 1))
         trans_param[:, :2] = np.multiply(
             trans_param[:, :2].T, mcnc_grouping).T
         quantity_array = np.tile(quantity_array, (mcnc_grouping.size, 1)).T
         quantity_rate_array = np.tile(
             quantity_rate_array, (mcnc_grouping.size, 1)).T
         current_array = np.multiply(quantity_array, trans_param[:, 0]) +\
             np.multiply(quantity_rate_array, trans_param[:, 1]) +\
             np.multiply(np.ones_like(quantity_array), trans_param[:, 2])
         if std is not None:
             std = 0 if std < 0 else std
             current_array += np.random.normal(loc=0., scale=std,
                                               size=quantity_array.shape)
     return current_array
Developer: yw5aj | Project: YoshiRecordingData | Lines: 27 | Source: fitlif.py


Example 16: plot_triplet

def plot_triplet(apn, idx):
  plt.subplot(1,3,1)
  plt.imshow(np.multiply(apn[idx*3+0,:,:,:],1/256))
  plt.subplot(1,3,2)
  plt.imshow(np.multiply(apn[idx*3+1,:,:,:],1/256))
  plt.subplot(1,3,3)
  plt.imshow(np.multiply(apn[idx*3+2,:,:,:],1/256))
Developer: 21hub | Project: facenet | Lines: 7 | Source: facenet_compare.py


Example 17: online_k_means

def online_k_means(k,b,t,X_in):
    random_number = 11232015
    random_num = np.random.randint(X_in.shape[0], size =300 )
    rng = np.random.RandomState(random_number)
    permutation1 = rng.permutation(len(random_num))
    random_num = random_num[permutation1]
    x_input = X_in[random_num]
    c,l = mykmeansplusplus(x_input,k,t)
    v = np.zeros((k))
    for i in range(t):
        random_num = np.random.randint(X_in.shape[0], size = b)
        rng = np.random.RandomState(random_number)
        permutation1 = rng.permutation(len(random_num))
        random_num = random_num[permutation1]
        M = X_in[random_num]
        Y = cdist(M, c, metric='euclidean')
        clust_index = np.argmin(Y, axis=1)
        for j in range(M.shape[0]):  # j, not i: avoid shadowing the outer loop index
            c_in = clust_index[j]
            v[c_in] += 1
            ita = 1 / v[c_in]
            c[c_in] = np.add(np.multiply((1 - ita), c[c_in]), np.multiply(ita, M[j]))
    Y_l = cdist(X_in, c, metric='euclidean')
    l = np.argmin(Y_l, axis=1)
    return c,l
Developer: Subhankari | Project: CML_Homework5 | Lines: 25 | Source: my_kmeans_new.py
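
The centroid update inside the loop, c ← (1 − η)c + ηx with η = 1/count, is a running mean of the points assigned to a centroid so far. A tiny sketch (the points are made up):

import numpy as np

c = np.zeros(2)   # a single centroid
v = 0             # points absorbed so far
for point in [np.array([2.0, 0.0]), np.array([4.0, 2.0])]:
    v += 1
    ita = 1 / v
    c = np.add(np.multiply(1 - ita, c), np.multiply(ita, point))
print(c)          # running mean of the two points: [3. 1.]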


Example 18: derivadaCusto

	def derivadaCusto(self, x, y):
		# compute the derivatives with respect to W1 and W2
		self.yEstimado = self.propaga(x)
		matrix_x = np.matrix(list(x.values()))
		if(self.tamInput > 1):
			matrix_x = matrix_x.T
		matrix_y = np.matrix(list(y.values()))
		# error to be backpropagated
		ek = -np.subtract(matrix_y, self.yEstimado)
		'''
		print("Erro k:")
		print(ek.shape)
		print(ek)
		print("Derivada sigmoid YIN : ", self.derivadaSigmoide(self.yin).shape)
		print(self.derivadaSigmoide(self.yin))
		'''
		delta3 = np.multiply(ek, self.derivadaPrelu(self.yin))#self.derivadaSigmoide(self.yin))
		# backpropagate the error through the layer, multiplying by the derivative of the activation function
		# add the regularization term to the gradient (+lambda * weights)
		dJdW2 = np.dot(delta3, self.zin) + self.lambdaVal*self.W2
		'''
		print("dJdW2 ------------ ", dJdW2.shape)
		print(dJdW2)
		print("Z shape:", self.z.shape)
		print(self.z)
		print("Derivada Z shape:", self.derivadaSigmoide(self.z).shape)
		print(self.derivadaSigmoide(self.z))
		print("W2 shape", self.W2.shape)
		print(self.W2)
		'''
		delta2 = np.multiply(np.dot(self.W2, delta3).T, self.derivadaSigmoide(self.z))
		dJdW1 = np.dot(matrix_x, delta2) + self.lambdaVal*self.W1
		return dJdW1, dJdW2
Developer: tuany | Project: RNN | Lines: 33 | Source: Neural_Network.py
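
Both delta terms follow the usual backpropagation pattern: the propagated error is gated elementwise by the derivative of the activation function. A sketch with a sigmoid (all values made up):

import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

yin = np.array([0.2, -1.0, 3.0])      # pre-activations
ek = np.array([0.1, -0.4, 0.05])      # hypothetical output errors

s = sigmoid(yin)
delta = np.multiply(ek, s * (1 - s))  # sigmoid' expressed via its output
print(delta)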


Example 19: update_weights

    def update_weights(self, forward_layer_error_signal_factor):
        """
            Update the weights using gradient descent algorithm.
             @forward_layer_error_signal_factor: forward layers error factor as a vector
                ==> dot(forward_layer.weights.T, forward_layer.gradient)
        """
        gradient_of_error_func = multiply(
            multiply(self.learning_rate, forward_layer_error_signal_factor, forward_layer_error_signal_factor),
            self.activation_function.derivative(f_of_x=self.outputs, out=self.outputs),
            out=self.outputs
        )

        back_propagation_error_factor = dot(self.weights.T, gradient_of_error_func)

        self.previous_weight_update = multiply(
            self.previous_weight_update, self.momentum, out=self.previous_weight_update
        )
        delta_weights = add(
            self.previous_weight_update,
            multiply(gradient_of_error_func.reshape((-1, 1)), self.inputs),
            out=self.previous_weight_update
        )

        self.weights += delta_weights                   # update weights.
        self.bias += gradient_of_error_func             # update bias.

        return back_propagation_error_factor            # back-propagate error factor to previous layer ...
Developer: samyvilar | Project: ai_assignments | Lines: 27 | Source: network.py
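
This layer leans on the ufunc out parameter: multiply(x1, x2, out) writes the product into an existing buffer instead of allocating a new array, which is why self.outputs and self.previous_weight_update keep being reused. The pattern in isolation:

import numpy as np

grad = np.array([0.5, -0.25, 1.0])
lr = 0.1

np.multiply(lr, grad, grad)   # product written straight into grad, no temporary
print(grad)                   # [ 0.05  -0.025  0.1  ]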


Example 20: ratio_err

def ratio_err(top,bottom,top_low,top_high,bottom_low,bottom_high):
    #uses simple propagation of errors (partial derivatives)
    #note it returns errorbars, not interval

    #-make sure input is numpy arrays-
    top = np.array(top)
    top_low = np.array(top_low)
    top_high = np.array(top_high)
    bottom = np.array(bottom)
    bottom_low = np.array(bottom_low)
    bottom_high = np.array(bottom_high)

    #-calculate errorbars-
    top_errlow = np.subtract(top,top_low)
    top_errhigh = np.subtract(top_high,top)
    bottom_errlow = np.subtract(bottom,bottom_low)
    bottom_errhigh = np.subtract(bottom_high,bottom)

    #-calculate ratio_low-
    ratio_low  = np.sqrt( np.square(np.divide(top_errlow,bottom)) + np.square( np.multiply(np.divide(top,np.square(bottom)),bottom_errlow)) )
    #-calculate ratio_high-
    ratio_high = np.sqrt( np.square(np.divide(top_errhigh,bottom)) + np.square( np.multiply(np.divide(top,np.square(bottom)),bottom_errhigh)) )
#    ratio_high = ((top_errhigh/bottom)**2.0 + (top/(bottom**2.0))*bottom_errhigh)**2.0)**0.5

    # return two vectors, err_low and err_high
    return ratio_low,ratio_high
Developer: kariannfrank | Project: sn1987a | Lines: 26 | Source: spectra_results_0.py
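
The two np.sqrt lines implement first-order error propagation for a ratio r = t/b: σ_r = sqrt((σ_t/b)² + (t·σ_b/b²)²). A quick numeric check with made-up values:

import numpy as np

top, bottom = 10.0, 4.0
top_err, bottom_err = 0.5, 0.2

ratio_err_val = np.sqrt((top_err / bottom) ** 2 +
                        (top / bottom ** 2 * bottom_err) ** 2)
print(top / bottom, ratio_err_val)   # 2.5 +/- ~0.177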



Note: The numpy.multiply examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Consult each project's license before redistributing or reusing the code, and do not repost without permission.

