
Python sigmoid.sigmoid Function Code Examples


This article collects typical usage examples of the Python function sigmoid.sigmoid. If you are wondering what exactly sigmoid does, how to call it, or what real-world usage looks like, the curated code examples below should help.



Twenty code examples of the sigmoid function are presented below, sorted by popularity by default.
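
All of the examples import a small, project-local sigmoid module. Its exact contents differ from repository to repository, but a minimal sketch consistent with how the examples call it (elementwise, on scalars, vectors, and matrices alike) looks like this:

import numpy as np

def sigmoid(z):
    """Elementwise logistic function: g(z) = 1 / (1 + exp(-z))."""
    return 1.0 / (1.0 + np.exp(-z))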

Example 1: predict

import numpy as np
from numpy import concatenate, mat, size
from sigmoid import sigmoid

def predict(theta, board):
    """
    theta - unrolled Neural Network weights
    board - n*n matrix representing board
    Returns:
        h - n*1 column vector - confidence level for performing next move
    """
    n = size(board,1)

    #neural network parameters
    input_units = n*n
    hidden_units = n*n
    output_units = n*n

    #theta1 - unrolled weights between input and hidden layer
    #theta2 - unrolled weights between hidden and output layer
    theta1 = theta[:,:hidden_units*(input_units+1)]
    theta2 = theta[:,hidden_units*(input_units+1):]

    #reshaping to obtain rolled weights
    theta1 = np.reshape(theta1,(hidden_units,input_units+1))
    theta2 = np.reshape(theta2,(output_units,hidden_units+1))

    #calculating confidence level given board
    #position and neural network weights
    X = board.flatten().T
    X = concatenate((mat(1),X))
    z2 = theta1*X
    a2 = sigmoid(z2)
    a2 = concatenate((mat(1),a2))
    z3 = theta2*a2
    h = sigmoid(z3)
    return h
Author: harishrithish7 | Project: Tic-Tac-Toe | Lines: 33 | Source: predict.py
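
For context, a hypothetical call might look like the sketch below; the board size and weights are invented for illustration, and theta must unroll to hidden_units*(input_units+1) + output_units*(hidden_units+1) entries:

import numpy as np
from numpy import mat

n = 3                                  # 3x3 tic-tac-toe board
num_weights = 2 * (n*n) * (n*n + 1)    # theta1 and theta2 happen to be the same size here
theta = mat(np.random.randn(1, num_weights) * 0.01)
board = mat(np.zeros((n, n)))          # empty board
h = predict(theta, board)              # n*n column vector of confidences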


Example 2: lrCostFunction

from numpy import dot, log
from sigmoid import sigmoid

def lrCostFunction(theta, X, y, Lambda):
    """computes the cost of using
    theta as the parameter for regularized logistic regression and the
    gradient of the cost w.r.t. to the parameters.
    """

# ====================== YOUR CODE HERE ======================
# Instructions: Compute the cost of a particular choice of theta.
#               You should set J to the cost.
#
# Hint: The computation of the cost function and gradients can be
#       efficiently vectorized. For example, consider the computation
#
#           sigmoid(X * theta)
#
#       Each row of the resulting matrix will contain the value of the
#       prediction for that example. You can make use of this to vectorize
#       the cost function and gradient computations. 
#

    # =============================================================
    m = y.size
    
    h = sigmoid(dot(X, theta))
    J = (-dot(y, log(h)) - dot(1 - y, log(1 - h))) / m \
        + (Lambda / (2 * m)) * (sum(theta ** 2) - theta[0] ** 2)
    return J
Author: marieyalap | Project: Coursera-Stanford-ML-Python | Lines: 30 | Source: lrCostFunction.py
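
A toy invocation (all values invented for illustration); with theta at zero every prediction is 0.5 and the regularization term vanishes, so the cost comes out to log(2) ≈ 0.693:

import numpy as np

X = np.array([[1.0, 0.5], [1.0, -1.2], [1.0, 0.3]])  # bias column + one feature
y = np.array([1.0, 0.0, 1.0])
theta = np.zeros(2)
print(lrCostFunction(theta, X, y, Lambda=1.0))       # ~0.6931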


Example 3: charTrain

import numpy as np
from sigmoid import sigmoid

def charTrain():
    X = np.matrix('0,0,1,0; 0,1,0,0; 0,0,0,1; 0,0,0,1; 1,0,0,0')  # encoding for hello
    numIn, numHid, numOut = 4, 10, 4
    numInTot = numIn + numHid + 1
    theta1 = np.matrix(np.sqrt(6.0 / (numIn + numHid)) * np.random.randn(numInTot, numHid))
    theta2 = np.matrix(np.sqrt(6.0 / (numOut + numHid)) * np.random.randn(numHid + 1, numOut))
    theta1_grad = np.zeros((numInTot, numHid))
    theta2_grad = np.zeros((numHid + 1, numOut))
    hid_last = np.zeros((numHid, 1))
    m = X.shape[0]
    alpha = 0.05
    for ita in range(5000):
        for j in range(m-1):  # every training element except the last, whose successor is unknown
            y = X[j+1, :]  # given the input char, the next char is expected
            # forward
            context = hid_last
            x_context = np.concatenate((X[j, :], context.T), axis=1)
            a1 = np.matrix(np.concatenate((x_context, np.matrix('1')), axis=1)).T
            z2 = theta1.T * a1
            a2 = np.concatenate((sigmoid(z2), np.matrix('1')))
            hid_last = a2[0:-1, 0]
            z3 = theta2.T * a2
            a3 = sigmoid(z3)
            # backward propagation
            d3 = np.multiply(z3.T, (a3.T - y))   # 1*4, d(loss)/d(z) = z * (a3 - y)
            theta2 = theta2 - alpha * a2 * d3  # 11*1 * 1*4 => 11*4a  d(loss)/d(theta2) = d(loss)/d(z3) * d(z3)/d(theta2)
            d2 = np.multiply((theta2 * d3.T), np.multiply(a2, (1 - a2)))  # (11*4 * 4*1) multiply ( 11*1 multiply 11*1) => 11*1
            theta1 = theta1 - alpha * a1 * d2[0:numHid,:].T  # 15*1 * 1*10 => 15*10
    return theta1, theta2, numHid, numOut
Author: ijustloveses | Project: machine_learning | Lines: 29 | Source: my_charrnn.py


Example 4: cost_function

import numpy
from sigmoid import sigmoid

def cost_function(cost_function_parameters):
    """Cost function"""
    theta = cost_function_parameters['theta']
    input_layer_size = cost_function_parameters['input_layer_size']
    hidden_layer_size = cost_function_parameters['hidden_layer_size']
    num_labels = cost_function_parameters['number_of_labels']
    x_values = cost_function_parameters['x_values']
    y_values = cost_function_parameters['y_values']
    lambda_value = cost_function_parameters['lambda_value']

    theta_1_parameters = theta[0: (hidden_layer_size * (input_layer_size + 1))]
    theta_2_parameters = theta[(hidden_layer_size * (input_layer_size + 1)):]

    theta_1 = theta_1_parameters.reshape(hidden_layer_size, input_layer_size + 1)
    theta_2 = theta_2_parameters.reshape(num_labels, (hidden_layer_size + 1))

    input_examples_size = x_values.shape[0]

    hidden_layer_input = numpy.c_[numpy.ones(input_examples_size), x_values].dot(theta_1.T)
    hidden_layer_output = sigmoid(hidden_layer_input)

    output_layer_input = numpy.c_[numpy.ones(hidden_layer_output.shape[0]), hidden_layer_output].dot(theta_2.T)
    output = sigmoid(output_layer_input)

    first_part_of_cost = -((y_values) * numpy.log(output))
    second_part_of_cost = ((1.0 - y_values) * numpy.log(1.0-output))

    combined_thetas = numpy.append(theta_1.flatten()[1:], theta_2.flatten()[1:])
    regularization_term = (lambda_value/(2.0 * input_examples_size)) * numpy.sum(numpy.power(combined_thetas, 2))

    j = ((1.0/input_examples_size) * numpy.sum(numpy.sum(first_part_of_cost - second_part_of_cost))) + regularization_term
    return j
Author: multunus | Project: autonomous-rc-car | Lines: 32 | Source: cost_function.py
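
The dictionary interface expects the unrolled weights together with the layer sizes. An illustrative parameter set, with shapes chosen arbitrarily for the sketch:

import numpy

params = {
    'theta': 0.01 * numpy.random.randn(25 * 401 + 10 * 26),     # unrolled theta_1 and theta_2
    'input_layer_size': 400,
    'hidden_layer_size': 25,
    'number_of_labels': 10,
    'x_values': numpy.random.rand(5, 400),                      # 5 examples
    'y_values': numpy.eye(10)[numpy.random.randint(0, 10, 5)],  # one-hot labels
    'lambda_value': 1.0,
}
j = cost_function(params)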


Example 5: lrCostFunction

import numpy as np
from sigmoid import sigmoid

def lrCostFunction(theta, X, y, lmbda):
    # Initialize some useful values
    m = y.shape[0]  # number of training examples

    # You need to return the following variables correctly
    J = 0
    grad = np.zeros(theta.shape)

    # ====================== YOUR CODE HERE ======================

    def h(X, theta):
        return X.dot(theta)

    J = float(-y.T * np.nan_to_num(np.log(sigmoid(h(X, theta))).T) -
              (1 - y).T * np.nan_to_num(np.log(1 - sigmoid(h(X, theta))).T)) / m
    reg_cost = theta.copy()
    reg_cost[0] = 0
    J += (lmbda * reg_cost.T.dot(reg_cost)) / (2 * m)

    grad = np.asarray((sigmoid(h(X, theta)) - y.T).dot(X) / m)[0]
    reg_grad = theta * (float(lmbda) / m)
    reg_grad[0] = 0
    grad += reg_grad

    # =============================================================

    return (J, grad)
Author: Pebody | Project: pymlclass | Lines: 27 | Source: lrCostFunction.py


Example 6: costFunction

import numpy as np
from sigmoid import sigmoid

def costFunction(Theta, X, Y, lam):
    """Returns cost of Theta using logistic regression"""

    m = len(X)
    n = len(X[0])
    k = len(Y[0])
    k_h = (n + k) // 2      #average of features and categories
    Theta1 = np.reshape(Theta[0:(n+1)*k_h], (n+1, k_h))
    Theta2 = np.reshape(Theta[(n+1)*k_h:], (k_h+1, k))

    one = np.ones(m)
    one = np.reshape(one, (m, 1))
    a1 = np.concatenate((one, X), axis=1)
   
    #compute inputs to hidden layer
    a2 = sigmoid(np.dot(a1, Theta1))
    a2 = np.concatenate((one, a2), axis=1)

    #compute output layer
    a3 = sigmoid(np.dot(a2, Theta2))

    #compute cost
    J = -(1.0/m) * (np.dot(np.log(a3).T,  Y) + \
        np.dot(np.log(1.0 - a3).T, (1.0 - Y)))
    J = J.sum()

    #compute regularization term (square elementwise, excluding the bias rows;
    #the original used dot(Theta.T, Theta), which mixes in cross terms)
    Theta1_sq = Theta1 ** 2
    Theta1_sq[0, :] = np.zeros(k_h)
    Theta2_sq = Theta2 ** 2
    Theta2_sq[0, :] = np.zeros(k)
    J = J + (lam / 2.0 / m) * (Theta1_sq.sum() + Theta2_sq.sum())
    print('cost =', J)
    return J
Author: XkhldY | Project: kaggle_sfcrime | Lines: 34 | Source: nnCostFunction.py


Example 7: costFunction

def costFunction(theta, X, y, return_grad=False):
#COSTFUNCTION Compute cost and gradient for logistic regression
#   J = COSTFUNCTION(theta, X, y) computes the cost of using theta as the
#   parameter for logistic regression and the gradient of the cost
#   w.r.t. to the parameters.

    import numpy as np 
    from sigmoid import sigmoid

    # Initialize some useful values
    m = len(y) # number of training examples

    # You need to return the following variables correctly 
    J = 0
    grad = np.zeros(theta.shape)

    # ====================== YOUR CODE HERE ======================
    # Instructions: Compute the cost of a particular choice of theta.
    #               You should set J to the cost.
    #               Compute the partial derivatives and set grad to the partial
    #               derivatives of the cost w.r.t. each parameter in theta
    #
    # Note: grad should have the same dimensions as theta
    #

    # given the following dimensions:
    # theta.shape = (n+1,1)
    # X.shape     = (m,n+1)
    # the equation's 
    #	theta' times X
    # becomes
    # 	np.dot(X,theta)
    # to obtain a (m,1) vector
    # given that
    #   y.shape     = (m,)
    # we transpose the (m,1) shaped 
    #   np.log( sigmoid( np.dot(X,theta) ) )        , as well as
    #   np.log( 1 - sigmoid( np.dot(X,theta) ) )
    # to obtain (1,m) vectors to be mutually added, 
    # and whose elements are summed to form a scalar 
    one = y * np.transpose(np.log( sigmoid( np.dot(X,theta) ) ))
    two = (1-y) * np.transpose(np.log( 1 - sigmoid( np.dot(X,theta) ) ))
    J = -(1./m)*(one+two).sum()

    # here we need n+1 gradients. 
    # note that 
    #   y.shape                          = (m,)
    #   sigmoid( np.dot(X,theta) ).shape = (m, 1)
    # so we transpose the latter, subtract y, obtaining a vector of (1, m)
    # we multiply such vector by X, whose dimension is 
    #   X.shape = (m, n+1), 
    # and we obtain a (1, n+1) vector, which we also transpose
    # this last vectorized multiplication takes care of the sum
    grad = (1./m) * np.dot(sigmoid( np.dot(X,theta) ).T - y, X).T

    if return_grad:
        return J, np.transpose(grad)
    return J  # scalar form, for use in fmin/fmin_bfgs optimization functions
Author: arturomp | Project: coursera-machine-learning-in-python | Lines: 59 | Source: costFunction.py
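
Since the function returns a scalar when return_grad is False, it can be handed straight to scipy's optimizers, as the final comment suggests. A sketch with invented data:

import numpy as np
from scipy.optimize import fmin_bfgs

m = 100
X = np.hstack((np.ones((m, 1)), np.random.randn(m, 2)))  # bias column + 2 features
y = (np.random.rand(m) > 0.5).astype(float)
theta_opt = fmin_bfgs(costFunction, np.zeros(3), args=(X, y))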


Example 8: sigmoidGradient

from sigmoid import sigmoid

def sigmoidGradient(z):
    """returns the gradient of the sigmoid function evaluated at z
       g = SIGMOIDGRADIENT(z) computes the gradient of the sigmoid function
       evaluated at z. This should work regardless if z is a matrix or a
       vector. In particular, if z is a vector or matrix, you should return
       the gradient for each element."""
    
    return sigmoid(z) * (1-sigmoid(z))
Author: hzitoun | Project: coursera_machine_learning_matlab_python | Lines: 8 | Source: sigmoidGradient.py
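
A quick finite-difference check (purely illustrative) confirms the formula; at z = 0 the gradient is exactly 0.25:

import numpy as np
from sigmoid import sigmoid

z = np.array([-1.0, 0.0, 2.5])
eps = 1e-6
numeric = (sigmoid(z + eps) - sigmoid(z - eps)) / (2 * eps)
print(np.allclose(sigmoidGradient(z), numeric))  # expect True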


Example 9: compute_cost

import numpy as np
import sigmoid as sg

def compute_cost(theta, X, y):
    """Computes the logistic-regression cost given parameters and data."""
    m = X.shape[0]  # number of training examples
    theta = np.reshape(theta, (len(theta), 1))

    h = sg.sigmoid(X.dot(theta))
    J = (1. / m) * (-np.transpose(y).dot(np.log(h)) - np.transpose(1 - y).dot(np.log(1 - h)))
    grad = np.transpose((1. / m) * np.transpose(h - y).dot(X))  # computed but unused
    # optimize.fmin expects a single scalar value, so grad cannot be returned alongside J
    return J.mean()
Author: harjeet88 | Project: logistic-regression | Lines: 9 | Source: logit.py
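
Because only the scalar J.mean() is returned, the function fits scipy.optimize.fmin directly, which is what the comment inside it hints at. A sketch with invented shapes:

import numpy as np
from scipy import optimize

X = np.hstack((np.ones((4, 1)), np.random.randn(4, 2)))  # bias column + 2 features
y = np.array([[0.0], [1.0], [1.0], [0.0]])
theta_opt = optimize.fmin(compute_cost, np.zeros(3), args=(X, y))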


Example 10: sigmoidGradient

from sigmoid import sigmoid

def sigmoidGradient(z):

    # SIGMOIDGRADIENT returns the gradient of the sigmoid function evaluated at z

    g = sigmoid(z) * (1.0 - sigmoid(z))  # elementwise; the original's stray transpose broke non-square inputs

    return g
Author: nsfxc | Project: text_detection | Lines: 9 | Source: sigmoidGradient.py


Example 11: sigmoidGradient

def sigmoidGradient(z):

    import sigmoid as sg

    g = sg.sigmoid(z) * (1 - sg.sigmoid(z))

    return g
Author: billwiliams | Project: pythonml | Lines: 9 | Source: sigmoidGradient.py


Example 12: costFunctionReg

import math
import numpy
import sigmoid
import utils  # project-local helper providing multimap

def costFunctionReg(theta, X, y, lam):
    m = X.shape[0]
    theta = theta.reshape(theta.shape[0], 1)
    h = sigmoid.sigmoid(numpy.dot(X, theta))
    # regularization enters with a positive sign, outside the negated bracket
    # (the original placed it inside, which subtracted it from the cost)
    J = -(1.0 / m) * (numpy.dot(numpy.transpose(y), utils.multimap(math.log, h))
                      + numpy.dot(numpy.transpose(1 - y), utils.multimap(math.log, 1 - h))) \
        + (lam / (2.0 * m)) * numpy.dot(numpy.transpose(theta[1:, :]), theta[1:, :])
    return float(J[0])
Author: kieranroberts | Project: logit | Lines: 10 | Source: costFunctionReg.py


Example 13: predict

import numpy as np
from sigmoid import sigmoid

def predict(X, Theta1, Theta2):
    m, n = X.shape

    # X is assumed to already include the bias column; ones are only
    # prepended before the second layer below
    a1 = X
    z2 = a1.dot(Theta1.T)
    a2 = sigmoid(z2)
    a2 = np.concatenate((np.ones((m, 1)), a2), axis=1)
    z3 = a2.dot(Theta2.T)
    a3 = sigmoid(z3)
    return np.argmax(a3, axis=1)[np.newaxis]
Author: NeuroMonk | Project: Machine-Learning-Coursera | Lines: 10 | Source: predict.py


Example 14: predict

from numpy import concatenate, dot, log, mean, ones
from sigmoid import sigmoid
# reshapeThetas is a project-local helper defined elsewhere in the repo

def predict(nn_params, layers, X, y, lam, display, path):
	m = X.shape[0]
	Theta = reshapeThetas(nn_params, layers)
	l = len(layers)

	A = []
	A_ones = []
	A_sig = []
	Z = []
	J = 0

	for i in range(0,l):
		A.append(0)
		A_ones.append(0)
		A_sig.append(0)
		Z.append(0)

	A_ones[0] = ones((m,1))+0.0
	A[0] = concatenate((A_ones[0],X),1) 
	Z[1] = dot(A[0],Theta[0].conj().T)


	for i in range(1,l-1):
		A_ones[i] = ones((Z[i].shape[0],1))+0.0
		A_sig[i] = sigmoid(Z[i])
		A[i] = concatenate((A_ones[i], A_sig[i]),1)
		Z[i+1] = dot(A[i],Theta[i].conj().T)  # index by i; the original hard-coded 1 here, a likely bug

	A[-1] = sigmoid(Z[-1])

	predictions = A[-1].argmax(axis=1)+0.0

	# cost calculation
	if not isinstance(y, int): # if there are associated y values, calculate test results ('long' merged into int in Python 3)
		for i in range(0,layers[-1]):
			J_curr =  (1.0/m)*sum(-1*((y==i)*log(A[-1][:,i])) - (1-(y==i)) * log(1-A[-1][:,i]))
			J += J_curr

		if display == 1: # if the results should be displayed, do so
			print(mean(predictions == y)*100, '%')
			print("Cost:", J)

		return (J, mean(predictions == y))

	else: # if there are no y values, save predictions to file
		pfile = open(path + '/predictions.txt','w')
		for i in predictions:
			pfile.write(str(int(i))+'\n')
		pfile.close()
		ffile =  open(path + '/feature predict.txt','w')
		A_c = A[-1]
		A_c.tolist()
		for i in range(0,len(A_c)):
			ffile.write(','.join(str(elem) for elem in A_c[i])+'\n')
		ffile.close()
Author: zsstor | Project: neural-network | Lines: 55 | Source: predict.py


Example 15: sigmoidGradient

from numpy import zeros
from sigmoid import sigmoid

def sigmoidGradient(z):

    # sigmoidGradient returns the gradient of the sigmoid function evaluated at z

    g = zeros(z.shape)
    # =========================== DONE ==================================
    # Instructions: Compute the gradient of the sigmoid function evaluated at
    #               each value of z.
    g += sigmoid(z) * (1 - sigmoid(z))
    
    return g
Author: omoindrot | Project: INF582 | Lines: 11 | Source: sigmoidGradient.py


Example 16: predict

import numpy as np
from sigmoid import sigmoid

def predict(Theta1, Theta2, X):
    
    # Useful values
    m = X.shape[0]
    num_labels = Theta2.shape[0]

    a1 = np.vstack((np.ones(m), X.T)).T
    a2 = sigmoid(np.dot(a1, Theta1.T))
    a2 = np.vstack((np.ones(m), a2.T)).T
    a3 = sigmoid(np.dot(a2, Theta2.T))

    return np.argmax(a3, axis=1)
Author: originals-tz | Project: Coursera-Machine-Learning | Lines: 12 | Source: predict.py


Example 17: sigmoidGradient

import numpy as np
from sigmoid import sigmoid

def sigmoidGradient(z):

    g = np.zeros(z.shape)

    # ====================== YOUR CODE HERE ======================
    # Instructions: Compute the gradient of the sigmoid function evaluated at
    #               each value of z (z can be a matrix, vector or scalar).

    g = np.multiply(sigmoid(z), 1 - sigmoid(z))

    # =============================================================

    return g
Author: Pebody | Project: pymlclass | Lines: 13 | Source: sigmoidGradient.py


Example 18: costFunction

from numpy import dot, log
from sigmoid import sigmoid

def costFunction(theta, X, y):
    """ computes the cost of using theta as the
    parameter for logistic regression and the
    gradient of the cost w.r.t. to the parameters."""
    # Initialize some useful values
    m = y.size  # number of training examples
    first = -dot(y, log(sigmoid(dot(X, theta))))
    second = -dot((1 - y), log(1 - sigmoid(dot(X, theta))))
    J = (first + second) / m
    return J
Author: linnlinn | Project: Coursera-Stanford-ML-Python | Lines: 13 | Source: costFunction.py


Example 19: sigmoidGradient

from sigmoid import sigmoid

def sigmoidGradient(z):
    """computes the gradient of the sigmoid function
    evaluated at z. This should work regardless if z is a matrix or a
    vector. In particular, if z is a vector or matrix, you should return
    the gradient for each element."""

# ====================== YOUR CODE HERE ======================
# Instructions: Compute the gradient of the sigmoid function evaluated at
#               each value of z (z can be a matrix, vector or scalar).


# =============================================================
    g = sigmoid(z) * (1 - sigmoid(z))
    return g
Author: marieyalap | Project: Coursera-Stanford-ML-Python | Lines: 14 | Source: sigmoidGradient.py


Example 20: lrCostFunction

def lrCostFunction(theta, X, y, lambda_reg, return_grad=False):
#LRCOSTFUNCTION Compute cost and gradient for logistic regression with 
#regularization
#   J = LRCOSTFUNCTION(theta, X, y, lambda_reg) computes the cost of using
#   theta as the parameter for regularized logistic regression and the
#   gradient of the cost w.r.t. to the parameters. 

    import numpy as np
    from sigmoid import sigmoid
    import sys

    # Initialize some useful values
    m = len(y) # number of training examples

    # You need to return the following variables correctly 
    J = 0
    grad = np.zeros(theta.shape)

    # ====================== YOUR CODE HERE ======================
    # Instructions: Compute the cost of a particular choice of theta.
    #               You should set J to the cost.
    #               Compute the partial derivatives and set grad to the partial
    #               derivatives of the cost w.r.t. each parameter in theta
    #

    # taken from costFunctionReg.py
    one = y * np.transpose(np.log( sigmoid( np.dot(X,theta) ) ))
    two = (1-y) * np.transpose(np.log( 1 - sigmoid( np.dot(X,theta) ) ))
    reg = ( float(lambda_reg) / (2*m)) * np.power(theta[1:theta.shape[0]],2).sum()
    J = -(1./m)*(one+two).sum() + reg

    grad = (1./m) * np.dot(sigmoid( np.dot(X,theta) ).T - y, X).T + ( float(lambda_reg) / m )*theta

    # the case of j = 0 (recall that grad is a n+1 vector)
    grad_no_regularization = (1./m) * np.dot(sigmoid( np.dot(X,theta) ).T - y, X).T

    # and then assign only the first element of grad_no_regularization to grad
    grad[0] = grad_no_regularization[0]

    # display cost at each iteration
    sys.stdout.write("Cost: %f   \r" % (J) )
    sys.stdout.flush()

    if return_grad:
        return J, grad.flatten()
    else:
        return J

    # =============================================================
Author: arturomp | Project: coursera-machine-learning-in-python | Lines: 49 | Source: lrCostFunction.py
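
With return_grad=True the function returns the (cost, gradient) pair that scipy.optimize.minimize consumes via jac=True. A sketch with invented data:

import numpy as np
from scipy.optimize import minimize

m = 50
X = np.hstack((np.ones((m, 1)), np.random.randn(m, 3)))  # bias column + 3 features
y = (np.random.rand(m) > 0.5).astype(float)
res = minimize(lrCostFunction, np.zeros(4), args=(X, y, 1.0, True),
               jac=True, method='BFGS')
print(res.x)  # fitted parameters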



Note: the sigmoid.sigmoid examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and similar platforms. Copyright in each snippet remains with its original author; consult the corresponding project's license before reusing or redistributing the code. Please do not republish without permission.

