
Python utils.sigmoid Function Code Examples


This article collects typical usage examples of the Python utils.sigmoid function. If you have been struggling with questions such as: what exactly does Python's sigmoid function do, how is it called, and what does real-world usage look like? then the hand-picked code examples below should help.



Twenty code examples of the sigmoid function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
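All of the examples assume that a `sigmoid` helper (imported directly or as `utils.sigmoid`) is defined somewhere in the project; that helper is not reproduced on this page. As a point of reference only, a minimal, numerically stable NumPy implementation might look like the following sketch (the name and signature are assumptions, not code from any project below):

    import numpy as np

    def sigmoid(z):
        """Numerically stable logistic function: sigma(z) = 1 / (1 + exp(-z))."""
        z = np.asarray(z, dtype=float)
        e = np.exp(-np.abs(z))  # the exponent is always <= 0, so exp() never overflows
        return np.where(z >= 0, 1.0 / (1.0 + e), e / (1.0 + e))

A property several examples below rely on: the derivative satisfies sigma'(z) = sigma(z) * (1 - sigma(z)), so gradients can be computed from the activations alone.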

Example 1: forward

 def forward(self, x_t, h_tm1, c_tm1):
     # Input, forget and output gates include peephole connections to the cell state.
     i_t = sigmoid(T.dot(x_t, self.W_xi) + T.dot(h_tm1, self.W_hi) + c_tm1 * self.W_ci)
     f_t = sigmoid(T.dot(x_t, self.W_xf) + T.dot(h_tm1, self.W_hf) + c_tm1 * self.W_cf)
     c_t = f_t * c_tm1 + i_t * self.activation(T.dot(x_t, self.W_xc) + T.dot(h_tm1, self.W_hc))
     o_t = sigmoid(T.dot(x_t, self.W_xo) + T.dot(h_tm1, self.W_ho) + c_t * self.W_co)
     h_t = o_t * self.activation(c_t)
     return h_t, c_t
Developer: hiroki13 | Project: neural-language-models | Lines: 7 | Source: layer.py
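For reference, `forward` above is one step of an LSTM with peephole connections (the `c_tm1 * self.W_ci` style terms). Written out, with sigma the sigmoid and phi the layer activation (typically tanh), the update is:

    i_t = \sigma(W_{xi} x_t + W_{hi} h_{t-1} + W_{ci} \odot c_{t-1})
    f_t = \sigma(W_{xf} x_t + W_{hf} h_{t-1} + W_{cf} \odot c_{t-1})
    c_t = f_t \odot c_{t-1} + i_t \odot \phi(W_{xc} x_t + W_{hc} h_{t-1})
    o_t = \sigma(W_{xo} x_t + W_{ho} h_{t-1} + W_{co} \odot c_t)
    h_t = o_t \odot \phi(c_t)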


Example 2: logistic_predict

def logistic_predict(weights, data):
    """
    Compute the probabilities predicted by the logistic classifier.

    Note: N is the number of examples and
          M is the number of features per example.

    Inputs:
        weights:    (M+1) x 1 vector of weights, where the last element
                    corresponds to the bias (intercept).
        data:       N x M data matrix where each row corresponds
                    to one data point.
    Outputs:
        y:          N x 1 vector of probabilities. This is the output of the classifier.
    """
    N, M = data.shape
    # Append a column of ones so the last weight acts as the bias term,
    # applied exactly once per example.
    augdata = np.ones((N, M + 1))
    augdata[:, :-1] = data
    z = np.dot(augdata, weights)  # z is N x 1
    y = sigmoid(z)
    return y
Developer: tianrui | Project: CSC411 | Lines: 30 | Source: logistic.py
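A minimal usage sketch (random data with hypothetical shapes, assuming NumPy and a `sigmoid` helper such as the one sketched above are in scope):

    import numpy as np

    N, M = 5, 3
    data = np.random.randn(N, M)         # N examples, M features
    weights = np.random.randn(M + 1, 1)  # last entry is the bias
    y = logistic_predict(weights, data)  # N x 1 vector of probabilities in (0, 1)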


Example 3: grad_loss

    def grad_loss(self, *args):
        """
        Compute the gradient of the regularized logistic loss function


        Inputs:
        - X: N x D array of data; each row is a data point.
        - y: 1-dimensional array of length N with real values.
        - reg: (float) regularization strength.

        Returns:
        - gradient with respect to self.theta; an array of the same shape as theta
        """
        theta,X,y,reg = args
        m,dim = X.shape
        grad = np.zeros((dim,))
        ##########################################################################
        # Compute the gradient of the loss function for regularized logistic    #
        # regression; the bias weight grad[0] is recomputed without the         #
        # regularization term.                                                  #
        ##########################################################################
        grad = X.T.dot(utils.sigmoid(X.dot(theta)) - y) / m + reg * theta / m
        grad[0] = X[:, :1].T.dot(utils.sigmoid(X.dot(theta)) - y) / m

        ###########################################################################
        #                           END OF YOUR CODE                              #
        ###########################################################################
        return grad
Developer: atom-zju | Project: comp540_HW2 | Lines: 29 | Source: logistic_regressor.py


Example 4: cost

 def cost(self, theta1, theta2):
     z1 = np.dot(self.train, theta1)
     a2 = utils.sigmoid(z1)
     a2 = np.append(np.ones((a2.shape[0], 1)), a2, 1)  # prepend bias column
     z2 = np.dot(a2, theta2)
     h = utils.sigmoid(z2)
     # Cross-entropy cost averaged over the m training examples
     return -np.sum(self.goal * np.log(h) + (1 - self.goal) * np.log(1 - h)) / self.m
Developer: EamonKavanagh | Project: neuralnetwork | Lines: 7 | Source: Network.py


Example 5: _step

		def _step(x_t, ct_1, ht_1, Wi, Wf, Wo, Wc, Whi, Whf, Who, Whc, bi, bf, bo, bc):
			i = sigmoid(T.dot(x_t, Wi) + T.dot(ht_1, Whi) + bi)
			f = sigmoid(T.dot(x_t, Wf) + T.dot(ht_1, Whf) + bf)
			o = sigmoid(T.dot(x_t, Wo) + T.dot(ht_1, Who) + bo)
			c = tanh(T.dot(x_t, Wc) + T.dot(ht_1, Whc) + bc)
			c_new = i * c + f * ct_1
			h_new = o * tanh(c_new)
			return c_new, h_new
Developer: aciccarelli | Project: DNN_Lab_UPF | Lines: 8 | Source: char_rnn_lstm.py


Example 6: _step

		def _step(x_t, ct_1, ht_1, W, Wh, b, dim):
			tmp = T.dot(x_t, W) + T.dot(ht_1, Wh) + b
			i = sigmoid(_slice(tmp, 0, dim))
			f = sigmoid(_slice(tmp, 1, dim))
			o = sigmoid(_slice(tmp, 2, dim))
			c = tanh(_slice(tmp, 3, dim))
			c_new = i * c + f * ct_1
			h_new = o * tanh(c_new)
			return c_new, h_new
Developer: aciccarelli | Project: DNN_Lab_UPF | Lines: 9 | Source: char_rnn_lstm_fast.py


Example 7: predict

 def predict(self, newData=None):
     if newData is None:
         newData = self.train
     else:
         newData = np.append(np.ones((newData.shape[0],1)), newData, 1)
         
     z = utils.sigmoid(np.dot(newData, self.inputWeight))
     z = np.append(np.ones((z.shape[0],1)), z, 1)
     digitProb = utils.sigmoid(np.dot(z, self.hiddenWeight))
     return np.argmax(digitProb,1)
Developer: EamonKavanagh | Project: neuralnetwork | Lines: 10 | Source: Network.py


Example 8: _step_index

		def _step_index(x_t, ct_1, ht_1, Wi, Wf, Wo, Wc, Whi, Whf, Who, Whc, bi, bf, bo, bc):
			# x_t: array of type int32
			# use indexing on Wi, Wf, Wo and Wc matrices instead of computing the product with the one-hot representation of the input for computational and memory efficiency
			i = sigmoid(Wi[x_t] + T.dot(ht_1, Whi) + bi)
			f = sigmoid(Wf[x_t] + T.dot(ht_1, Whf) + bf)
			o = sigmoid(Wo[x_t] + T.dot(ht_1, Who) + bo)
			c = tanh(Wc[x_t] + T.dot(ht_1, Whc) + bc)
			c_new = i * c + f * ct_1
			h_new = o * tanh(c_new)
			return c_new, h_new
Developer: aciccarelli | Project: DNN_Lab_UPF | Lines: 10 | Source: char_rnn_lstm.py


Example 9: _step_index

		def _step_index(x_t, ct_1, ht_1, W, Wh, b, dim):
			# x_t: array of type int32
			# use indexing on W matrix instead of computing dot product with the one-hot representation of the input for computational and memory efficiency
			tmp = W[x_t] + T.dot(ht_1, Wh) + b
			i = sigmoid(_slice(tmp, 0, dim))
			f = sigmoid(_slice(tmp, 1, dim))
			o = sigmoid(_slice(tmp, 2, dim))
			c = tanh(_slice(tmp, 3, dim))
			c_new = i * c + f * ct_1
			h_new = o * tanh(c_new)
			return c_new, h_new
Developer: aciccarelli | Project: DNN_Lab_UPF | Lines: 11 | Source: char_rnn_lstm_fast.py


Example 10: forward

def forward(network, x):
  W1, W2, W3 = network['W1'], network['W2'], network['W3']
  b1, b2, b3 = network['b1'], network['b2'], network['b3']

  a1 = np.dot(x, W1) + b1
  z1 = sigmoid(a1)
  a2 = np.dot(z1, W2) + b2
  z2 = sigmoid(a2)
  a3 = np.dot(z2, W3) + b3
  y = identity_function(a3)

  return y
Developer: hunering | Project: demo-code | Lines: 12 | Source: 3.6.py
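A usage sketch with a hypothetical 2-3-2-2 network: the weight shapes only need to chain correctly, and `identity_function` is assumed here to be the identity output activation defined elsewhere in the source file:

    import numpy as np

    def identity_function(a):  # assumed output activation from the source file
        return a

    network = {
        'W1': np.random.randn(2, 3), 'b1': np.zeros(3),
        'W2': np.random.randn(3, 2), 'b2': np.zeros(2),
        'W3': np.random.randn(2, 2), 'b3': np.zeros(2),
    }
    y = forward(network, np.array([1.0, 0.5]))  # shape (2,)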


Example 11: get_reconstruction_cross_entropy

    def get_reconstruction_cross_entropy(self):
        pre_sigmoid_activation_h = numpy.dot(self.input, self.W) + self.hbias
        sigmoid_activation_h = sigmoid(pre_sigmoid_activation_h)
        
        pre_sigmoid_activation_v = numpy.dot(sigmoid_activation_h, self.W.T) + self.vbias
        sigmoid_activation_v = sigmoid(pre_sigmoid_activation_v)

        cross_entropy = -numpy.mean(
            numpy.sum(self.input * numpy.log(sigmoid_activation_v) + 
            (1 - self.input) * numpy.log(1 - sigmoid_activation_v),  axis=1))
        
        return cross_entropy
Developer: BinbinBian | Project: DeepLearning | Lines: 12 | Source: RBM.py


Example 12: _CD1

	def _CD1(self, visible_data, weights, visible_bias, hidden_bias):
		N = np.shape(visible_data)[0]
		# Positive phase
		visible_state = visible_data
		
		if self.visible_type == "SIGMOID" :
			visible_state = self._samplebinary(visible_state)
		elif self.visible_type == "LINEAR" :
			visible_state = self._add_gaussian_noise(visible_state)

		
		nw = np.dot(visible_state, weights) + np.tile(hidden_bias, (N, 1))
		if self.hidden_type == "SIGMOID":
			hidden_probability = u.sigmoid(nw) 
			hidden_state = self._samplebinary(hidden_probability)
		elif self.hidden_type == "LINEAR":
			hidden_state = self._add_gaussian_noise(nw)
			
		gradient1 = self._gradient_weights(visible_state, hidden_state, weights)
		visible_biases1 = self._gradient_biases(visible_state, visible_bias)
		hidden_biases1 = self._gradient_biases(hidden_state, hidden_bias)

		# Negative phase
		# Skip sampling as well...
		visible_state = np.dot(hidden_state, weights.T) + np.tile(visible_bias, (N, 1))
		
		if self.visible_type == "SIGMOID":
			visible_state = u.sigmoid(visible_state)
			#visible_probability = u.sigmoid(visible_state)
			#visible_state = self._samplebinary(visible_probability)
			
		# skip sampling here
		
		nw = np.dot(visible_state, weights) + np.tile(hidden_bias, (N, 1))
		if self.hidden_type == "SIGMOID":
			hidden_probability = u.sigmoid(nw)
			hidden_state = hidden_probability
		elif self.hidden_type == "LINEAR" :
			hidden_state = nw

		gradient2 = self._gradient_weights(visible_state, hidden_state, weights)
		visible_biases2 = self._gradient_biases(visible_state, visible_bias)
		hidden_biases2 = self._gradient_biases(hidden_state, hidden_bias)
		
		# gradients
		weights = gradient1 - gradient2
		visible_biases = visible_biases1 - visible_biases2
		hidden_biases = hidden_biases1 - hidden_biases2

		return weights, visible_biases, hidden_biases
Developer: kuntzer | Project: pylae | Lines: 50 | Source: RBM_cd1.py
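The returned triple is the CD-1 (one-step contrastive divergence) estimate of the gradient: the difference between the data-driven statistics from the positive phase and the reconstruction-driven statistics from the negative phase, e.g. for the weights

    \Delta W \propto \langle v h^\top \rangle_{\text{data}} - \langle v h^\top \rangle_{\text{recon}}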


Example 13: train

def train(set_, dimension, lambda_):
    temp_w = np.zeros(dimension)
    w0 = 0.
    w = temp_w

    # print ("Lambda: " + str(lambda_))

    prev_error = 0.

    h = [0.5] * len(set_)
    current_error = calc_error(set_, w, lambda_, h, dimension)
    # num_iter = 0

    while abs(current_error - prev_error) > 0.001:
        delta_w0 = 0.
        delta_w = np.zeros(dimension)

        # print ("Current error: " + str(current_error))

        for i in range(len(set_)):
            h = utils.sigmoid(np.dot(set_[i][1], w) + w0)
            y = set_[i][0]

            delta_w0 += float(h) - y
            temp_x = (float(h) - y) * set_[i][1]
            delta_w = delta_w + temp_x

        n, error, w, w0 = line_search(set_, dimension, lambda_, w, w0,
            delta_w, delta_w0, current_error)

        # print ("Line search result: ")
        # print (str(w0) + " " + str(w))

        if n == 0:
            break

        # num_iter += 1

        prev_error = current_error
        current_error = error
        # print (current_error)

    # print ("Num iter: " + str(num_iter))
    # print ("params found: " + str(w0) + str(w))
    # print ("-------------------------------")
    # print()
    h = [float(utils.sigmoid(np.dot(tup[1], w) + w0)) 
        for tup in set_]
    return w, w0, calc_error(set_, w, 0, h, dimension)
Developer: darxsys | Project: ML | Lines: 49 | Source: classifier.py


Example 14: minibatch_update

    def minibatch_update(self,x,y,lr,regularization):
        n_sample = x.shape[0]
        info = x
        hidden_cache = []
        for i in xrange(self.n_hidden + 1):
            if i == self.n_hidden:
                probs = softmax(info.dot(self.W[i]) + self.b[i])
            else:
                info = sigmoid(info.dot(self.W[i]) + self.b[i])
                hidden_cache.append(info)
        loss = neg_log_likelihood(probs,y)
        probs[np.arange(n_sample),y] -= 1.0
        errors = probs
        for i in range(self.n_hidden,-1,-1):
            if i >= 1:
                hidden_out = hidden_cache[i - 1]
                grad_hidden_out = errors.dot(self.W[i].T)
                self.W[i] -= (lr * (hidden_out.T).dot(errors) + regularization * self.W[i])
                self.b[i] -= lr * np.sum(errors,axis = 0)
                errors = hidden_out * (1 - hidden_out) * grad_hidden_out
            else:
                hidden_out = x
                self.W[i] -= (lr * (hidden_out.T).dot(errors) + regularization * self.W[i])
                self.b[i] -= lr * np.sum(errors,axis = 0)

        return loss
Developer: situgongyuan | Project: AutoEncoder | Lines: 26 | Source: StackAutoEncoder.py
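The backward pass above uses the sigmoid derivative sigma'(z) = sigma(z)(1 - sigma(z)) applied to the cached activations, i.e. the error propagated to hidden layer l is

    \delta^{(l)} = \left( \delta^{(l+1)} W_l^\top \right) \odot h^{(l)} \odot \left( 1 - h^{(l)} \right)

which is exactly the `hidden_out * (1 - hidden_out) * grad_hidden_out` line.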


Example 15: loss

    def loss(self, *args):
        """
        Compute the logistic loss function 


        Inputs:
        - X: N x D array of data; each row is a data point.
        - y: 1-dimensional array of length N with real values.

        Returns: loss as a single float
        """
        theta,X,y = args
        m,dim = X.shape
        J = 0

        ##########################################################################
        # Compute the loss function for unregularized logistic regression        #
        # TODO: 1-2 lines of code expected                                       #
        ##########################################################################
        hx = utils.sigmoid(X.dot(theta))
        J = -1*(np.log(hx).T.dot(y)+(np.log(1-hx)).T.dot(1-y)) / m
        ###########################################################################
        #                           END OF YOUR CODE                              #
        ###########################################################################
        return J
Developer: atom-zju | Project: comp540_HW2 | Lines: 25 | Source: logistic_regressor.py
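Examples 15 and 20 compute the same quantity, here vectorized and there as a per-example loop: the average cross-entropy loss

    J(\theta) = -\frac{1}{m} \sum_{i=1}^{m} \left[ y^{(i)} \log \sigma(\theta^\top x^{(i)}) + \left(1 - y^{(i)}\right) \log \left( 1 - \sigma(\theta^\top x^{(i)}) \right) \right]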


Example 16: propup

    def propup(self, v):
        # stacking 2d convolutions here along the depth dimension
        # https://github.com/lmjohns3/py-rbm/blob/master/lmj/rbm.py seems
        # to use 1-d convolutions, and I'm not sure if that's ok
        # not going to escape a couple of loops though

        # using theano's conventions:
        # h is 4d matrix (num_examples, num_feature_maps,
        # feature_map_height, feature_map_width)
        # one feature map kinda corresponds to one hidden unit
        # by the same convention, v is a 4d matrix too: (num_examples,
        # num_images per example (1, or 3 for RGB), image_height,
        # image_width)
        # the same format is for weights: (number of feature maps for visible
        # layer (1 or 3), number of feature maps for hidden layer,
        # filter height, filter width)

        num_examples = v.shape[0]
        activations = np.zeros(
            (
                num_examples,
                self.num_fm,
                self.img_height - self.fm_height + 1,
                self.img_width - self.fm_width + 1
            )
        )
        for i in xrange(num_examples):
            for j in xrange(self.num_fm):
                activations[i, j, :, :] = convolve2d(v[i, 0, :, :], self.w[0, j, ::-1, ::-1], mode='valid')
        return sigmoid(activations + self.b_hid[None, :, None, None])
Developer: ahmedassal | Project: ml-playground | Lines: 30 | Source: convolutional.py


Example 17: think

	def think(self, inputs):
		cur = inputs
		states = [cur]
		for syn in self.synapses:
			cur = utils.sigmoid(np.dot(cur, syn))
			states.append(cur)
		return states
Developer: n6g7 | Project: annath | Lines: 7 | Source: neuralnet.py


Example 18: logistic

def logistic(weights, data, targets, hyperparameters):
    """
    Calculate negative log likelihood and its derivatives with respect to weights.
    Also return the predictions.

    Note: N is the number of examples and 
          M is the number of features per example.

    Inputs:
        weights:    (M+1) x 1 vector of weights, where the last element
                    corresponds to bias (intercepts).
        data:       N x M data matrix where each row corresponds 
                    to one data point.
        targets:    N x 1 vector of binary targets. Values should be either 0 or 1.
        hyperparameters: The hyperparameters dictionary.

    Outputs:
        f:       The sum of the loss over all data points. This is the objective that we want to minimize.
        df:      (M+1) x 1 vector of derivative of f w.r.t. weights.
        y:       N x 1 vector of probabilities.
    """
    
    t = np.transpose(np.repeat(np.reshape(weights[:-1], (len(weights)-1, 1)), len(data), axis = 1))
    f_e = data * t
    z_sums = np.sum(f_e, axis=1)
    y = sigmoid(z_sums + weights[-1])
    f = np.sum(np.log(1 + np.exp(-z_sums - weights[-1])) + (1 - np.transpose(targets)) * (z_sums + weights[-1]))
    df = np.sum(data * np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0)
    df = np.append(df, np.sum(np.transpose(((-np.exp(-z_sums - weights[-1]) / (1 + np.exp(-z_sums - weights[-1]))) + (1 - np.transpose(targets)))), axis = 0))
    df = np.reshape(df, ((len(df), 1)))

    return f, df, np.reshape(y, (len(y), 1))
Developer: DevonWelch | Project: School | Lines: 32 | Source: logistic.py
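Although written in an expanded form, `f` above is the standard negative log-likelihood. With z_i = w^T x_i + b and target t_i, the code computes

    f = \sum_{i=1}^{N} \left[ \log \left( 1 + e^{-z_i} \right) + (1 - t_i)\, z_i \right]

which is algebraically equal to -sum_i [ t_i log sigma(z_i) + (1 - t_i) log(1 - sigma(z_i)) ].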


Example 19: feed_forward

 def feed_forward(self, train_input):
     self.layers[0].input = train_input
     self.layers[0].output = train_input
     for i in xrange(len(self.layers) - 1):
         self.layers[i + 1].input = (self.weights[i].transpose() * self.layers[i].output) + self.bias[i]
         self.layers[i + 1].output = sigmoid(self.layers[i + 1].input)
     return self.layers[-1].output
Developer: sharare90 | Project: master | Lines: 7 | Source: models.py


Example 20: loss

    def loss(self, *args):
        """
        Compute the logistic loss function 


        Inputs:
        - X: N x D array of data; each row is a data point.
        - y: 1-dimensional array of length N with real values.

        Returns: loss as a single float
        """
        theta,X,y = args
        m,dim = X.shape
        J = 0

        ##########################################################################
        # Compute the loss function for unregularized logistic regression        #
        # TODO: 1-2 lines of code expected                                       #
        ##########################################################################
        J = 1. / m * sum([-y[i] * np.log(utils.sigmoid(theta.dot(X[i]))) - (1 - y[i]) * np.log(1 - utils.sigmoid(theta.dot(X[i]))) for i in xrange(m)])

        ###########################################################################
        #                           END OF YOUR CODE                              #
        ###########################################################################
        return J
Developer: phrayezzen | Project: COMP540 | Lines: 25 | Source: logistic_regressor.py



Note: the utils.sigmoid examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.

