
Golang mat64.DenseCopyOf Function Code Examples


This article collects and organizes typical usage examples of the DenseCopyOf function from the Golang package github.com/gonum/matrix/mat64. If you have been wondering what DenseCopyOf does, how to call it, or what real-world usage looks like, the curated code examples below should help.



A total of 20 code examples of the DenseCopyOf function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; such feedback helps the site recommend better Golang code samples.
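Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what DenseCopyOf does in the legacy github.com/gonum/matrix/mat64 package: it allocates a new *mat64.Dense and copies the contents of any mat64.Matrix into it, so the copy can be modified without affecting the original.

package main

import (
	"fmt"

	"github.com/gonum/matrix/mat64"
)

func main() {
	// A 2x2 matrix backed by a flat, row-major []float64.
	a := mat64.NewDense(2, 2, []float64{1, 2, 3, 4})

	// DenseCopyOf returns a fresh *mat64.Dense with the same contents.
	b := mat64.DenseCopyOf(a)

	// Mutating the copy leaves the original untouched.
	b.Set(0, 0, 99)
	fmt.Println(a.At(0, 0)) // 1
	fmt.Println(b.At(0, 0)) // 99
}

This deep-copy behaviour is what the examples below rely on when they snapshot or grow matrices.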

Example 1: duplicateSubtasks

func (this *TaskGraphStructure) duplicateSubtasks(father *Task, node string, instance TaskInstance) *TaskGraphStructure {
	taskStructure := NewTaskGraphStructure()
	// Get all the tasks with origin father.Name
	myindex := 0
	// Define a new origin composed of the Id
	for _, task := range this.Tasks {
		if father.Father == task.OriginId && task.Id != father.Id {
			// task matches; create a new task with the same information...
			newTask := NewTask()
			newTask.Id = myindex
			newTask.Name = task.Name
			newTask.Node = node
			newTask.Father = task.Id
			newTask.OriginId = father.Id
			newTask.Origin = father.Name
			newTask.Module = instance.Module
			newTask.Args = instance.Args
			newTask.Debug = "duplicateSubtasks"
			// ... Add it to the structure...
			taskStructure.Tasks[myindex] = newTask
			// ... and grow the associated matrices
			taskStructure.AdjacencyMatrix = mat64.DenseCopyOf(taskStructure.AdjacencyMatrix.Grow(1, 1))
			taskStructure.DegreeMatrix = mat64.DenseCopyOf(taskStructure.DegreeMatrix.Grow(1, 1))
			myindex += 1
			// And add it to the structure as well
		}
	}
	row, col := taskStructure.AdjacencyMatrix.Dims()
	for r := 0; r < row; r++ {
		for c := 0; c < col; c++ {
			taskStructure.AdjacencyMatrix.Set(r, c, this.AdjacencyMatrix.At(taskStructure.Tasks[r].Father, taskStructure.Tasks[c].Father))
		}
	}
	return taskStructure
}
Developer ID: jmptrader, Project: gautomator, Lines of code: 35, Source file: taskhandler.go
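A pattern worth calling out in Example 1 (and reused in Examples 4, 5, 6, 10, 11 and 20) is mat64.DenseCopyOf(m.Grow(1, 1)): Grow returns an enlarged mat64.Matrix that keeps the existing values, and DenseCopyOf materialises it back into a concrete *mat64.Dense that can be written to with Set. A minimal sketch of that pattern, assuming the usual github.com/gonum/matrix/mat64 import used throughout this article:

// growByOne appends one row and one column to m and returns the result
// as a concrete *mat64.Dense. The original values are retained; the new
// entries are left for the caller to fill in with Set.
func growByOne(m *mat64.Dense) *mat64.Dense {
	return mat64.DenseCopyOf(m.Grow(1, 1))
}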


Example 2: BatchGradientDescent

// Batch gradient descent finds the local minimum of a function.
// See http://en.wikipedia.org/wiki/Gradient_descent for more details.
func BatchGradientDescent(x, y, theta *mat64.Dense, alpha float64, epoch int) *mat64.Dense {
	m, _ := y.Dims()
	for i := 0; i < epoch; i++ {
		xFlat := mat64.DenseCopyOf(x)
		xFlat.TCopy(xFlat)
		temp := mat64.DenseCopyOf(x)

		// Calculate our best prediction, given theta
		temp.Mul(temp, theta)

		// Calculate our error from the real values
		temp.Sub(temp, y)
		xFlat.Mul(xFlat, temp)

		// Temporary hack to get around the fact there is no scalar division in mat64
		xFlatRow, _ := xFlat.Dims()
		gradient := make([]float64, 0)
		for k := 0; k < xFlatRow; k++ {
			row := xFlat.RowView(k)
			for v := range row {
				divd := row[v] / float64(m) * alpha
				gradient = append(gradient, divd)
			}
		}
		grows := len(gradient)
		grad := mat64.NewDense(grows, 1, gradient)
		theta.Sub(theta, grad)
	}
	return theta
}
Developer ID: 24hours, Project: golearn, Lines of code: 32, Source file: gradient_descent.go
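The loop marked "Temporary hack" above (and the similar one in Example 3) scales every element of the gradient by alpha/m by hand. Under the same legacy mat64 API, that scaling could also be expressed with Dense.Scale, the method Examples 15 and 18 already use; a hedged sketch, not part of the original project:

// scaleGradient multiplies every element of grad by alpha/m in place,
// replacing the per-element append loop used in the example above.
func scaleGradient(grad *mat64.Dense, alpha float64, m int) {
	grad.Scale(alpha/float64(m), grad)
}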


Example 3: StochasticGradientDescent

// Stochastic gradient descent updates the parameters of theta on a random row selection from a matrix.
// It is faster as it does not compute the cost function over the entire dataset every time.
// It instead calculates the error parameters over only one row of the dataset at a time.
// In return, there is a trade off for accuracy. This is minimised by running multiple SGD processes
// (the number of goroutines spawned is specified by the procs variable) in parallel and taking an average of the result.
func StochasticGradientDescent(x, y, theta *mat64.Dense, alpha float64, epoch, procs int) *mat64.Dense {
	m, _ := y.Dims()
	resultPipe := make(chan *mat64.Dense)
	results := make([]*mat64.Dense, 0)

	for p := 0; p < procs; p++ {
		go func() {
			// Is this just a pointer to theta?
			thetaCopy := mat64.DenseCopyOf(theta)
			for i := 0; i < epoch; i++ {
				for k := 0; k < m; k++ {
					datXtemp := x.RowView(k)
					datYtemp := y.RowView(k)
					datX := mat64.NewDense(1, len(datXtemp), datXtemp)
					datY := mat64.NewDense(1, 1, datYtemp)
					datXFlat := mat64.DenseCopyOf(datX)
					datXFlat.TCopy(datXFlat)
					datX.Mul(datX, thetaCopy)
					datX.Sub(datX, datY)
					datXFlat.Mul(datXFlat, datX)

					// Horrible hack to get around the fact there is no elementwise division in mat64
					xFlatRow, _ := datXFlat.Dims()
					gradient := make([]float64, 0)
					for i := 0; i < xFlatRow; i++ {
						row := datXFlat.RowView(i)
						for i := range row {
							divd := row[i] / float64(m) * alpha
							gradient = append(gradient, divd)
						}
					}
					grows := len(gradient)
					grad := mat64.NewDense(grows, 1, gradient)
					thetaCopy.Sub(thetaCopy, grad)
				}

			}
			resultPipe <- thetaCopy
		}()
	}

	for {
		select {
		case d := <-resultPipe:
			results = append(results, d)
			if len(results) == procs {
				return averageTheta(results)
			}
		}
	}
}
Developer ID: 24hours, Project: golearn, Lines of code: 56, Source file: gradient_descent.go


Example 4: AugmentTaskStructure

// Returns a combination of the current structure
// and the one passed as argument
func (this *TaskGraphStructure) AugmentTaskStructure(taskStructure *TaskGraphStructure) *TaskGraphStructure {
	// merging adjacency matrix
	initialRowLen, initialColLen := this.AdjacencyMatrix.Dims()
	addedRowLen, addedColLen := taskStructure.AdjacencyMatrix.Dims()
	this.AdjacencyMatrix = mat64.DenseCopyOf(this.AdjacencyMatrix.Grow(addedRowLen, addedColLen))
	//a, b := this.AdjacencyMatrix.Dims()
	for r := 0; r < initialRowLen+addedRowLen; r++ {
		for c := 0; c < initialColLen+addedColLen; c++ {
			switch {
			case r < initialRowLen && c < initialColLen:
				// If we are in the original matrix: do nothing
			case r < initialRowLen && c > initialColLen:
				// If outside, put some zero
				this.AdjacencyMatrix.Set(r, c, float64(0))
			case r > initialRowLen && c < initialColLen:
				// If outside, put some zero
				this.AdjacencyMatrix.Set(r, c, float64(0))
			case r >= initialRowLen && c >= initialColLen:
				// Add the new matrix
				this.AdjacencyMatrix.Set(r, c, taskStructure.AdjacencyMatrix.At(r-initialRowLen, c-initialColLen))
			}
		}
	}
	// merging degree matrix
	initialRowLen, initialColLen = this.DegreeMatrix.Dims()
	addedRowLen, addedColLen = taskStructure.DegreeMatrix.Dims()
	this.DegreeMatrix = mat64.DenseCopyOf(this.DegreeMatrix.Grow(addedRowLen, addedColLen))
	for r := 0; r < initialRowLen+addedRowLen; r++ {
		for c := 0; c < initialColLen+addedColLen; c++ {
			switch {
			case r < initialRowLen && c < initialColLen:
				// If we are in the original matrix: do nothing
			case r < initialRowLen && c > initialColLen:
				// If outside, set zero
				this.DegreeMatrix.Set(r, c, float64(0))
			case r > initialRowLen && c < initialColLen:
				// If outside, set zero
				this.DegreeMatrix.Set(r, c, float64(0))
			case r >= initialRowLen && c >= initialColLen:
				// Add the new matrix
				this.DegreeMatrix.Set(r, c, taskStructure.DegreeMatrix.At(r-initialRowLen, c-initialColLen))
			}
		}
	}
	actualSize := len(this.Tasks)
	for i, task := range taskStructure.Tasks {
		task.Id = actualSize + i
		this.Tasks[actualSize+i] = task
	}
	return this
}
Developer ID: jmptrader, Project: gautomator, Lines of code: 53, Source file: taskhandler.go


Example 5: AddNode

func (this *TaskGraphStructure) AddNode(parentGraph string, name string, attrs map[string]string) {
	for _, taskObject := range this.Tasks {
		if taskObject != nil && taskObject.Name == name {
			return
		}
	}
	id := len(this.Tasks)
	taskObject := NewTask()
	taskObject.Name = name
	taskObject.Id = id
	taskObject.Origin = parentGraph
	this.Tasks[id] = taskObject
	this.DegreeMatrix = mat64.DenseCopyOf(this.DegreeMatrix.Grow(1, 1))
	this.AdjacencyMatrix = mat64.DenseCopyOf(this.AdjacencyMatrix.Grow(1, 1))
}
Developer ID: jmptrader, Project: gautomator, Lines of code: 15, Source file: taskparser.go


Example 6: AddPortEdge

func (this *TaskGraphStructure) AddPortEdge(src, srcPort, dst, dstPort string, directed bool, attrs map[string]string) {
	lastIndex := len(this.Tasks)
	// Find the index of the task src and the index of the task dst
	srcTaskId := -1
	dstTaskId := -1
	increment := 0 // The number of new lines and cols needed
	for taskId, taskObject := range this.Tasks {
		if taskObject != nil {
			// If the task exists, add src as a dependency
			if taskObject.Name == dst {
				dstTaskId = taskId
			}
			if taskObject.Name == src {
				srcTaskId = taskId
			}
		}
	}
	// If the task does not exist, create it and add it to the structure
	if srcTaskId == -1 {
		taskObject := NewTask()
		taskObject.Name = src
		this.Tasks[lastIndex] = taskObject
		increment += 1
		srcTaskId = lastIndex
		taskObject.Id = srcTaskId
		lastIndex += 1
	}
	// If the task does not exist, create it and add it to the structure
	if dstTaskId == -1 {
		taskObject := NewTask()
		taskObject.Name = dst
		this.Tasks[lastIndex] = taskObject
		increment += 1
		dstTaskId = lastIndex
		taskObject.Id = dstTaskId
		lastIndex += 1
	}
	// If the increment is not zero,
	// grow the matrices with Grow...
	if increment > 0 {
		this.DegreeMatrix = mat64.DenseCopyOf(this.DegreeMatrix.Grow(increment, increment))
		this.AdjacencyMatrix = mat64.DenseCopyOf(this.AdjacencyMatrix.Grow(increment, increment))
	}
	// Now fill the matrix
	this.DegreeMatrix.Set(dstTaskId, dstTaskId, this.DegreeMatrix.At(dstTaskId, dstTaskId)+1)
	this.DegreeMatrix.Set(srcTaskId, srcTaskId, this.DegreeMatrix.At(srcTaskId, srcTaskId)+1)
	this.AdjacencyMatrix.Set(srcTaskId, dstTaskId, 1)
}
Developer ID: jmptrader, Project: gautomator, Lines of code: 48, Source file: taskparser.go


Example 7: VarianceCovarianceMatrix

// Calculates the variance-covariance matrix of the regression coefficients
// defined as sigma*(XtX)-1
// Using QR decomposition: X = QR
// ((QR)tQR)-1 ---> (RtQtQR)-1 ---> (RtR)-1 ---> R-1Rt-1 --> sigma*R-1Rt-1
//
func (o *OLS) VarianceCovarianceMatrix() *mat64.Dense {
	x := mat64.DenseCopyOf(o.x.data)
	_, p := x.Dims()

	// it's easier to do things with X = QR
	qrFactor := mat64.QR(x)
	R := qrFactor.R()
	Rt := R.T()

	RtInv, err := mat64.Inverse(Rt)
	if err != nil {
		panic("Rt is not invertible")
	}

	Rinverse, err := mat64.Inverse(R)
	if err != nil {
		panic("R matrix is not invertible")
	}

	varCov := mat64.NewDense(p, p, nil)
	varCov.Mul(Rinverse, RtInv)

	// multiply each element by the mse
	mse := o.MeanSquaredError()
	mulEach := func(r, c int, v float64) float64 { return v * mse }
	varCov.Apply(mulEach, varCov)

	return varCov
}
Developer ID: timkaye11, Project: glasso, Lines of code: 34, Source file: diagnostics.go


Example 8: VarianceInflationFactors

// A simple approach to identify collinearity among explanatory variables is the use of variance inflation factors (VIF).
// VIF calculations are straightforward and easily comprehensible; the higher the value, the higher the collinearity
// A VIF for a single explanatory variable is obtained using the r-squared value of the regression of that
// variable against all other explanatory variables:
//
// VIF_{j} = \frac{1}{1 - R_{j}^2}
//
func (o *OLS) VarianceInflationFactors() []float64 {
	// save a copy of the data
	orig := mat64.DenseCopyOf(o.x.data)

	m := NewOLS(DfFromMat(orig))

	n, p := orig.Dims()

	vifs := make([]float64, p)

	for idx := 0; idx < p; idx++ {
		x := o.x.data

		col := x.Col(nil, idx)

		x.SetCol(idx, rep(1.0, n))

		err := m.Train(col)
		if err != nil {
			panic("Error occurred calculating VIF")
		}

		vifs[idx] = 1.0 / (1.0 - m.RSquared())
	}

	// reset the data
	o.x.data = orig

	return vifs
}
Developer ID: timkaye11, Project: glasso, Lines of code: 37, Source file: diagnostics.go


Example 9: F_Test

// The F statistic measures the change in residual sum-of-squares per
// additional parameter in the bigger model, and it is normalized by an estimate of sigma2
//
//
func (o *OLS) F_Test(toRemove ...int) (fval, pval float64) {
	if len(toRemove) > (o.p - 1) {
		panic("Too many columns to remove")
	}

	data := mat64.DenseCopyOf(o.x.data)
	for _, col := range toRemove {
		data = removeCol(data, col)
	}

	ols := NewOLS(DfFromMat(data))

	err := ols.Train(o.response)
	if err != nil {
		panic(err)
	}

	d1 := float64(o.p - ols.p)
	d2 := float64(o.n - o.p)

	f := (ols.ResidualSumofSquares() - o.ResidualSumofSquares()) / d1
	f /= o.ResidualSumofSquares() / d2

	Fdist := stat.F_CDF(d1, d2)
	p := 1 - Fdist(f)

	return f, p
}
Developer ID: timkaye11, Project: glasso, Lines of code: 32, Source file: diagnostics.go


Example 10: AppendCol

func (df *DataFrame) AppendCol(newCol []float64) {

	df.data = mat64.DenseCopyOf(df.data.Grow(0, 1))
	df.rows, df.cols = df.data.Dims()

	df.data.SetCol(df.cols-1, newCol)
	return
}
Developer ID: timkaye11, Project: glasso, Lines of code: 8, Source file: df.go


Example 11: AppendRow

func (df *DataFrame) AppendRow(newRow []float64) {

	df.data = mat64.DenseCopyOf(df.data.Grow(1, 0))
	df.rows, df.cols = df.data.Dims()

	df.data.SetRow(df.rows-1, newRow)
	return
}
Developer ID: timkaye11, Project: glasso, Lines of code: 8, Source file: df.go


Example 12: Forward

// Forward propagate the examples through the network.
func (m *Mind) Forward(in *mat64.Dense) {
	input := mat64.DenseCopyOf(in)
	m.Results.HiddenSum = mat64.NewDense(1, 1, nil)

	ir, ic := input.Dims()
	or, oc := m.Weights.InputHidden.Dims()
	log.Println("input dims(r,c):", ir, ic)
	log.Println("InputHidden dims(r,c):", or, oc)

	input.Product(m.Weights.InputHidden)
	m.Results.HiddenSum = mat64.DenseCopyOf(input)
	m.Results.HiddenResult = m.Activate(m.Results.HiddenSum)
	//m.Results.OutputSum = mat64.NewDense(1, 1, nil)
	m.Results.HiddenResult.Product(m.Weights.HiddenOutput)
	m.Results.OutputSum = mat64.DenseCopyOf(m.Results.HiddenResult)
	m.Results.OutputResult = m.Activate(m.Results.OutputSum)
}
Developer ID: vibbix, Project: go-mind, Lines of code: 18, Source file: mind.go


Example 13: Error

// Error computes the back-propagation error from a given size * 1 output
// vector and a size * 1 error vector for a given number of iterations.
//
// outArg should be the response from Activate.
//
// errArg should be the difference between the output neuron's output and
// that expected, and should be zero everywhere else.
//
// If the network is conceptually organised into n layers, maxIterations
// should be set to n.
func (n *Network) Error(outArg, errArg *mat64.Dense, maxIterations int) *mat64.Dense {

	// Copy the arguments
	out := mat64.DenseCopyOf(outArg)
	err := mat64.DenseCopyOf(errArg)

	// err should be the difference between observed and expected
	// for observation nodes only (everything else should be zero)

	// Allocate output vector
	outRows, outCols := out.Dims()
	if outCols != 1 {
		panic("Unsupported output size")
	}

	ret := mat64.NewDense(outRows, 1, make([]float64, outRows))

	// Do differential calculation
	diffFunc := func(r, c int, v float64) float64 {
		return n.funcs[r].Backward(v)
	}
	out.Apply(diffFunc, out)

	// Transpose weights matrix
	reverseWeights := mat64.DenseCopyOf(n.weights)
	reverseWeights.TCopy(n.weights)

	// We only need a certain number of passes
	for i := 0; i < maxIterations; i++ {

		// Element-wise multiply errors and derivatives
		err.MulElem(err, out)

		// Add the accumulated error
		ret.Add(ret, err)

		if i != maxIterations-1 {
			// Feed the errors backwards through the network
			err.Mul(reverseWeights, err)
		}
	}

	return ret

}
Developer ID: nickpoorman, Project: golearn, Lines of code: 55, Source file: network.go


Example 14: LinearLeastSquares

// LinearLeastSquares computes the least squares fit for the function
//
//   f(x) = Β₀terms₀(x) + Β₁terms₁(x) + ...
//
// to the data (xs[i], ys[i]). It returns the parameters Β₀, Β₁, ...
// that minimize the sum of the squares of the residuals of f:
//
//   ∑ (ys[i] - f(xs[i]))²
//
// If weights is non-nil, it is used to weight these residuals:
//
//   ∑ weights[i] × (ys[i] - f(xs[i]))²
//
// The function f is specified by one Go function for each linear
// term. For efficiency, the Go function is vectorized: it will be
// passed a slice of x values in xs and must fill the slice termOut
// with the value of the term for each value in xs.
func LinearLeastSquares(xs, ys, weights []float64, terms ...func(xs, termOut []float64)) (params []float64) {
	// The optimal parameters are found by solving for Β̂ in the
	// "normal equations":
	//
	//    (𝐗ᵀ𝐖𝐗)Β̂ = 𝐗ᵀ𝐖𝐲
	//
	// where 𝐖 is a diagonal weight matrix (or the identity matrix
	// for the unweighted case).

	// TODO: Consider using orthogonal decomposition.

	if len(xs) != len(ys) {
		panic("len(xs) != len(ys)")
	}
	if weights != nil && len(xs) != len(weights) {
		panic("len(xs) != len(weights)")
	}

	// Construct 𝐗ᵀ. This is the more convenient representation
	// for efficiently calling the term functions.
	xTVals := make([]float64, len(terms)*len(xs))
	for i, term := range terms {
		term(xs, xTVals[i*len(xs):i*len(xs)+len(xs)])
	}
	XT := mat64.NewDense(len(terms), len(xs), xTVals)
	X := XT.T()

	// Construct 𝐗ᵀ𝐖.
	var XTW *mat64.Dense
	if weights == nil {
		// 𝐖 is the identity matrix.
		XTW = XT
	} else {
		// Since 𝐖 is a diagonal matrix, we do this directly.
		XTW = mat64.DenseCopyOf(XT)
		WDiag := mat64.NewVector(len(weights), weights)
		for row := 0; row < len(terms); row++ {
			rowView := XTW.RowView(row)
			rowView.MulElemVec(rowView, WDiag)
		}
	}

	// Construct 𝐲.
	y := mat64.NewVector(len(ys), ys)

	// Compute Β̂.
	lhs := mat64.NewDense(len(terms), len(terms), nil)
	lhs.Mul(XTW, X)

	rhs := mat64.NewVector(len(terms), nil)
	rhs.MulVec(XTW, y)

	BVals := make([]float64, len(terms))
	B := mat64.NewVector(len(terms), BVals)
	B.SolveVec(lhs, rhs)
	return BVals
}
Developer ID: rsc, Project: go-misc, Lines of code: 74, Source file: lsquares.go
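For context, here is a hypothetical call to the LinearLeastSquares function shown above (it is not part of the original lsquares.go): it fits a straight line y ≈ Β₀ + Β₁x to a few points, with the constant and linear terms written in the vectorized form the doc comment describes. It assumes it lives in the same package as LinearLeastSquares and that "fmt" is imported.

func exampleFit() {
	xs := []float64{0, 1, 2, 3}
	ys := []float64{1.1, 2.9, 5.2, 7.1}

	// Β₀ term: the constant 1 for every x.
	constant := func(xs, termOut []float64) {
		for i := range xs {
			termOut[i] = 1
		}
	}
	// Β₁ term: x itself.
	linear := func(xs, termOut []float64) {
		copy(termOut, xs)
	}

	// nil weights requests an unweighted fit.
	params := LinearLeastSquares(xs, ys, nil, constant, linear)
	fmt.Println(params) // [Β₀ Β₁], roughly [1.0 2.0] for this data
}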


Example 15: EigenWrap

//gnEigen wraps the matrix.DenseMatrix.Eigen() function in order to guarantee
//that the eigenvectors and eigenvalues are sorted according to the eigenvalues.
//It also guarantees orthonormality and handedness. I don't know how many of
//these are already guaranteed by Eig(). Will delete the unneeded parts,
//and even this whole function, when sure. The main reason for this function
//is compatibility with go.matrix. This function should disappear when we
//have a pure Go BLAS.
func EigenWrap(in *Matrix, epsilon float64) (*Matrix, []float64, error) {
	if epsilon < 0 {
		epsilon = appzero
	}
	efacs := mat64.Eigen(mat64.DenseCopyOf(in.Dense), epsilon)
	evecsprev := &Matrix{efacs.V}
	evalsmat := efacs.D()
	d, _ := evalsmat.Dims()
	evals := make([]float64, d, d)
	for k, _ := range evals {
		evals[k] = evalsmat.At(k, k)
	}
	evecs := Zeros(3)
	fn := func() { evecs.Copy(evecsprev.T()) }
	err := mat64.Maybe(fn)
	if err != nil {
		return nil, nil, Error{err.Error(), []string{"mat64.Copy/math64.T", "EigenWrap"}, true}

	}
	//evecs.TCopy(evecs.Dense)
	eig := eigenpair{evecs, evals[:]}
	sort.Sort(eig)
	//Here I should orthonormalize vectors if needed instead of just complaining.
	//I think orthonormality is guaranteed by DenseMatrix.Eig(). If it is, I'll delete all this.
	//If not, I'll add orthonormalization routines.
	eigrows, _ := eig.evecs.Dims()
	for i := 0; i < eigrows; i++ {
		vectori := eig.evecs.VecView(i)
		for j := i + 1; j < eigrows; j++ {
			vectorj := eig.evecs.VecView(j)
			if math.Abs(vectori.Dot(vectorj)) > epsilon && i != j {
				reterr := Error{fmt.Sprintln("Eigenvectors ", i, "and", j, " not orthogonal. v", i, ":", vectori, "\nv", j, ":", vectorj, "\nDot:", math.Abs(vectori.Dot(vectorj)), "eigmatrix:", eig.evecs), []string{"EigenWrap"}, true}
				return eig.evecs, evals[:], reterr
			}
		}
		if math.Abs(vectori.Norm(0)-1) > epsilon {
			//Of course I could just normalize the vectors instead of complaining.
			//err= fmt.Errorf("Vectors not normalized %s",err.Error())

		}
	}
	//Checking and fixing the handedness of the matrix. This if-else is Janne's idea;
	//I don't really know whether it works.
	//	eig.evecs.TCopy(eig.evecs)
	if det(eig.evecs) < 0 { //Right now, this will fail if the matrix is not 3x3 (30/10/2013)
		eig.evecs.Scale(-1, eig.evecs) //SSC
	} else {
		/*
			eig.evecs.TransposeInPlace()
			eig.evecs.ScaleRow(0,-1)
			eig.evecs.ScaleRow(2,-1)
			eig.evecs.TransposeInPlace()
		*/
	}
	//	eig.evecs.TCopy(eig.evecs)
	return eig.evecs, eig.evals, nil //Returns a slice of evals
}
Developer ID: rmera, Project: gochem, Lines of code: 64, Source file: gonum.go


Example 16: Train

func (o *OLS) Train(yvector []float64) error {
	// sanity check
	if len(yvector) != o.n {
		return DimensionError
	}

	copy(o.response, yvector)
	y := mat64.NewDense(len(yvector), 1, yvector)

	o.x.PushCol(rep(1.0, o.x.rows))
	x := o.x.data

	// it's easier to do things with X = QR
	qrFactor := mat64.QR(mat64.DenseCopyOf(x))
	Q := qrFactor.Q()

	betas := qrFactor.Solve(mat64.DenseCopyOf(y))
	o.betas = betas.Col(nil, 0)
	if len(o.betas) != o.p {
		log.Printf("Unexpected dimension error. Betas: %v", o.betas)
	}

	// calculate residuals and fitted vals
	/*
		fitted := &mat64.Dense{}
		fitted.Mul(x, betas)
		o.fitted = fitted.Col(nil, 0)
		y.Sub(y, fitted)
		o.residuals = y.Col(nil, 0)
	*/

	// y_hat = Q Qt y
	// e = y - y_hat
	qqt := &mat64.Dense{}
	qqt.Mul(Q, Q.T())
	yhat := &mat64.Dense{}
	yhat.Mul(qqt, y)
	o.fitted = yhat.Col(nil, 0)
	y.Sub(y, yhat)
	o.residuals = y.Col(nil, 0)

	return nil
}
Developer ID: timkaye11, Project: glasso, Lines of code: 43, Source file: ols.go


Example 17: norm

func (em EM) norm(x []float64, j int) float64 {
	xMat := mat64.NewDense(1, len(x), x)
	muMat := mat64.NewDense(1, len(em.mu[j]), em.mu[j])
	first := mat64.NewDense(1, len(em.mu[j]), nil)
	first.Sub(xMat, muMat)
	second := mat64.DenseCopyOf(first.T())
	resultMat := mat64.NewDense(1, 1, nil)
	resultMat.Mul(first, second)
	var jisuu = 0.5 * float64(em.d)
	return math.Exp(resultMat.At(0, 0)/(-2.0)/(em.sigma[j]*em.sigma[j])) / math.Pow(2*math.Pi*em.sigma[j]*em.sigma[j], jisuu)
}
Developer ID: 6br, Project: goem, Lines of code: 11, Source file: goem.go


Example 18: Back

// Back propagate the error and update the weights.
func (m *Mind) Back(input *mat64.Dense, output *mat64.Dense) {
	ErrorOutputLayer := mat64.NewDense(1, 1, nil)
	ErrorOutputLayer.Sub(output, m.Results.OutputResult)
	DeltaOutputLayer := m.ActivatePrime(m.Results.OutputSum)
	DeltaOutputLayer.MulElem(DeltaOutputLayer, ErrorOutputLayer)

	HiddenOutputChanges := mat64.DenseCopyOf(m.Results.HiddenResult.T())
	HiddenOutputChanges.Product(DeltaOutputLayer)
	HiddenOutputChanges.Scale(m.LearningRate, HiddenOutputChanges)
	m.Weights.HiddenOutput.Add(m.Weights.HiddenOutput, HiddenOutputChanges)

	DeltaHiddenLayer := mat64.DenseCopyOf(DeltaOutputLayer)
	DeltaHiddenLayer.Product(DeltaOutputLayer, m.Weights.HiddenOutput.T())
	DeltaHiddenLayer.MulElem(DeltaHiddenLayer, m.ActivatePrime(m.Results.HiddenSum))

	InputHiddenChanges := mat64.DenseCopyOf(input.T())
	InputHiddenChanges.Product(DeltaHiddenLayer)
	InputHiddenChanges.Scale(m.LearningRate, InputHiddenChanges)
	m.Weights.InputHidden.Add(m.Weights.InputHidden, InputHiddenChanges)
}
Developer ID: vibbix, Project: go-mind, Lines of code: 21, Source file: mind.go


Example 19: BenchmarkCovToCorr

func BenchmarkCovToCorr(b *testing.B) {
	// generate a 10x10 covariance matrix
	m := randMat(small, small)
	c := CovarianceMatrix(nil, m, nil)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StopTimer()
		cc := mat64.DenseCopyOf(c)
		b.StartTimer()
		covToCorr(cc)
	}
}
Developer ID: shazow, Project: stat, Lines of code: 12, Source file: covariancematrix_test.go


Example 20: instanciate

// Duplicates the task passed as argument and returns the new tasks
func (this *TaskGraphStructure) instanciate(instance TaskInstance) []*Task {
	returnTasks := make([]*Task, 0)
	// First duplicate the tasks with same name
	for _, task := range this.Tasks {
		if task.Name == instance.Taskname {
			for _, node := range instance.Hosts {
				switch {
				case task.Father == FATHER:
					// Then duplicate
					log.Printf("Duplicating %v on node %v", task.Name, node)
					row, col := this.AdjacencyMatrix.Dims()
					newId := row
					newTask := NewTask()
					newTask.Father = task.Id
					newTask.OriginId = task.Id
					newTask.Id = newId
					newTask.Name = task.Name
					if task.Module != "meta" {
						newTask.Module = instance.Module
					}
					newTask.Origin = task.Origin
					newTask.Node = node // Set the node to the new one
					newTask.Args = instance.Args
					this.Tasks[newId] = newTask
					returnTasks = append(returnTasks, newTask)
					this.AdjacencyMatrix = mat64.DenseCopyOf(this.AdjacencyMatrix.Grow(1, 1))
					for r := 0; r < row; r++ {
						for c := 0; c < col; c++ {
							if this.Tasks[r].Origin != instance.Taskname {
								this.AdjacencyMatrix.Set(r, newId, this.AdjacencyMatrix.At(r, task.Id))
							}
							if this.Tasks[c].Origin != instance.Taskname {
								this.AdjacencyMatrix.Set(newId, c, this.AdjacencyMatrix.At(task.Id, c))
							}
						}
					}
					this = this.AugmentTaskStructure(this.duplicateSubtasks(newTask, node, instance))
				case task.Father == ORPHAN:
					// Do not duplicate, simply adapt
					task.Debug = "instanciate/ORPHAN"
					task.Node = node
					task.Module = instance.Module
					task.Args = instance.Args
					this.adaptSubtask(task, node, instance)
					task.Father = FATHER
				}
				// Then duplicate the tasks with same Father
			}
		}
	}
	return returnTasks
}
Developer ID: jmptrader, Project: gautomator, Lines of code: 53, Source file: taskhandler.go



Note: The github.com/gonum/matrix/mat64.DenseCopyOf examples in this article were compiled from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and the copyright of the source code remains with those authors. Please refer to each project's license before redistributing or using the code; do not reproduce this article without permission.

