本文整理汇总了Golang中github.com/gonum/matrix/mat64.NewVector函数的典型用法代码示例。如果您正苦于以下问题:Golang NewVector函数的具体用法?Golang NewVector怎么用?Golang NewVector使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewVector函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Golang代码示例。
示例1: InitDirection
// InitDirection initializes the BFGS internal state at the starting location
// loc and stores the first search direction in dir. The returned step size
// scales the first trial step taken along dir.
func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := len(loc.X)
	b.dim = dim
	// Grow (or reuse) the cached slices, then record the current point and
	// gradient. resize must happen before copy so the destinations fit.
	b.x = resize(b.x, dim)
	copy(b.x, loc.X)
	b.grad = resize(b.grad, dim)
	copy(b.grad, loc.Gradient)
	// Scratch storage reused across iterations by NextDirection.
	b.y = resize(b.y, dim)
	b.s = resize(b.s, dim)
	b.tmp = resize(b.tmp, dim)
	// Vector views share the backing slices above, so writes through the
	// vectors are visible in b.y, b.s and b.tmp.
	b.yVec = mat64.NewVector(dim, b.y)
	b.sVec = mat64.NewVector(dim, b.s)
	b.tmpVec = mat64.NewVector(dim, b.tmp)
	// Reuse the inverse-Hessian backing array when it is large enough,
	// otherwise allocate a fresh symmetric matrix.
	if b.invHess == nil || cap(b.invHess.RawSymmetric().Data) < dim*dim {
		b.invHess = mat64.NewSymDense(dim, nil)
	} else {
		b.invHess = mat64.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
	}
	// The values of the Hessian are initialized in the first call to
	// NextDirection. The initial direction is just the negative of the
	// gradient because the Hessian starts as the identity.
	copy(dir, loc.Gradient)
	floats.Scale(-1, dir)
	b.first = true
	return 1 / floats.Norm(dir, 2)
}
开发者ID:jacobxk,项目名称:optimize,代码行数:32,代码来源:bfgs.go
示例2: GradientDescent
// GradientDescent fits a parameter vector theta for the linear hypothesis
// X*theta ≈ y using batch gradient descent.
//
// X is the m×n design matrix (m training examples, n features), y is the
// m-vector of targets, alpha is the learning rate, tolerance is the stopping
// threshold on the gradient norm, and maxIters bounds the number of
// iterations. The fitted n-vector theta is returned.
func GradientDescent(X *mat64.Dense, y *mat64.Vector, alpha, tolerance float64, maxIters int) *mat64.Vector {
	// m = number of training examples, n = number of features.
	m, n := X.Dims()
	h := mat64.NewVector(m, nil)        // scaled residuals scratch vector
	partials := mat64.NewVector(n, nil) // gradient of the squared-error cost
	theta := mat64.NewVector(n, nil)    // parameter estimate, starts at zero
	for i := 0; i < maxIters; i++ {
		// h = (X*theta - y) / m, the residuals scaled by the sample count.
		h.MulVec(X, theta)
		for el := 0; el < m; el++ {
			h.SetVec(el, (h.At(el, 0)-y.At(el, 0))/float64(m))
		}
		// partials = Xᵀ * h.
		partials.MulVec(X.T(), h)
		// Gradient step: theta -= alpha * partials.
		for el := 0; el < n; el++ {
			theta.SetVec(el, theta.At(el, 0)-alpha*partials.At(el, 0))
		}
		// Stop once the gradient norm is within tolerance of a local minimum.
		if math.Sqrt(mat64.Dot(partials, partials)) <= tolerance {
			break
		}
	}
	return theta
}
开发者ID:erubboli,项目名称:mlt,代码行数:33,代码来源:gradient_descent.go
示例3: Init
// Init prepares the BiCG solver and its context for a system of the size of
// ctx.X, reusing existing work vectors when their lengths already match, and
// returns the first operation the caller must perform.
func (bicg *BiCG) Init(ctx *Context) Operation {
	if bicg.BreakdownTolerance == 0 {
		bicg.BreakdownTolerance = 1e-6
	}
	bicg.rho = math.NaN()

	dim := ctx.X.Len()
	// ensure returns v unchanged when it already has the right length and a
	// freshly allocated vector otherwise.
	ensure := func(v *mat64.Vector) *mat64.Vector {
		if v == nil || v.Len() != dim {
			return mat64.NewVector(dim, nil)
		}
		return v
	}
	ctx.P = ensure(ctx.P)
	ctx.Ap = ensure(ctx.Ap)
	ctx.Q = ensure(ctx.Q)
	ctx.Aq = ensure(ctx.Aq)
	ctx.Z = ensure(ctx.Z)

	bicg.resume = 2
	return SolvePreconditioner
	// Solve M z = r_{i-1}
}
开发者ID:vladimir-ch,项目名称:sparse,代码行数:27,代码来源:bicg.go
示例4: Fit
// Fit runs batch gradient descent on the stored training data (lr.x, lr.y),
// updating lr.Theta in place. Iteration stops after lr.maxIters steps or once
// the gradient norm drops to lr.tolerance.
func (lr *LinearRegression) Fit() {
	h := mat64.NewVector(lr.m, nil)        // residuals scratch vector
	partials := mat64.NewVector(lr.n, nil) // cost gradient
	alphaM := lr.alpha / float64(lr.m)     // learning rate pre-scaled by 1/m
	for i := 0; i < lr.maxIters; i++ {
		// h = x*Theta - y, the per-example residuals.
		h.MulVec(lr.x, lr.Theta)
		for j := 0; j < lr.m; j++ {
			h.SetVec(j, h.At(j, 0)-lr.y.At(j, 0))
		}
		partials.MulVec(lr.x.T(), h)
		// Theta -= (alpha/m) * partials.
		for j := 0; j < lr.n; j++ {
			lr.Theta.SetVec(j, lr.Theta.At(j, 0)-alphaM*partials.At(j, 0))
		}
		// Stop once the gradient is within tolerance of a local minimum.
		if math.Sqrt(mat64.Dot(partials, partials)) <= lr.tolerance {
			break
		}
	}
}
开发者ID:erubboli,项目名称:mlt,代码行数:28,代码来源:linear_regression.go
示例5: TestHypothesis
// TestHypothesis checks Hypothesis against hand-computed dot products of
// small theta/x vector pairs.
func TestHypothesis(t *testing.T) {
	cases := []struct {
		theta *mat64.Vector
		x     *mat64.Vector
		y     float64
	}{
		{theta: mat64.NewVector(2, []float64{0, 2}), x: mat64.NewVector(2, []float64{0, 1}), y: 2.0},
		{theta: mat64.NewVector(2, []float64{0, 2}), x: mat64.NewVector(2, []float64{0, 2}), y: 4.0},
		{theta: mat64.NewVector(2, []float64{0, 2}), x: mat64.NewVector(2, []float64{0, 10}), y: 20.0},
		{theta: mat64.NewVector(2, []float64{1, 2}), x: mat64.NewVector(2, []float64{1, 10}), y: 21.0},
		{theta: mat64.NewVector(3, []float64{1, 2.5, 5}), x: mat64.NewVector(3, []float64{10, 20, 0}), y: 60.0},
	}
	for _, test := range cases {
		got := Hypothesis(test.x, test.theta)
		if got != test.y {
			t.Errorf("Hypothesis(%v,%v) is expected to be equal to %v, found %v", test.x, test.theta, test.y, got)
		}
	}
}
开发者ID:erubboli,项目名称:mlt,代码行数:35,代码来源:hypothesis_test.go
示例6: InitDirection
// InitDirection initializes the BFGS state from the starting location loc,
// writes the first search direction into dir, and returns the initial step
// size so that the first trial step has unit length.
func (b *BFGS) InitDirection(loc *Location, dir []float64) (stepSize float64) {
	dim := len(loc.X)
	b.dim = dim
	b.first = true

	// Record the starting point and its gradient; clear the update scratch.
	b.x.CloneVec(mat64.NewVector(dim, loc.X))
	b.grad.CloneVec(mat64.NewVector(dim, loc.Gradient))
	b.y.Reset()
	b.s.Reset()
	b.tmp.Reset()

	// Reuse the inverse-Hessian storage when it is big enough; otherwise
	// allocate fresh memory.
	if b.invHess != nil && cap(b.invHess.RawSymmetric().Data) >= dim*dim {
		b.invHess = mat64.NewSymDense(dim, b.invHess.RawSymmetric().Data[:dim*dim])
	} else {
		b.invHess = mat64.NewSymDense(dim, nil)
	}

	// The inverse Hessian itself is filled in on the first call to
	// NextDirection; with an identity Hessian the first direction is simply
	// the negated gradient.
	d := mat64.NewVector(dim, dir)
	d.ScaleVec(-1, mat64.NewVector(dim, loc.Gradient))
	return 1 / mat64.Norm(d, 2)
}
开发者ID:jgcarvalho,项目名称:zdd,代码行数:28,代码来源:bfgs.go
示例7: transformNormal
// transformNormal performs the same operation as TransformNormal except no
// safety checks are performed and both input slices must be non-nil.
func (n *Normal) transformNormal(dst, normal []float64) []float64 {
	src := mat64.NewVector(n.dim, normal)
	out := mat64.NewVector(n.dim, dst)
	// dst = lower*normal, written in place through out's backing slice,
	// then shifted by the mean.
	out.MulVec(&n.lower, src)
	floats.Add(dst, n.mu)
	return dst
}
开发者ID:sbinet,项目名称:gonum-stat,代码行数:9,代码来源:normal.go
示例8: LinearLeastSquares
// LinearLeastSquares computes the least squares fit for the function
//
//	f(x) = θ₀·terms₀(x) + θ₁·terms₁(x) + ...
//
// to the data (xs[i], ys[i]). It returns the parameters θ₀, θ₁, ...
// that minimize the sum of the squares of the residuals of f:
//
//	∑ (ys[i] - f(xs[i]))²
//
// If weights is non-nil, it is used to weight these residuals:
//
//	∑ weights[i] × (ys[i] - f(xs[i]))²
//
// The function f is specified by one Go function for each linear
// term. For efficiency, the Go function is vectorized: it will be
// passed a slice of x values in xs and must fill the slice termOut
// with the value of the term for each value in xs.
func LinearLeastSquares(xs, ys, weights []float64, terms ...func(xs, termOut []float64)) (params []float64) {
	// The optimal parameters are found by solving for θ̂ in the
	// "normal equations":
	//
	//	(XᵀWX)θ̂ = XᵀWy
	//
	// where W is a diagonal weight matrix (or the identity matrix
	// for the unweighted case).
	// TODO: Consider using orthogonal decomposition.

	if len(xs) != len(ys) {
		panic("len(xs) != len(ys)")
	}
	if weights != nil && len(xs) != len(weights) {
		panic("len(xs) != len(weights)")
	}

	// Construct Xᵀ. This is the more convenient representation
	// for efficiently calling the term functions.
	xTVals := make([]float64, len(terms)*len(xs))
	for i, term := range terms {
		term(xs, xTVals[i*len(xs):i*len(xs)+len(xs)])
	}
	XT := mat64.NewDense(len(terms), len(xs), xTVals)
	X := XT.T()

	// Construct XᵀW.
	var XTW *mat64.Dense
	if weights == nil {
		// W is the identity matrix, so XᵀW = Xᵀ.
		XTW = XT
	} else {
		// Since W is a diagonal matrix, scale each row of Xᵀ directly.
		XTW = mat64.DenseCopyOf(XT)
		WDiag := mat64.NewVector(len(weights), weights)
		for row := 0; row < len(terms); row++ {
			rowView := XTW.RowView(row)
			rowView.MulElemVec(rowView, WDiag)
		}
	}

	// Construct y.
	y := mat64.NewVector(len(ys), ys)

	// Compute θ̂ by solving (XᵀWX)θ̂ = XᵀWy.
	lhs := mat64.NewDense(len(terms), len(terms), nil)
	lhs.Mul(XTW, X)
	rhs := mat64.NewVector(len(terms), nil)
	rhs.MulVec(XTW, y)
	BVals := make([]float64, len(terms))
	B := mat64.NewVector(len(terms), BVals)
	B.SolveVec(lhs, rhs)
	return BVals
}
开发者ID:rsc,项目名称:go-misc,代码行数:74,代码来源:lsquares.go
示例9: TestVectorDistance
// TestVectorDistance verifies vectorDistance on a known pair of 3-vectors.
func TestVectorDistance(t *testing.T) {
	a := mat.NewVector(3, []float64{4, 6, 2})
	b := mat.NewVector(3, []float64{1, 9, 3})
	want := float64(19)
	if want != vectorDistance(a, b) {
		t.Errorf("Expected %f, got %f", want, vectorDistance(a, b))
	}
}
开发者ID:kingzbauer,项目名称:kmeans,代码行数:10,代码来源:utils_test.go
示例10: NextDirection
// NextDirection computes the next Newton search direction, storing it in dir,
// and returns the step size for the line search (always 1 here).
func (n *Newton) NextDirection(loc *Location, dir []float64) (stepSize float64) {
	// This method implements Algorithm 3.3 (Cholesky with Added Multiple of
	// the Identity) from Nocedal, Wright (2006), 2nd edition.
	dim := len(loc.X)
	n.hess.CopySym(loc.Hessian)
	// Find the smallest diagonal entry of the Hessian.
	minA := n.hess.At(0, 0)
	for i := 1; i < dim; i++ {
		a := n.hess.At(i, i)
		if a < minA {
			minA = a
		}
	}
	// If the smallest diagonal entry is positive, the Hessian may be positive
	// definite, and so first attempt to apply the Cholesky factorization to
	// the un-modified Hessian. If the smallest entry is negative, use the
	// final tau from the last iteration if regularization was needed,
	// otherwise guess an appropriate value for tau.
	if minA > 0 {
		n.tau = 0
	} else if n.tau == 0 {
		n.tau = -minA + 0.001
	}
	for k := 0; k < maxNewtonModifications; k++ {
		if n.tau != 0 {
			// Add a multiple of identity to the Hessian. Note that the
			// diagonal is reset from loc.Hessian each time, so tau is not
			// accumulated across attempts.
			for i := 0; i < dim; i++ {
				n.hess.SetSym(i, i, loc.Hessian.At(i, i)+n.tau)
			}
		}
		// Try to apply the Cholesky factorization.
		pd := n.chol.Factorize(n.hess)
		if pd {
			d := mat64.NewVector(dim, dir)
			// Store the solution in d's backing array, dir, then negate to
			// get the descent direction -H⁻¹·gradient.
			d.SolveCholeskyVec(&n.chol, mat64.NewVector(dim, loc.Gradient))
			floats.Scale(-1, dir)
			return 1
		}
		// Modified Hessian is not PD, so increase tau.
		n.tau = math.Max(n.Increase*n.tau, 0.001)
	}
	// Hessian modification failed to get a PD matrix. Return the negative
	// gradient as the descent direction.
	copy(dir, loc.Gradient)
	floats.Scale(-1, dir)
	return 1
}
开发者ID:jacobxk,项目名称:optimize,代码行数:52,代码来源:newton.go
示例11: newMargLikeMemory
// newMargLikeMemory allocates the scratch storage used while evaluating the
// marginal likelihood and its derivatives for the given number of
// hyperparameters and training outputs.
func newMargLikeMemory(hyper, outputs int) *margLikeMemory {
	// One derivative matrix per hyperparameter.
	derivs := make([]*mat64.SymDense, hyper)
	for i := range derivs {
		derivs[i] = mat64.NewSymDense(outputs, nil)
	}
	return &margLikeMemory{
		lastX:    make([]float64, hyper),
		k:        mat64.NewSymDense(outputs, nil),
		chol:     &mat64.Cholesky{},
		alpha:    mat64.NewVector(outputs, nil),
		tmp:      mat64.NewVector(1, nil),
		dKdTheta: derivs,
		kInvDK:   mat64.NewDense(outputs, outputs, nil),
	}
}
开发者ID:btracey,项目名称:gaussproc,代码行数:15,代码来源:gp.go
示例12: Solve
// Solve iteratively solves the linear system a*x = b with the given method,
// starting from xInit (the zero vector when xInit is nil). settings may be
// nil, in which case DefaultSettings(dim) is used. It panics on dimension
// mismatches and returns the result together with any error reported by the
// iteration.
func Solve(a sparse.Matrix, b, xInit *mat64.Vector, settings *Settings, method Method) (result Result, err error) {
	stats := Stats{
		StartTime: time.Now(),
	}
	// Validate that a is square and that b and xInit match its dimension.
	dim, c := a.Dims()
	if dim != c {
		panic("iterative: matrix is not square")
	}
	if xInit != nil && dim != xInit.Len() {
		panic("iterative: mismatched size of the initial guess")
	}
	if b.Len() != dim {
		panic("iterative: mismatched size of the right-hand side vector")
	}
	if xInit == nil {
		xInit = mat64.NewVector(dim, nil)
	}
	if settings == nil {
		settings = DefaultSettings(dim)
	}
	ctx := Context{
		X:        mat64.NewVector(dim, nil),
		Residual: mat64.NewVector(dim, nil),
	}
	// X = xInit
	ctx.X.CopyVec(xInit)
	if mat64.Norm(ctx.X, math.Inf(1)) > 0 {
		// Non-zero initial guess: Residual = Ax (skipped for the zero
		// vector, where Ax is trivially zero).
		sparse.MulMatVec(ctx.Residual, 1, false, a, ctx.X)
		stats.MatVecMultiplies++
	}
	// Residual = Ax - b
	ctx.Residual.SubVec(ctx.Residual, b)
	// Iterate only when the initial guess is not already within tolerance.
	if mat64.Norm(ctx.Residual, 2) >= settings.Tolerance {
		err = iterate(method, a, b, settings, &ctx, &stats)
	}
	result = Result{
		X:       ctx.X,
		Stats:   stats,
		Runtime: time.Since(stats.StartTime),
	}
	return result, err
}
开发者ID:vladimir-ch,项目名称:sparse,代码行数:48,代码来源:iterative.go
示例13: findLinearlyIndependent
// findLinearlyIndependent finds a set of linearly independent columns of A, and
// returns the column indexes of the linearly independent columns.
func findLinearlyIndependent(A mat64.Matrix) []int {
	m, n := A.Dims()
	idxs := make([]int, 0, m)
	columns := mat64.NewDense(m, m, nil)
	newCol := make([]float64, m)
	// Walk in reverse order because slack variables are typically the last columns
	// of A.
	for i := n - 1; i >= 0; i-- {
		// At most m columns of an m×n matrix can be linearly independent.
		if len(idxs) == m {
			break
		}
		mat64.Col(newCol, i, A)
		if len(idxs) == 0 {
			// A column is linearly independent from the null set.
			// This is what needs to be changed if zero columns are allowed, as
			// a column of all zeros is not linearly independent from itself.
			columns.SetCol(len(idxs), newCol)
			idxs = append(idxs, i)
			continue
		}
		// Skip columns that are linear combinations of those already kept.
		if linearlyDependent(mat64.NewVector(m, newCol), columns.View(0, 0, m, len(idxs))) {
			continue
		}
		columns.SetCol(len(idxs), newCol)
		idxs = append(idxs, i)
	}
	return idxs
}
开发者ID:sbinet,项目名称:gonum-optimize,代码行数:30,代码来源:simplex.go
示例14: TestGather
// TestGather checks that Gather copies the indexed elements of a dense
// vector into a sparse Vector, preserving dimension and index order.
func TestGather(t *testing.T) {
	for i, tc := range []struct {
		y       []float64
		indices []int
		want    []float64
	}{
		{
			y:       []float64{1, 2, 3, 4},
			indices: []int{0, 2, 3},
			want:    []float64{1, 3, 4},
		},
		{
			indices: []int{0, 2, 3, 6},
			y:       []float64{1, 2, 3, 4, 5, 6, 7, 8},
			want:    []float64{1, 3, 4, 7},
		},
	} {
		dense := mat64.NewVector(len(tc.y), tc.y)
		var sparse Vector
		Gather(&sparse, dense, tc.indices)
		if sparse.N != dense.Len() {
			t.Errorf("%d: wrong dimension, want = %v, got = %v ", i, dense.Len(), sparse.N)
		}
		if !reflect.DeepEqual(sparse.Data, tc.want) {
			t.Errorf("%d: data not equal, want = %v, got %v\n", i, tc.want, sparse.Data)
		}
		if !reflect.DeepEqual(sparse.Indices, tc.indices) {
			t.Errorf("%d: indices not equal, want = %v, got %v\n", i, tc.indices, sparse.Indices)
		}
	}
}
开发者ID:vladimir-ch,项目名称:sparse,代码行数:35,代码来源:level1_test.go
示例15: TestLeastSquares
// TestLeastSquares solves a small over-determined system and compares the
// solution against the known analytic result.
func TestLeastSquares(t *testing.T) {
	A := mat64.NewDense(5, 3, []float64{
		1, -2, 4,
		1, -1, 1,
		1, 0, 0,
		1, 1, 1,
		1, 2, 4,
	})
	b := mat64.NewVector(5, []float64{0, 0, 1, 0, 0})
	got := vec3(linalg.LeastSquares(A, b))
	want := Vec3{34.0 / 70.0, 0.0, -10.0 / 70.0}
	if got != want {
		t.Errorf("expected %v, got %v", want, got)
	}
}
开发者ID:shogg,项目名称:math,代码行数:25,代码来源:leastsquares_test.go
示例16: TestScatter
// TestScatter checks that Scatter writes the sparse values into the right
// slots of a dense vector, leaving the zero slots untouched.
func TestScatter(t *testing.T) {
	for i, tc := range []struct {
		x, y    []float64
		indices []int
		want    []float64
	}{
		{
			x:       []float64{1, 2, 3},
			indices: []int{0, 2, 3},
			y:       []float64{math.NaN(), 0, math.NaN(), math.NaN()},
			want:    []float64{1, 0, 2, 3},
		},
		{
			x:       []float64{1, 2, 3},
			indices: []int{0, 4, 6},
			y:       []float64{math.NaN(), 0, 0, 0, math.NaN(), 0, math.NaN(), 0},
			want:    []float64{1, 0, 0, 0, 2, 0, 3, 0},
		},
	} {
		dense := mat64.NewVector(len(tc.y), tc.y)
		sparse := NewVector(len(tc.y), tc.x, tc.indices)
		Scatter(dense, sparse)
		if !reflect.DeepEqual(tc.y, tc.want) {
			t.Errorf("%d: want = %v, got %v\n", i, tc.want, tc.y)
		}
	}
}
开发者ID:vladimir-ch,项目名称:sparse,代码行数:31,代码来源:level1_test.go
示例17: MeanBatch
// MeanBatch predicts the mean at the set of locations specified by x,
// storing the result in-place into yPred. If yPred is nil, new memory is
// allocated. The filled (or new) slice is returned.
func (g *GP) MeanBatch(yPred []float64, x mat64.Matrix) []float64 {
	nLoc, nDim := x.Dims()
	if nDim != g.inputDim {
		panic(badInputLength)
	}
	if yPred == nil {
		yPred = make([]float64, nLoc)
	}
	if nLoc != len(yPred) {
		panic(badOutputLength)
	}
	nSamples, _ := g.inputs.Dims()
	// Cross-covariance between every training input and every query point.
	cov := mat64.NewDense(nSamples, nLoc, nil)
	loc := make([]float64, g.inputDim)
	for j := 0; j < nLoc; j++ {
		for k := range loc {
			loc[k] = x.At(j, k)
		}
		for i := 0; i < nSamples; i++ {
			cov.Set(i, j, g.kernel.Distance(g.inputs.RawRowView(i), loc))
		}
	}
	// yPred = covᵀ * sigInvY, written through the vector view's backing.
	out := mat64.NewVector(len(yPred), yPred)
	out.MulVec(cov.T(), g.sigInvY)
	// Undo the output normalization.
	for i, v := range yPred {
		yPred[i] = v*g.std + g.mean
	}
	return yPred
}
开发者ID:btracey,项目名称:gaussproc,代码行数:35,代码来源:gp.go
示例18: TestDot
// TestDot checks the sparse-dense dot product on a fixed example; the dense
// entries at unreferenced indices are NaN to prove they are never read.
func TestDot(t *testing.T) {
	for _, tc := range []struct {
		n       int
		x, y    []float64
		indices []int
		want    float64
	}{
		{
			n:       5,
			x:       []float64{1, 2, 3},
			indices: []int{0, 2, 4},
			y:       []float64{1, math.NaN(), 3, math.NaN(), 5},
			want:    22,
		},
	} {
		sparse := NewVector(tc.n, tc.x, tc.indices)
		dense := mat64.NewVector(len(tc.y), tc.y)
		got := Dot(sparse, dense)
		if got != tc.want {
			t.Errorf("want = %v, got %v\n", tc.want, got)
		}
	}
}
开发者ID:vladimir-ch,项目名称:sparse,代码行数:25,代码来源:level1_test.go
示例19: Train
// Train sets the parameters of the gaussian process by maximizing the
// marginal likelihood over the kernel hyperparameters. If trainNoise == true,
// the noise parameter is adjusted as well (in log space, as the final
// optimization variable), otherwise it is left untouched.
// TODO(btracey): Need to implement barrier method for parameters. Steps get crazy.
func (g *GP) Train(trainNoise bool) error {
	// TODO(btracey): Implement a memory struct that can be passed around with
	// all of this data.
	initHyper := g.kernel.Hyper(nil)
	nKerHyper := len(initHyper)
	if trainNoise {
		// The noise is optimized in log space to keep it positive.
		initHyper = append(initHyper, math.Log(g.noise))
	}
	mem := newMargLikeMemory(len(initHyper), len(g.outputs))
	f := func(x []float64) float64 {
		fmt.Println("x =", x)
		obj := g.marginalLikelihood(x, trainNoise, mem)
		fmt.Println("obj =", obj)
		return obj
	}
	df := func(x, grad []float64) {
		g.marginalLikelihoodDerivative(x, grad, trainNoise, mem)
		fmt.Println("x = ", x)
		fmt.Println("grad = ", grad)
	}
	problem := optimize.Problem{
		Func: f,
		Grad: df,
	}
	settings := optimize.DefaultSettings()
	settings.GradientThreshold = 1e-4
	result, err := optimize.Local(problem, initHyper, settings, nil)
	if err != nil {
		// Leave the model state untouched when optimization fails; result
		// may not hold a usable X.
		return err
	}
	// Only overwrite the noise when it was actually part of the optimization
	// variables; otherwise the last entry of result.X is a kernel
	// hyperparameter, not the log-noise.
	if trainNoise {
		g.noise = math.Exp(result.X[len(result.X)-1])
	}
	g.kernel.SetHyper(result.X[:nKerHyper])
	// Refactor the kernel matrix with the new hyperparameters and recompute
	// sigInvY by solving K * sigInvY = outputs.
	g.setKernelMat(g.k, g.noise)
	if ok := g.cholK.Factorize(g.k); !ok {
		return errors.New("gp: final kernel matrix is not positive definite")
	}
	v := mat64.NewVector(len(g.outputs), g.outputs)
	g.sigInvY.SolveCholeskyVec(g.cholK, v)
	return nil
}
开发者ID:btracey,项目名称:gaussproc,代码行数:62,代码来源:gp.go
示例20: LogProb
// LogProb computes the log of the pdf of the point x.
func (n *Normal) LogProb(x []float64) float64 {
	dim := n.dim
	if len(x) != dim {
		panic(badSizeMismatch)
	}
	// Normalization constant: -(dim/2)·log(2π) - log√det(Σ).
	norm := -0.5*float64(dim)*logTwoPi - n.logSqrtDet
	// Quadratic form (x-mu)' Sigma^-1 (x-mu), computed by solving the
	// Cholesky-factored system rather than inverting Sigma.
	diff := make([]float64, dim)
	floats.SubTo(diff, x, n.mu)
	solved := make([]float64, dim)
	sv := mat64.NewVector(dim, solved)
	sv.SolveCholeskyVec(n.chol, mat64.NewVector(dim, diff))
	return norm - 0.5*floats.Dot(solved, diff)
}
开发者ID:shazow,项目名称:stat,代码行数:18,代码来源:normal.go
注:本文中的github.com/gonum/matrix/mat64.NewVector函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论