本文整理汇总了Golang中github.com/docker/swarmkit/manager/state/store.FindNodes函数的典型用法代码示例。如果您正苦于以下问题:Golang FindNodes函数的具体用法?Golang FindNodes怎么用?Golang FindNodes使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了FindNodes函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Golang代码示例。
示例1: TestHeartbeatTimeout
// TestHeartbeatTimeout verifies that a node which stops sending heartbeats
// is marked DOWN by the dispatcher and subsequently deregistered, so that a
// heartbeat with the stale session ID is rejected.
func TestHeartbeatTimeout(t *testing.T) {
	cfg := DefaultConfig()
	cfg.HeartbeatPeriod = 100 * time.Millisecond
	cfg.HeartbeatEpsilon = 0

	gd, err := startDispatcher(cfg)
	assert.NoError(t, err)
	defer gd.Close()

	// Register the node by opening a session and capture its session ID.
	var sessionID string
	{
		stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{})
		assert.NoError(t, err)
		msg, err := stream.Recv()
		assert.NoError(t, err)
		assert.NotEmpty(t, msg.SessionID)
		sessionID = msg.SessionID
	}

	// Let several heartbeat periods elapse without sending a heartbeat so
	// the node times out.
	time.Sleep(500 * time.Millisecond)

	gd.Store.View(func(readTx store.ReadTx) {
		found, err := store.FindNodes(readTx, store.All)
		assert.NoError(t, err)
		assert.NotEmpty(t, found)
		assert.Equal(t, api.NodeStatus_DOWN, found[0].Status.State)
	})

	// check that node is deregistered
	hbResp, hbErr := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{SessionID: sessionID})
	assert.Nil(t, hbResp)
	assert.Error(t, hbErr)
	assert.Equal(t, grpc.ErrorDesc(hbErr), ErrNodeNotRegistered.Error())
}
开发者ID:ChristianKniep,项目名称:swarmkit,代码行数:33,代码来源:dispatcher_test.go
示例2: Run
// Run is the ConstraintEnforcer's main loop. It first sweeps every node
// already present in the store, then shuts down noncompliant tasks on each
// node-update event until stopped via stopChan.
func (ce *ConstraintEnforcer) Run() {
	defer close(ce.doneChan)

	// Subscribe to node updates before taking the initial snapshot so no
	// event is missed between the two.
	watcher, cancelWatch := state.Watch(ce.store.WatchQueue(), state.EventUpdateNode{})
	defer cancelWatch()

	var (
		existing []*api.Node
		viewErr  error
	)
	ce.store.View(func(readTx store.ReadTx) {
		existing, viewErr = store.FindNodes(readTx, store.All)
	})

	if viewErr != nil {
		log.L.WithError(viewErr).Error("failed to check nodes for noncompliant tasks")
	} else {
		// Initial pass over the snapshot taken above.
		for _, n := range existing {
			ce.shutdownNoncompliantTasks(n)
		}
	}

	for {
		select {
		case event := <-watcher:
			ce.shutdownNoncompliantTasks(event.(state.EventUpdateNode).Node)
		case <-ce.stopChan:
			return
		}
	}
}
开发者ID:JMesser81,项目名称:docker,代码行数:32,代码来源:constraint_enforcer.go
示例3: testRaftRestartCluster
// testRaftRestartCluster stops every member of a freshly created raft
// cluster, restarts them (optionally staggered by one clock tick between
// restarts when stagger is true), and verifies that the cluster converges
// again: values proposed before and after the restart must be present and
// identical on every member's memory store.
func testRaftRestartCluster(t *testing.T, stagger bool) {
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
defer raftutils.TeardownCluster(t, nodes)
// Propose a value
values := make([]*api.Node, 2)
var err error
values[0], err = raftutils.ProposeValue(t, nodes[1], "id1")
assert.NoError(t, err, "failed to propose value")
// Stop all nodes
for _, node := range nodes {
node.Server.Stop()
node.Shutdown()
}
// Advance the fake clock so election timeouts can fire after restart.
raftutils.AdvanceTicks(clockSource, 5)
// Restart all nodes
i := 0
for k, node := range nodes {
// When staggering, advance one tick between restarts (but not before
// the first one) so members come back at different logical times.
if stagger && i != 0 {
raftutils.AdvanceTicks(clockSource, 1)
}
nodes[k] = raftutils.RestartNode(t, clockSource, node, false)
i++
}
raftutils.WaitForCluster(t, clockSource, nodes)
// Propose another value
values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), "id2")
assert.NoError(t, err, "failed to propose value")
// Poll until every member's store contains exactly the two proposed
// values; raft replication is asynchronous, hence PollFunc.
for _, node := range nodes {
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
var err error
node.MemoryStore().View(func(tx store.ReadTx) {
var allNodes []*api.Node
allNodes, err = store.FindNodes(tx, store.All)
if err != nil {
return
}
if len(allNodes) != 2 {
err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes))
return
}
for i, nodeID := range []string{"id1", "id2"} {
n := store.GetNode(tx, nodeID)
if !reflect.DeepEqual(n, values[i]) {
err = fmt.Errorf("node %s did not match expected value", nodeID)
return
}
}
})
return err
}))
}
}
开发者ID:ChristianKniep,项目名称:swarmkit,代码行数:59,代码来源:raft_test.go
示例4: Run
// Run is roleManager's main loop. It reconciles the role of every node
// already in the store, then reacts to node-update events. Nodes awaiting
// a role change are tracked in rm.pending and retried on a
// roleReconcileInterval ticker until the pending set drains (presumably
// reconcileRole removes completed entries from rm.pending — confirm in
// reconcileRole). The loop exits when rm.ctx is cancelled.
func (rm *roleManager) Run() {
defer close(rm.doneChan)
var (
nodes []*api.Node
ticker *time.Ticker
tickerCh <-chan time.Time
)
// Atomically snapshot the node list and subscribe to node updates so
// no event between the view and the watch is lost.
watcher, cancelWatch, err := store.ViewAndWatch(rm.store,
func(readTx store.ReadTx) error {
var err error
nodes, err = store.FindNodes(readTx, store.All)
return err
},
state.EventUpdateNode{})
defer cancelWatch()
if err != nil {
log.L.WithError(err).Error("failed to check nodes for role changes")
} else {
// Initial pass: attempt to reconcile every known node.
for _, node := range nodes {
rm.pending[node.ID] = node
rm.reconcileRole(node)
}
// Anything still pending needs periodic retries.
if len(rm.pending) != 0 {
ticker = time.NewTicker(roleReconcileInterval)
tickerCh = ticker.C
}
}
for {
select {
case event := <-watcher:
node := event.(state.EventUpdateNode).Node
rm.pending[node.ID] = node
rm.reconcileRole(node)
// Start the retry ticker lazily, only while work is pending.
if len(rm.pending) != 0 && ticker == nil {
ticker = time.NewTicker(roleReconcileInterval)
tickerCh = ticker.C
}
case <-tickerCh:
for _, node := range rm.pending {
rm.reconcileRole(node)
}
// Stop ticking once every pending role change has completed;
// a nil tickerCh blocks this case forever.
if len(rm.pending) == 0 {
ticker.Stop()
ticker = nil
tickerCh = nil
}
case <-rm.ctx.Done():
if ticker != nil {
ticker.Stop()
}
return
}
}
}
开发者ID:docker,项目名称:docker,代码行数:59,代码来源:role_manager.go
示例5: UpdateNode
// UpdateNode updates a Node referenced by NodeID with the given NodeSpec.
// - Returns `NotFound` if the Node is not found.
// - Returns `InvalidArgument` if the NodeSpec is malformed.
// - Returns `FailedPrecondition` when the update would demote the last
//   manager of the swarm.
// - Returns an error if the update fails.
// When a manager is demoted to a worker, its raft member entry is also
// removed from the memberlist after the store update commits.
func (s *Server) UpdateNode(ctx context.Context, request *api.UpdateNodeRequest) (*api.UpdateNodeResponse, error) {
if request.NodeID == "" || request.NodeVersion == nil {
return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
}
if err := validateNodeSpec(request.Spec); err != nil {
return nil, err
}
var (
node *api.Node
demote bool
)
err := s.store.Update(func(tx store.Tx) error {
node = store.GetNode(tx, request.NodeID)
if node == nil {
// Not an error inside the transaction: NotFound is reported
// after the update, based on node remaining nil.
return nil
}
// Demotion sanity checks.
if node.Spec.Role == api.NodeRoleManager && request.Spec.Role == api.NodeRoleWorker {
demote = true
// Refuse to demote the only remaining manager.
managers, err := store.FindNodes(tx, store.ByRole(api.NodeRoleManager))
if err != nil {
return grpc.Errorf(codes.Internal, "internal store error: %v", err)
}
if len(managers) == 1 && managers[0].ID == node.ID {
return grpc.Errorf(codes.FailedPrecondition, "attempting to demote the last manager of the swarm")
}
}
node.Meta.Version = *request.NodeVersion
node.Spec = *request.Spec.Copy()
return store.UpdateNode(tx, node)
})
if err != nil {
return nil, err
}
if node == nil {
return nil, grpc.Errorf(codes.NotFound, "node %s not found", request.NodeID)
}
// On demotion, also remove the node from the raft memberlist.
if demote && s.raft != nil {
memberlist := s.raft.GetMemberlist()
for raftID, member := range memberlist {
if member.NodeID == request.NodeID {
if err := s.raft.RemoveMember(ctx, raftID); err != nil {
return nil, err
}
break
}
}
}
return &api.UpdateNodeResponse{
Node: node,
}, nil
}
开发者ID:CadeLaRen,项目名称:docker-3,代码行数:61,代码来源:node.go
示例6: UpdateNode
// UpdateNode updates a Node referenced by NodeID with the given NodeSpec.
// - Returns `NotFound` if the Node is not found.
// - Returns `InvalidArgument` if the NodeSpec is malformed.
// - Returns an error if the update fails.
// Demoting a manager to a worker is refused when it is the last manager,
// when it is absent from the raft memberlist, or when removing it would
// cost the raft its quorum.
func (s *Server) UpdateNode(ctx context.Context, request *api.UpdateNodeRequest) (*api.UpdateNodeResponse, error) {
	if request.NodeID == "" || request.NodeVersion == nil {
		return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
	}
	if err := validateNodeSpec(request.Spec); err != nil {
		return nil, err
	}

	var (
		node   *api.Node
		member *membership.Member
	)

	updateErr := s.store.Update(func(tx store.Tx) error {
		node = store.GetNode(tx, request.NodeID)
		if node == nil {
			return grpc.Errorf(codes.NotFound, "node %s not found", request.NodeID)
		}

		// Demotion sanity checks.
		demoting := node.Spec.DesiredRole == api.NodeRoleManager && request.Spec.DesiredRole == api.NodeRoleWorker
		if demoting {
			// Check for manager entries in Store: refuse to demote
			// the only manager left.
			managers, findErr := store.FindNodes(tx, store.ByRole(api.NodeRoleManager))
			if findErr != nil {
				return grpc.Errorf(codes.Internal, "internal store error: %v", findErr)
			}
			if len(managers) == 1 && managers[0].ID == node.ID {
				return grpc.Errorf(codes.FailedPrecondition, "attempting to demote the last manager of the swarm")
			}

			// Check for node in memberlist.
			member = s.raft.GetMemberByNodeID(request.NodeID)
			if member == nil {
				return grpc.Errorf(codes.NotFound, "can't find manager in raft memberlist")
			}

			// Quorum safeguard.
			if !s.raft.CanRemoveMember(member.RaftID) {
				return grpc.Errorf(codes.FailedPrecondition, "can't remove member from the raft: this would result in a loss of quorum")
			}
		}

		node.Meta.Version = *request.NodeVersion
		node.Spec = *request.Spec.Copy()
		return store.UpdateNode(tx, node)
	})
	if updateErr != nil {
		return nil, updateErr
	}

	return &api.UpdateNodeResponse{
		Node: node,
	}, nil
}
开发者ID:docker,项目名称:docker,代码行数:57,代码来源:node.go
示例7: markNodesUnknown
// markNodesUnknown moves every node that is not already DOWN into the
// UNKNOWN state (used after a leadership change in the cluster) and
// registers an expiration callback that deregisters a node whose
// heartbeats never resume. Per-node update failures are logged; only a
// failure to read the node list or run the batch is returned.
func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
	log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown")

	// Snapshot every node currently in the store.
	var (
		nodes   []*api.Node
		viewErr error
	)
	d.store.View(func(tx store.ReadTx) {
		nodes, viewErr = store.FindNodes(tx, store.All)
	})
	if viewErr != nil {
		return fmt.Errorf("failed to get list of nodes: %v", viewErr)
	}

	_, batchErr := d.store.Batch(func(batch *store.Batch) error {
		for _, n := range nodes {
			nodeID := n.ID
			updateErr := batch.Update(func(tx store.Tx) error {
				// check if node is still here
				node := store.GetNode(tx, nodeID)
				if node == nil {
					return nil
				}
				// do not try to resurrect down nodes
				if node.Status.State == api.NodeStatus_DOWN {
					return nil
				}
				node.Status = api.NodeStatus{
					State:   api.NodeStatus_UNKNOWN,
					Message: `Node moved to "unknown" state due to leadership change in cluster`,
				}
				// Fired when the node's heartbeat grace period runs out
				// without it reconnecting: deregister it as DOWN.
				expireFunc := func() {
					log := log.WithField("node", nodeID)
					nodeStatus := api.NodeStatus{State: api.NodeStatus_DOWN, Message: `heartbeat failure for node in "unknown" state`}
					log.Debugf("heartbeat expiration for unknown node")
					if err := d.nodeRemove(nodeID, nodeStatus); err != nil {
						log.WithError(err).Errorf(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
					}
				}
				if err := d.nodes.AddUnknown(node, expireFunc); err != nil {
					return fmt.Errorf(`adding node in "unknown" state to node store failed: %v`, err)
				}
				if err := store.UpdateNode(tx, node); err != nil {
					return fmt.Errorf("update failed %v", err)
				}
				return nil
			})
			if updateErr != nil {
				log.WithField("node", nodeID).WithError(updateErr).Errorf(`failed to move node to "unknown" state`)
			}
		}
		return nil
	})
	return batchErr
}
开发者ID:ypjin,项目名称:swarmkit,代码行数:52,代码来源:dispatcher.go
示例8: TestHeartbeat
// TestHeartbeat verifies that a registered node stays READY while it sends
// heartbeats, and that a heartbeat carrying no/incorrect session ID is
// rejected with InvalidArgument.
func TestHeartbeat(t *testing.T) {
	cfg := DefaultConfig()
	cfg.HeartbeatPeriod = 500 * time.Millisecond
	cfg.HeartbeatEpsilon = 0
	// FIX: the dispatcher was previously started with DefaultConfig(),
	// silently discarding the heartbeat settings configured above.
	gd, err := startDispatcher(cfg)
	assert.NoError(t, err)
	defer gd.Close()

	// Register the node via a session and capture the session ID the
	// dispatcher issues.
	var expectedSessionID string
	{
		stream, err := gd.Clients[0].Session(context.Background(), &api.SessionRequest{})
		assert.NoError(t, err)
		defer stream.CloseSend()
		resp, err := stream.Recv()
		assert.NoError(t, err)
		assert.NotEmpty(t, resp.SessionID)
		expectedSessionID = resp.SessionID
	}
	// Stay well within the 500ms heartbeat period before the first beat.
	time.Sleep(250 * time.Millisecond)

	{
		// heartbeat without correct SessionID should fail
		resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{})
		assert.Nil(t, resp)
		assert.Error(t, err)
		assert.Equal(t, grpc.Code(err), codes.InvalidArgument)
	}

	resp, err := gd.Clients[0].Heartbeat(context.Background(), &api.HeartbeatRequest{SessionID: expectedSessionID})
	assert.NoError(t, err)
	assert.NotZero(t, resp.Period)

	// The heartbeat above reset the node's TTL; it must still be READY.
	time.Sleep(300 * time.Millisecond)
	gd.Store.View(func(readTx store.ReadTx) {
		storeNodes, err := store.FindNodes(readTx, store.All)
		assert.NoError(t, err)
		assert.NotEmpty(t, storeNodes)
		found := false
		for _, node := range storeNodes {
			if node.ID == gd.SecurityConfigs[0].ClientTLSCreds.NodeID() {
				found = true
				assert.Equal(t, api.NodeStatus_READY, node.Status.State)
			}
		}
		assert.True(t, found)
	})
}
开发者ID:ChristianKniep,项目名称:swarmkit,代码行数:48,代码来源:dispatcher_test.go
示例9: buildNodeSet
// buildNodeSet seeds the scheduler's nodeSet from every node currently in
// the store, pairing each node with its known tasks and advertised
// resources.
func (s *Scheduler) buildNodeSet(tx store.ReadTx, tasksByNode map[string]map[string]*api.Task) error {
	allNodes, err := store.FindNodes(tx, store.All)
	if err != nil {
		return err
	}

	s.nodeSet.alloc(len(allNodes))

	for _, node := range allNodes {
		// A node without a description (or without a resources section)
		// contributes zero resources.
		var res api.Resources
		if node.Description != nil && node.Description.Resources != nil {
			res = *node.Description.Resources
		}
		s.nodeSet.addOrUpdateNode(newNodeInfo(node, tasksByNode[node.ID], res))
	}

	return nil
}
开发者ID:jfrazelle,项目名称:docker,代码行数:18,代码来源:scheduler.go
示例10: TestClusterReelection
// TestClusterReelection is an integration test: it builds a cluster of 5
// managers and 15 agents, kills the leader plus 5 agents, waits for
// re-election and re-registration, and then verifies the new leader's
// store sees all 15 worker nodes, exactly 5 of them DOWN and the rest
// READY.
func TestClusterReelection(t *testing.T) {
	if !*integrationTests {
		t.Skip("integration test")
	}
	mCount, aCount := 5, 15

	c := createManagersCluster(t, mCount, aCount)
	require.NoError(t, testutils.PollFunc(nil, c.pollRegister))

	require.NoError(t, c.destroyLeader())
	// let's down some managers in the meantime
	require.NoError(t, c.destroyAgents(5))
	// ensure that cluster will converge to expected number of agents, we need big timeout because of heartbeat times
	require.NoError(t, testutils.PollFuncWithTimeout(nil, c.pollRegister, 30*time.Second))

	leader, err := c.leader()
	assert.NoError(t, err)

	// check nodes in store
	var nodes []*api.Node
	leader.m.RaftNode.MemoryStore().View(func(tx store.ReadTx) {
		// This err shadows the outer one deliberately; it is checked here.
		ns, err := store.FindNodes(tx, store.All)
		assert.NoError(t, err)
		for _, n := range ns {
			if n.Spec.Role == api.NodeRoleWorker {
				nodes = append(nodes, n)
			}
		}
	})
	// FIX: removed a duplicate assert.NoError on the outer err here — it
	// had already been checked after c.leader() and was never reassigned
	// (the err inside the View closure is a shadow), so the check was dead.
	assert.Len(t, nodes, aCount, "there should be all nodes in store")

	var downAgentsCount int
	for _, node := range nodes {
		if node.Status.State == api.NodeStatus_DOWN {
			downAgentsCount++
			continue
		}
		assert.Equal(t, api.NodeStatus_READY, node.Status.State, "there should be only down and ready nodes at this point")
	}
	assert.Equal(t, 5, downAgentsCount, "unexpected number of down agents")
}
开发者ID:ypjin,项目名称:swarmkit,代码行数:40,代码来源:manager_cluster_test.go
示例11: buildNodeHeap
// buildNodeHeap populates the scheduler's node heap from every node in the
// store, records each node's heap index, and establishes the heap
// invariant.
func (s *Scheduler) buildNodeHeap(tx store.ReadTx, tasksByNode map[string]map[string]*api.Task) error {
	allNodes, err := store.FindNodes(tx, store.All)
	if err != nil {
		return err
	}

	s.nodeHeap.alloc(len(allNodes))

	for idx, node := range allNodes {
		// A node without a description (or without a resources section)
		// contributes zero resources.
		var res api.Resources
		if node.Description != nil && node.Description.Resources != nil {
			res = *node.Description.Resources
		}
		s.nodeHeap.heap = append(s.nodeHeap.heap, newNodeInfo(node, tasksByNode[node.ID], res))
		s.nodeHeap.index[node.ID] = idx
	}

	heap.Init(&s.nodeHeap)
	return nil
}
开发者ID:Chandra-TechPassionate,项目名称:docker,代码行数:23,代码来源:scheduler.go
示例12: ListNodes
// ListNodes returns a list of all nodes.
func (s *Server) ListNodes(ctx context.Context, request *api.ListNodesRequest) (*api.ListNodesResponse, error) {
var (
nodes []*api.Node
err error
)
s.store.View(func(tx store.ReadTx) {
switch {
case request.Filters != nil && len(request.Filters.Names) > 0:
nodes, err = store.FindNodes(tx, buildFilters(store.ByName, request.Filters.Names))
case request.Filters != nil && len(request.Filters.NamePrefixes) > 0:
nodes, err = store.FindNodes(tx, buildFilters(store.ByNamePrefix, request.Filters.NamePrefixes))
case request.Filters != nil && len(request.Filters.IDPrefixes) > 0:
nodes, err = store.FindNodes(tx, buildFilters(store.ByIDPrefix, request.Filters.IDPrefixes))
case request.Filters != nil && len(request.Filters.Roles) > 0:
filters := make([]store.By, 0, len(request.Filters.Roles))
for _, v := range request.Filters.Roles {
filters = append(filters, store.ByRole(v))
}
nodes, err = store.FindNodes(tx, store.Or(filters...))
case request.Filters != nil && len(request.Filters.Memberships) > 0:
filters := make([]store.By, 0, len(request.Filters.Memberships))
for _, v := range request.Filters.Memberships {
filters = append(filters, store.ByMembership(v))
}
nodes, err = store.FindNodes(tx, store.Or(filters...))
default:
nodes, err = store.FindNodes(tx, store.All)
}
})
if err != nil {
return nil, err
}
if request.Filters != nil {
nodes = filterNodes(nodes,
func(e *api.Node) bool {
if len(request.Filters.Names) == 0 {
return true
}
if e.Description == nil {
return false
}
return filterContains(e.Description.Hostname, request.Filters.Names)
},
func(e *api.Node) bool {
if len(request.Filters.NamePrefixes) == 0 {
return true
}
if e.Description == nil {
return false
}
return filterContainsPrefix(e.Description.Hostname, request.Filters.NamePrefixes)
},
func(e *api.Node) bool {
return filterContainsPrefix(e.ID, request.Filters.IDPrefixes)
},
func(e *api.Node) bool {
if len(request.Filters.Labels) == 0 {
return true
}
if e.Description == nil {
return false
}
return filterMatchLabels(e.Description.Engine.Labels, request.Filters.Labels)
},
func(e *api.Node) bool {
if len(request.Filters.Roles) == 0 {
return true
}
for _, c := range request.Filters.Roles {
if c == e.Spec.Role {
return true
}
}
return false
},
func(e *api.Node) bool {
if len(request.Filters.Memberships) == 0 {
return true
}
for _, c := range request.Filters.Memberships {
if c == e.Spec.Membership {
return true
}
}
return false
},
)
}
// Add in manager information on nodes that are managers
if s.raft != nil {
memberlist := s.raft.GetMemberlist()
for _, node := range nodes {
for _, member := range memberlist {
if member.NodeID == node.ID {
node.ManagerStatus = &api.ManagerStatus{
RaftID: member.RaftID,
//.........这里部分代码省略.........
开发者ID:SUSE,项目名称:docker.mirror,代码行数:101,代码来源:node.go
示例13: doNetworkInit
//.........这里部分代码省略.........
networks, err = store.FindNetworks(tx, store.All)
})
if err != nil {
return errors.Wrap(err, "error listing all networks in store while trying to allocate during init")
}
var allocatedNetworks []*api.Network
for _, n := range networks {
if na.IsAllocated(n) {
continue
}
if err := a.allocateNetwork(ctx, n); err != nil {
log.G(ctx).WithError(err).Errorf("failed allocating network %s during init", n.ID)
continue
}
allocatedNetworks = append(allocatedNetworks, n)
}
if _, err := a.store.Batch(func(batch *store.Batch) error {
for _, n := range allocatedNetworks {
if err := a.commitAllocatedNetwork(ctx, batch, n); err != nil {
log.G(ctx).WithError(err).Errorf("failed committing allocation of network %s during init", n.ID)
}
}
return nil
}); err != nil {
log.G(ctx).WithError(err).Error("failed committing allocation of networks during init")
}
// Allocate nodes in the store so far before we process watched events.
var nodes []*api.Node
a.store.View(func(tx store.ReadTx) {
nodes, err = store.FindNodes(tx, store.All)
})
if err != nil {
return errors.Wrap(err, "error listing all nodes in store while trying to allocate during init")
}
var allocatedNodes []*api.Node
for _, node := range nodes {
if na.IsNodeAllocated(node) {
continue
}
if node.Attachment == nil {
node.Attachment = &api.NetworkAttachment{}
}
node.Attachment.Network = nc.ingressNetwork.Copy()
if err := a.allocateNode(ctx, node); err != nil {
log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s during init", node.ID)
continue
}
allocatedNodes = append(allocatedNodes, node)
}
if _, err := a.store.Batch(func(batch *store.Batch) error {
for _, node := range allocatedNodes {
if err := a.commitAllocatedNode(ctx, batch, node); err != nil {
log.G(ctx).WithError(err).Errorf("Failed to commit allocation of network resources for node %s during init", node.ID)
}
}
return nil
}); err != nil {
开发者ID:Mic92,项目名称:docker,代码行数:67,代码来源:network.go
示例14: TestRaftForceNewCluster
// TestRaftForceNewCluster stops a 3-node raft cluster, restarts a single
// member with the force-new-cluster option (which shrinks the memberlist
// to that one node), joins two fresh members, and verifies that a value
// committed before the reset survives and that new proposals replicate to
// every member.
func TestRaftForceNewCluster(t *testing.T) {
t.Parallel()
nodes, clockSource := raftutils.NewRaftCluster(t, tc)
// Propose a value
values := make([]*api.Node, 2)
var err error
values[0], err = raftutils.ProposeValue(t, nodes[1], "id1")
assert.NoError(t, err, "failed to propose value")
// The memberlist should contain 3 members on each node
for i := 1; i <= 3; i++ {
assert.Equal(t, len(nodes[uint64(i)].GetMemberlist()), 3)
}
// Stop all nodes
for _, node := range nodes {
node.Server.Stop()
node.Shutdown()
}
raftutils.AdvanceTicks(clockSource, 5)
// Tear down nodes 2 and 3 permanently; only node 1 will be restarted.
toClean := map[uint64]*raftutils.TestNode{
2: nodes[2],
3: nodes[3],
}
raftutils.TeardownCluster(t, toClean)
delete(nodes, 2)
delete(nodes, 3)
// Only restart the first node with force-new-cluster option
nodes[1] = raftutils.RestartNode(t, clockSource, nodes[1], true)
raftutils.WaitForCluster(t, clockSource, nodes)
// The memberlist should contain only one node (self)
assert.Equal(t, len(nodes[1].GetMemberlist()), 1)
// Add 2 more members
nodes[2] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc)
raftutils.WaitForCluster(t, clockSource, nodes)
nodes[3] = raftutils.NewJoinNode(t, clockSource, nodes[1].Address, tc)
raftutils.WaitForCluster(t, clockSource, nodes)
newCluster := map[uint64]*raftutils.TestNode{
1: nodes[1],
2: nodes[2],
3: nodes[3],
}
defer raftutils.TeardownCluster(t, newCluster)
// The memberlist should contain 3 members on each node
for i := 1; i <= 3; i++ {
assert.Equal(t, len(nodes[uint64(i)].GetMemberlist()), 3)
}
// Propose another value
values[1], err = raftutils.ProposeValue(t, raftutils.Leader(nodes), "id2")
assert.NoError(t, err, "failed to propose value")
// Poll until every member's store contains exactly the two proposed
// values; replication is asynchronous, hence PollFunc.
for _, node := range nodes {
assert.NoError(t, raftutils.PollFunc(clockSource, func() error {
var err error
node.MemoryStore().View(func(tx store.ReadTx) {
var allNodes []*api.Node
allNodes, err = store.FindNodes(tx, store.All)
if err != nil {
return
}
if len(allNodes) != 2 {
err = fmt.Errorf("expected 2 nodes, got %d", len(allNodes))
return
}
for i, nodeID := range []string{"id1", "id2"} {
n := store.GetNode(tx, nodeID)
if !reflect.DeepEqual(n, values[i]) {
err = fmt.Errorf("node %s did not match expected value", nodeID)
return
}
}
})
return err
}))
}
}
开发者ID:ChristianKniep,项目名称:swarmkit,代码行数:88,代码来源:raft_test.go
示例15: markNodesUnknown
// markNodesUnknown is run after this dispatcher takes over (a leadership
// change). Every node not already DOWN is moved to the UNKNOWN state with
// an expiration callback that marks it DOWN if heartbeats never resume.
// Nodes that are already DOWN are instead tracked in d.downNodes with a
// callback that moves their tasks to the ORPHANED state when their timer
// fires. Per-node failures are logged, not returned.
func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
log := log.G(ctx).WithField("method", "(*Dispatcher).markNodesUnknown")
var nodes []*api.Node
var err error
d.store.View(func(tx store.ReadTx) {
nodes, err = store.FindNodes(tx, store.All)
})
if err != nil {
return errors.Wrap(err, "failed to get list of nodes")
}
_, err = d.store.Batch(func(batch *store.Batch) error {
for _, n := range nodes {
err := batch.Update(func(tx store.Tx) error {
// check if node is still here
node := store.GetNode(tx, n.ID)
if node == nil {
return nil
}
// do not try to resurrect down nodes
if node.Status.State == api.NodeStatus_DOWN {
nodeCopy := node
// When the down-node timer fires, orphan the node's tasks
// and stop tracking it.
expireFunc := func() {
if err := d.moveTasksToOrphaned(nodeCopy.ID); err != nil {
log.WithError(err).Error(`failed to move all tasks to "ORPHANED" state`)
}
d.downNodes.Delete(nodeCopy.ID)
}
d.downNodes.Add(nodeCopy, expireFunc)
return nil
}
node.Status.State = api.NodeStatus_UNKNOWN
node.Status.Message = `Node moved to "unknown" state due to leadership change in cluster`
nodeID := node.ID
// Fired if the node never heartbeats again: deregister it as DOWN.
expireFunc := func() {
log := log.WithField("node", nodeID)
log.Debugf("heartbeat expiration for unknown node")
if err := d.markNodeNotReady(nodeID, api.NodeStatus_DOWN, `heartbeat failure for node in "unknown" state`); err != nil {
log.WithError(err).Errorf(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
}
}
if err := d.nodes.AddUnknown(node, expireFunc); err != nil {
return errors.Wrap(err, `adding node in "unknown" state to node store failed`)
}
if err := store.UpdateNode(tx, node); err != nil {
return errors.Wrap(err, "update failed")
}
return nil
})
if err != nil {
log.WithField("node", n.ID).WithError(err).Errorf(`failed to move node to "unknown" state`)
}
}
return nil
})
return err
}
开发者ID:yongtang,项目名称:swarmkit,代码行数:61,代码来源:dispatcher.go
示例16: Run
// Run contains the GlobalOrchestrator event loop
func (g *GlobalOrchestrator) Run(ctx context.Context) error {
defer close(g.doneChan)
// Watch changes to services and tasks
queue := g.store.WatchQueue()
watcher, cancel := queue.Watch()
defer cancel()
// Get list of nodes
var (
nodes []*api.Node
err error
)
g.store.View(func(readTx store.ReadTx) {
nodes, err = store.FindNodes(readTx, store.All)
})
if err != nil {
return err
}
for _, n := range nodes {
// if a node is in drain state, do not add it
if isValidNode(n) {
g.nodes[n.ID] = struct{}{}
}
}
// Lookup global services
var existingServices []*api.Service
g.store.View(func(readTx store.ReadTx) {
existingServices, err = store.FindServices(readTx, store.All)
})
if err != nil {
return err
}
for _, s := range existingServices {
if isGlobalService(s) {
g.globalServices[s.ID] = s
g.reconcileOneService(ctx, s)
}
}
for {
select {
case event := <-watcher:
// TODO(stevvooe): Use ctx to limit running time of operation.
switch v := event.(type) {
case state.EventCreateService:
if !isGlobalService(v.Service) {
continue
}
g.globalServices[v.Service.ID] = v.Service
g.reconcileOneService(ctx, v.Service)
case state.EventUpdateService:
if !isGlobalService(v.Service) {
continue
}
g.globalServices[v.Service.ID] = v.Service
g.reconcileOneService(ctx, v.Service)
case state.EventDeleteService:
if !isGlobalService(v.Service) {
continue
}
deleteServiceTasks(ctx, g.store, v.Service)
// delete the service from service map
delete(g.globalServices, v.Service.ID)
g.restarts.ClearServiceHistory(v.Service.ID)
case state.EventCreateNode:
g.reconcileOneNode(ctx, v.Node)
case state.EventUpdateNode:
switch v.Node.Status.State {
// NodeStatus_DISCONNECTED is a transient state, no need to make any change
case api.NodeStatus_DOWN:
g.removeTasksFromNode(ctx, v.Node)
case api.NodeStatus_READY:
// node could come back to READY from DOWN or DISCONNECT
g.reconcileOneNode(ctx, v.Node)
}
case state.EventDeleteNode:
g.removeTasksFromNode(ctx, v.Node)
delete(g.nodes, v.Node.ID)
case state.EventUpdateTask:
if _, exists := g.globalServices[v.Task.ServiceID]; !exists {
continue
}
// global orchestrator needs to inspect when a task has terminated
// it should ignore tasks whose DesiredState is past running, which
// means the task has been processed
if isTaskTerminated(v.Task) {
g.restartTask(ctx, v.Task.ID, v.Task.ServiceID)
}
case state.EventDeleteTask:
// CLI allows deleting task
if _, exists := g.globalServices[v.Task.ServiceID]; !exists {
continue
}
g.reconcileServiceOneNode(ctx, v.Task.ServiceID, v.Task.NodeID)
}
case <-g.stopChan:
return nil
//.........这里部分代码省略.........
开发者ID:CadeLaRen,项目名称:docker-3,代码行数:101,代码来源:global.go
示例17: Run
// Run runs the CA signer main loop.
// The CA signer can be stopped with cancelling ctx or calling Stop().
// On startup it snapshots the cluster object and all registered nodes
// (atomically with subscribing to their events), reconciles any node
// certificates that changed while this server was not running, and then
// signs/evaluates certificates as node and cluster events arrive.
func (s *Server) Run(ctx context.Context) error {
s.mu.Lock()
// Refuse concurrent Run() calls.
if s.isRunning() {
s.mu.Unlock()
return fmt.Errorf("CA signer is already running")
}
s.wg.Add(1)
s.mu.Unlock()
defer s.wg.Done()
logger := log.G(ctx).WithField("module", "ca")
ctx = log.WithLogger(ctx, logger)
// Run() should never be called twice, but just in case, we're
// attempting to close the started channel in a safe way
select {
case <-s.started:
return fmt.Errorf("CA server cannot be started more than once")
default:
close(s.started)
}
// Retrieve the channels to keep track of changes in the cluster
// Retrieve all the currently registered nodes
var nodes []*api.Node
updates, cancel, err := store.ViewAndWatch(
s.store,
func(readTx store.ReadTx) error {
clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
if err != nil {
return err
}
if len(clusters) != 1 {
return fmt.Errorf("could not find cluster object")
}
s.updateCluster(ctx, clusters[0])
nodes, err = store.FindNodes(readTx, store.All)
return err
},
state.EventCreateNode{},
state.EventUpdateNode{},
state.EventUpdateCluster{},
)
// Do this after updateCluster has been called, so isRunning never
// returns true without joinTokens being set correctly.
s.mu.Lock()
s.ctx, s.cancel = context.WithCancel(ctx)
s.mu.Unlock()
if err != nil {
log.G(ctx).WithFields(logrus.Fields{
"method": "(*Server).Run",
}).WithError(err).Errorf("snapshot store view failed")
return err
}
defer cancel()
// We might have missed some updates if there was a leader election,
// so let's pick up the slack.
if err := s.reconcileNodeCertificates(ctx, nodes); err != nil {
// We don't return here because that means the Run loop would
// never run. Log an error instead.
log.G(ctx).WithFields(logrus.Fields{
"method": "(*Server).Run",
}).WithError(err).Errorf("error attempting to reconcile certificates")
}
// Watch for new nodes being created, new nodes being updated, and changes
// to the cluster
for {
select {
case event := <-updates:
switch v := event.(type) {
case state.EventCreateNode:
s.evaluateAndSignNodeCert(ctx, v.Node)
case state.EventUpdateNode:
// If this certificate is already at a final state
// no need to evaluate and sign it.
if !isFinalState(v.Node.Certificate.Status) {
s.evaluateAndSignNodeCert(ctx, v.Node)
}
case state.EventUpdateCluster:
s.updateCluster(ctx, v.Cluster)
}
// The outer ctx stopping is an error; the internal s.ctx being
// cancelled (via Stop()) is a clean shutdown.
case <-ctx.Done():
return ctx.Err()
case <-s.ctx.Done():
return nil
}
}
}
开发者ID:Chandra-TechPassionate,项目名称:docker,代码行数:96,代码来源:server.go
示例18: doNetworkInit
func (a *Allocator) doNetworkInit(ctx context.Context) error {
na, err := networkallocator.New()
if err != nil {
return err
}
nc := &networkContext{
nwkAllocator: na,
unallocatedTasks: make(map[string]*api.Task),
unallocatedServices: make(map[string]*api.Service),
unallocatedNetworks: make(map[string]*api.Network),
}
// Check if we have the ingress network. If not found create
// it before reading all network objects for allocation.
var networks []*api.Network
a.store.View(func(tx store.ReadTx) {
networks, err = store.FindNetworks(tx, store.ByName(ingressNetworkName))
if len(networks) > 0 {
ingressNetwork = networks[0]
}
})
if err != nil {
return fmt.Errorf("failed to find ingress network during init: %v", err)
}
// If ingress network is not found, create one right away
// using the predefined template.
if len(networks) == 0 {
if err := a.store.Update(func(tx store.Tx) error {
ingressNetwork.ID = identity.NewID()
if err := store.CreateNetwork(tx, ingressNetwork); err != nil {
return err
}
return nil
}); err != nil {
return fmt.Errorf("failed to create ingress network: %v", err)
}
a.store.View(func(tx store.ReadTx) {
networks, err = store.FindNetworks(tx, store.ByName(ingressNetworkName))
if len(networks) > 0 {
ingressNetwork = networks[0]
}
})
if err != nil {
return fmt.Errorf("failed to find ingress network after creating it: %v", err)
}
}
// Try to complete ingress network allocation before anything else so
// that the we can get the preferred subnet for ingress
// network.
if !na.IsAllocated(ingressNetwork) {
if err := a.allocateNetwork(ctx, nc, ingressNetwork); err != nil {
log.G(ctx).Errorf("failed allocating ingress network during init: %v", err)
}
// Update store after allocation
if err := a.store.Update(func(tx store.Tx) error {
if err := store.UpdateNetwork(tx, ingressNetwork); err != nil {
return err
}
return nil
}); err != nil {
return fmt.Errorf("failed to create ingress network: %v", err)
}
}
// Allocate networks in the store so far before we started
// watching.
a.store.View(func(tx store.ReadTx) {
networks, err = store.FindNetworks(tx, store.All)
})
if err != nil {
return fmt.Errorf("error listing all networks in store while trying to allocate during init: %v", err)
}
for _, n := range networks {
if na.IsAllocated(n) {
continue
}
if err := a.allocateNetwork(ctx, nc, n); err != nil {
log.G(ctx).Errorf("failed allocating network %s during init: %v", n.ID, err)
}
}
// Allocate nodes in the store so far before we process watched events.
var nodes []*api.Node
a.store.View(func(tx store.ReadTx) {
nodes, err = store.FindNodes(tx, store.All)
})
if err != nil {
return fmt.Errorf("error listing all services in store while trying to allocate during init: %v", err)
}
//.........这里部分代码省略.........
开发者ID:CadeLaRen,项目名称:docker-3,代码行数:101,代码来源:network.go
示例19: Run
// Run contains the global orchestrator event loop
func (g *Orchestrator) Run(ctx context.Context) error {
defer close(g.doneChan)
// Watch changes to services and tasks
queue := g.store.WatchQueue()
watcher, cancel := queue.Watch()
defer cancel()
// lookup the cluster
var err error
g.store.View(func(readTx store.ReadTx) {
var clusters []*api.Cluster
clusters, err = store.FindClusters(readTx, store.ByName("default"))
if len(clusters) != 1 {
return // just pick up the cluster when it is created.
}
g.cluster = clusters[0]
})
if err != nil {
return err
}
// Get list of nodes
var nodes []*api.Node
g.store.View(func(readTx store.ReadTx) {
nodes, err = store.FindNodes(readTx, store.All)
})
if err != nil {
return err
}
for _, n := range nodes {
g.updateNode(n)
}
// Lookup global services
var existingServices []*api.Service
g.store.View(func(readTx store.ReadTx) {
existingServices, err = store.FindServices(readTx, store.All)
})
if err != nil {
return err
}
var reconcileServiceIDs []string
for _, s := range existingServices {
if orchestrator.IsGlobalService(s) {
g.updateService(s)
reconcileServiceIDs = append(reconcileServiceIDs, s.ID)
}
}
g.reconcileServices(ctx, reconcileServiceIDs)
for {
select {
case event := <-watcher:
// TODO(stevvooe): Use ctx to limit running time of operation.
switch v := event.(type) {
case state.EventUpdateCluster:
g.cluster = v.Cluster
case state.EventCreateService:
if !orchestrator.IsGlobalService(v.Service) {
continue
}
g.updateService(v.Service)
g.reconcileServices(ctx, []string{v.Service.ID})
case state.EventUpdateService:
if !orchestrator.IsGlobalService(v.Service) {
continue
}
g.updateService(v.Service)
g.reconcileServices(ctx, []string{v.Service.ID})
case state.EventDeleteService:
if !orchestrator.IsGlobalService(v.Service) {
continue
}
orchestrator.DeleteServiceTasks(ctx, g.store, v.Service)
// delete the service from service map
delete(g.globalServices, v.Service.ID)
g.restarts.ClearServiceHistory(v.Service.ID)
case state.EventCreateNode:
g.updateNode(v.Node)
g.reconcileOneNode(ctx, v.Node)
case state.EventUpdateNode:
g.updateNode(v.Node)
switch v.Node.Status.State {
// NodeStatus_DISCONNECTED is a transient state, no need to make any change
case api.NodeStatus_DOWN:
g.removeTasksFromNode(ctx, v.Node)
case api.NodeStatus_READY:
// node could come back to READY from DOWN or DISCONNECT
g.reconcileOneNode(ctx, v.Node)
}
case state.EventDeleteNode:
g.removeTasksFromNode(ctx, v.Node)
delete(g.nodes, v.Node.ID)
case state.EventUpdateTask:
if _, exists := g.globalServices[v.Task.ServiceID]; !exists {
continue
//.........这里部分代码省略.........
开发者ID:msabansal,项目名称:docker,代码行数:101,代码来源:global.go
示例20: Run
// Run runs the CA signer main loop.
// The CA signer can be stopped with cancelling ctx or calling Stop().
func (s *Server) Run(ctx context.Context) error {
s.mu.Lock()
if s.isRunning() {
s.mu.Unlock()
return errors.New("CA signer is already running")
}
s.wg.Add(1)
s.mu.Unlock()
defer s.wg.Done()
ctx = log.WithModule(ctx, "ca")
// Retrieve the channels to keep track of changes in the cluster
// Retrieve all the currently registered nodes
var nodes []*api.Node
updates, cancel, err := store.ViewAndWatch(
s.store,
func(readTx store.ReadTx) error {
clusters, err := store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
if err != nil {
return err
}
if len(clusters) != 1 {
return errors.New("could not find cluster object")
}
s.updateCluster(ctx, clusters[0])
nodes, err = store.FindNodes(readTx, store.All)
return err
},
state.EventCreateNode{},
state.EventUpdateNode{},
state.EventUpdateCluster{},
)
// Do this after updateCluster has been called, so isRunning never
// return
|
请发表评论