本文整理汇总了Golang中github.com/docker/swarmkit/manager/state/store.FindTasks函数的典型用法代码示例。如果您正苦于以下问题:Golang FindTasks函数的具体用法?Golang FindTasks怎么用?Golang FindTasks使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了FindTasks函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Golang代码示例。
示例1: removeTasksFromNode
// removeTasksFromNode removes from the store every task on the given node
// that belongs to a service managed by this global orchestrator.
func (g *GlobalOrchestrator) removeTasksFromNode(ctx context.Context, node *api.Node) {
	var (
		nodeTasks []*api.Task
		viewErr   error
	)
	// Snapshot the node's tasks under a read transaction.
	g.store.View(func(tx store.ReadTx) {
		nodeTasks, viewErr = store.FindTasks(tx, store.ByNodeID(node.ID))
	})
	if viewErr != nil {
		log.G(ctx).WithError(viewErr).Errorf("global orchestrator: removeTasksFromNode failed finding tasks")
		return
	}
	_, batchErr := g.store.Batch(func(batch *store.Batch) error {
		for _, task := range nodeTasks {
			// GlobalOrchestrator only removes tasks that belong to one of
			// its tracked global services.
			if _, ok := g.globalServices[task.ServiceID]; ok {
				g.removeTask(ctx, batch, task)
			}
		}
		return nil
	})
	if batchErr != nil {
		log.G(ctx).WithError(batchErr).Errorf("global orchestrator: removeTasksFromNode failed")
	}
}
开发者ID:CadeLaRen,项目名称:docker-3,代码行数:26,代码来源:global.go
示例2: deleteServiceTasks
// deleteServiceTasks deletes every task belonging to the given service.
// Individual deletion failures are logged but do not abort the batch.
func deleteServiceTasks(ctx context.Context, s *store.MemoryStore, service *api.Service) {
	var (
		serviceTasks []*api.Task
		findErr      error
	)
	s.View(func(tx store.ReadTx) {
		serviceTasks, findErr = store.FindTasks(tx, store.ByServiceID(service.ID))
	})
	if findErr != nil {
		log.G(ctx).WithError(findErr).Errorf("failed to list tasks")
		return
	}
	_, batchErr := s.Batch(func(batch *store.Batch) error {
		for _, task := range serviceTasks {
			updateErr := batch.Update(func(tx store.Tx) error {
				// Log-and-continue: a single failed delete should not
				// stop the remaining tasks from being removed.
				if err := store.DeleteTask(tx, task.ID); err != nil {
					log.G(ctx).WithError(err).Errorf("failed to delete task")
				}
				return nil
			})
			if updateErr != nil {
				return updateErr
			}
		}
		return nil
	})
	if batchErr != nil {
		log.G(ctx).WithError(batchErr).Errorf("task search transaction failed")
	}
}
开发者ID:CWSpear,项目名称:docker,代码行数:31,代码来源:replicated.go
示例3: reconcileServiceOneNode
// reconcileServiceOneNode checks one service on one node.
// If the restart policy considers the node's task completed, all running
// tasks of the service on the node are removed; otherwise exactly one
// running task is kept (a missing one is added, surplus ones are removed).
func (g *GlobalOrchestrator) reconcileServiceOneNode(ctx context.Context, serviceID string, nodeID string) {
// Skip nodes and services this orchestrator is not tracking.
_, exists := g.nodes[nodeID]
if !exists {
return
}
service, exists := g.globalServices[serviceID]
if !exists {
return
}
// the node has completed this service
completed := false
// tasks for this node and service
var (
tasks []*api.Task
err error
)
g.store.View(func(tx store.ReadTx) {
var tasksOnNode []*api.Task
tasksOnNode, err = store.FindTasks(tx, store.ByNodeID(nodeID))
if err != nil {
// err is captured by the closure and checked after View returns.
return
}
for _, t := range tasksOnNode {
// only interested in one service
if t.ServiceID != serviceID {
continue
}
if isTaskRunning(t) {
tasks = append(tasks, t)
} else {
// A finished task marks the node as completed if the restart
// policy says it should not be restarted.
if isTaskCompleted(t, restartCondition(t)) {
completed = true
}
}
}
})
if err != nil {
log.G(ctx).WithError(err).Errorf("global orchestrator: reconcile failed finding tasks")
return
}
_, err = g.store.Batch(func(batch *store.Batch) error {
// if restart policy considers this node has finished its task
// it should remove all running tasks
if completed {
g.removeTasks(ctx, batch, service, tasks)
return nil
}
// this node needs to run 1 copy of the task
if len(tasks) == 0 {
g.addTask(ctx, batch, service, nodeID)
} else {
// Keep the first running task, remove any duplicates.
g.removeTasks(ctx, batch, service, tasks[1:])
}
return nil
})
if err != nil {
log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServiceOneNode batch failed")
}
}
开发者ID:CadeLaRen,项目名称:docker-3,代码行数:61,代码来源:global.go
示例4: getRunnableAndDeadSlots
// getRunnableAndDeadSlots returns two maps of slots. The first contains slots
// that have at least one task with a desired state above NEW and lesser or
// equal to RUNNING. The second is for slots that only contain tasks with a
// desired state above RUNNING.
func getRunnableAndDeadSlots(s *store.MemoryStore, serviceID string) (map[uint64]slot, map[uint64]slot, error) {
	var (
		serviceTasks []*api.Task
		findErr      error
	)
	s.View(func(tx store.ReadTx) {
		serviceTasks, findErr = store.FindTasks(tx, store.ByServiceID(serviceID))
	})
	if findErr != nil {
		return nil, nil, findErr
	}

	// First pass: slots that still contain a runnable task.
	running := make(map[uint64]slot)
	for _, task := range serviceTasks {
		if task.DesiredState <= api.TaskStateRunning {
			running[task.Slot] = append(running[task.Slot], task)
		}
	}

	// Second pass: any task whose slot is not runnable belongs to a dead slot.
	dead := make(map[uint64]slot)
	for _, task := range serviceTasks {
		if _, ok := running[task.Slot]; !ok {
			dead[task.Slot] = append(dead[task.Slot], task)
		}
	}
	return running, dead, nil
}
开发者ID:JMesser81,项目名称:docker,代码行数:32,代码来源:services.go
示例5: reconcileOneService
// reconcileOneService brings one global service to its desired state: every
// known node runs exactly one copy of the task, nodes whose task completed
// (per the restart policy) have their running tasks removed, and surviving
// tasks are routed through the updater.
func (g *GlobalOrchestrator) reconcileOneService(ctx context.Context, service *api.Service) {
	var (
		tasks []*api.Task
		err   error
	)
	g.store.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
	})
	if err != nil {
		log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileOneService failed finding tasks")
		return
	}
	// a node may have completed this service
	nodeCompleted := make(map[string]struct{})
	// nodeID -> task list
	nodeTasks := make(map[string][]*api.Task)
	for _, t := range tasks {
		if isTaskRunning(t) {
			// Collect all running instances of this service
			nodeTasks[t.NodeID] = append(nodeTasks[t.NodeID], t)
		} else {
			// for finished tasks, check restartPolicy
			if isTaskCompleted(t, restartCondition(t)) {
				nodeCompleted[t.NodeID] = struct{}{}
			}
		}
	}
	_, err = g.store.Batch(func(batch *store.Batch) error {
		var updateTasks []*api.Task
		for nodeID := range g.nodes {
			ntasks := nodeTasks[nodeID]
			// if restart policy considers this node has finished its task
			// it should remove all running tasks.
			// BUG FIX: the original returned nil here, which ended the
			// batch after the first completed node and skipped the
			// remaining nodes and the updater call; continue instead
			// (matches the later upstream reconcileServices).
			if _, exists := nodeCompleted[nodeID]; exists {
				g.removeTasks(ctx, batch, service, ntasks)
				continue
			}
			// this node needs to run 1 copy of the task
			if len(ntasks) == 0 {
				g.addTask(ctx, batch, service, nodeID)
			} else {
				updateTasks = append(updateTasks, ntasks[0])
				g.removeTasks(ctx, batch, service, ntasks[1:])
			}
		}
		if len(updateTasks) > 0 {
			g.updater.Update(ctx, service, updateTasks)
		}
		return nil
	})
	if err != nil {
		log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileOneService transaction failed")
	}
}
开发者ID:CadeLaRen,项目名称:docker-3,代码行数:56,代码来源:global.go
示例6: reconcile
// reconcile brings the number of running tasks for a replicated service in
// line with the replica count in its spec: scaling up adds missing tasks,
// scaling down removes the surplus, and surviving tasks are routed through
// the updater in every case.
func (r *ReplicatedOrchestrator) reconcile(ctx context.Context, service *api.Service) {
var (
tasks []*api.Task
err error
)
// Snapshot the service's tasks under a read transaction.
r.store.View(func(tx store.ReadTx) {
tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
})
if err != nil {
log.G(ctx).WithError(err).Errorf("reconcile failed finding tasks")
return
}
runningTasks := make([]*api.Task, 0, len(tasks))
runningInstances := make(map[uint64]struct{}) // this could be a bitfield...
for _, t := range tasks {
// Technically the check below could just be
// t.DesiredState <= api.TaskStateRunning, but ignoring tasks
// with DesiredState == NEW simplifies the drainer unit tests.
if t.DesiredState > api.TaskStateNew && t.DesiredState <= api.TaskStateRunning {
runningTasks = append(runningTasks, t)
runningInstances[t.Slot] = struct{}{}
}
}
numTasks := len(runningTasks)
// The caller guarantees a replicated service, so the type assertion is safe.
deploy := service.Spec.GetMode().(*api.ServiceSpec_Replicated)
specifiedInstances := int(deploy.Replicated.Replicas)
// TODO(aaronl): Add support for restart delays.
_, err = r.store.Batch(func(batch *store.Batch) error {
switch {
case specifiedInstances > numTasks:
log.G(ctx).Debugf("Service %s was scaled up from %d to %d instances", service.ID, numTasks, specifiedInstances)
// Update all current tasks then add missing tasks
r.updater.Update(ctx, service, runningTasks)
r.addTasks(ctx, batch, service, runningInstances, specifiedInstances-numTasks)
case specifiedInstances < numTasks:
// Update up to N tasks then remove the extra
log.G(ctx).Debugf("Service %s was scaled down from %d to %d instances", service.ID, numTasks, specifiedInstances)
r.updater.Update(ctx, service, runningTasks[:specifiedInstances])
r.removeTasks(ctx, batch, service, runningTasks[specifiedInstances:])
case specifiedInstances == numTasks:
// Simple update, no scaling - update all tasks.
r.updater.Update(ctx, service, runningTasks)
}
return nil
})
if err != nil {
log.G(ctx).WithError(err).Errorf("reconcile batch failed")
}
}
开发者ID:amitshukla,项目名称:docker,代码行数:56,代码来源:services.go
示例7: getRunnableServiceTasks
// getRunnableServiceTasks is a test helper that returns the tasks of the
// given service whose desired state is exactly RUNNING.
func getRunnableServiceTasks(t *testing.T, s *store.MemoryStore, service *api.Service) []*api.Task {
	var (
		found   []*api.Task
		findErr error
	)
	s.View(func(tx store.ReadTx) {
		found, findErr = store.FindTasks(tx, store.ByServiceID(service.ID))
	})
	assert.NoError(t, findErr)

	// Keep a non-nil slice so callers can compare against empty literals.
	runnable := []*api.Task{}
	for _, task := range found {
		if task.DesiredState == api.TaskStateRunning {
			runnable = append(runnable, task)
		}
	}
	return runnable
}
开发者ID:ChristianKniep,项目名称:swarmkit,代码行数:19,代码来源:updater_test.go
示例8: RemoveNetwork
// RemoveNetwork removes a Network referenced by NetworkID.
// - Returns `InvalidArgument` if NetworkID is not provided.
// - Returns `NotFound` if the Network is not found.
// - Returns `FailedPrecondition` if the Network is still referenced by a
//   service or task, and `PermissionDenied` for pre-defined networks.
// - Returns an error if the deletion fails.
func (s *Server) RemoveNetwork(ctx context.Context, request *api.RemoveNetworkRequest) (*api.RemoveNetworkResponse, error) {
	if request.NetworkID == "" {
		return nil, grpc.Errorf(codes.InvalidArgument, errInvalidArgument.Error())
	}
	err := s.store.Update(func(tx store.Tx) error {
		// Refuse to delete a network any service still references.
		services, err := store.FindServices(tx, store.ByReferencedNetworkID(request.NetworkID))
		if err != nil {
			return grpc.Errorf(codes.Internal, "could not find services using network %s: %v", request.NetworkID, err)
		}
		if len(services) != 0 {
			return grpc.Errorf(codes.FailedPrecondition, "network %s is in use by service %s", request.NetworkID, services[0].ID)
		}
		// Likewise refuse if any task still references it.
		tasks, err := store.FindTasks(tx, store.ByReferencedNetworkID(request.NetworkID))
		if err != nil {
			return grpc.Errorf(codes.Internal, "could not find tasks using network %s: %v", request.NetworkID, err)
		}
		if len(tasks) != 0 {
			return grpc.Errorf(codes.FailedPrecondition, "network %s is in use by task %s", request.NetworkID, tasks[0].ID)
		}
		nw := store.GetNetwork(tx, request.NetworkID)
		// BUG FIX: the original dereferenced nw without a nil check and
		// panicked when the network does not exist. Returning
		// store.ErrNotExist routes through the NotFound handling below.
		if nw == nil {
			return store.ErrNotExist
		}
		// Swarm-internal (pre-defined) networks must not be removed.
		if _, ok := nw.Spec.Annotations.Labels["com.docker.swarm.internal"]; ok {
			networkDescription := nw.ID
			if nw.Spec.Annotations.Name != "" {
				networkDescription = fmt.Sprintf("%s (%s)", nw.Spec.Annotations.Name, nw.ID)
			}
			return grpc.Errorf(codes.PermissionDenied, "%s is a pre-defined network and cannot be removed", networkDescription)
		}
		return store.DeleteNetwork(tx, request.NetworkID)
	})
	if err != nil {
		if err == store.ErrNotExist {
			return nil, grpc.Errorf(codes.NotFound, "network %s not found", request.NetworkID)
		}
		return nil, err
	}
	return &api.RemoveNetworkResponse{}, nil
}
开发者ID:yongtang,项目名称:swarmkit,代码行数:46,代码来源:network.go
示例9: match
// match rebuilds the subscription's selection under the lock: it records
// every node named directly by the selector, plus the nodes of any selected
// tasks and of all tasks belonging to selected services. Tasks with no node
// assignment yet are parked in pendingTasks.
func (s *subscription) match() {
s.mu.Lock()
defer s.mu.Unlock()
// add registers the node a task is assigned to, or defers the task when it
// is still unassigned. s.wg is bumped once per newly-seen node —
// presumably balanced by a Done elsewhere in this type; confirm against
// the rest of the file.
add := func(t *api.Task) {
if t.NodeID == "" {
s.pendingTasks[t.ID] = struct{}{}
return
}
if _, ok := s.nodes[t.NodeID]; !ok {
s.nodes[t.NodeID] = struct{}{}
s.wg.Add(1)
}
}
s.store.View(func(tx store.ReadTx) {
// Nodes selected explicitly by ID.
for _, nid := range s.message.Selector.NodeIDs {
s.nodes[nid] = struct{}{}
}
// Nodes reached through individually selected tasks.
for _, tid := range s.message.Selector.TaskIDs {
if task := store.GetTask(tx, tid); task != nil {
add(task)
}
}
// Nodes reached through every task of the selected services.
for _, sid := range s.message.Selector.ServiceIDs {
tasks, err := store.FindTasks(tx, store.ByServiceID(sid))
if err != nil {
// Best effort: log and keep processing other services.
log.L.Warning(err)
continue
}
for _, task := range tasks {
add(task)
}
}
})
}
开发者ID:haoshuwei,项目名称:docker,代码行数:38,代码来源:subscription.go
示例10: restartTasksByNodeID
// restartTasksByNodeID marks every runnable replicated-service task on the
// given node for restart.
func (r *Orchestrator) restartTasksByNodeID(ctx context.Context, nodeID string) {
	var findErr error
	r.store.View(func(tx store.ReadTx) {
		var nodeTasks []*api.Task
		nodeTasks, findErr = store.FindTasks(tx, store.ByNodeID(nodeID))
		if findErr != nil {
			return
		}
		for _, task := range nodeTasks {
			// Tasks already desired past RUNNING are left alone.
			if task.DesiredState > api.TaskStateRunning {
				continue
			}
			// Only replicated services participate in restart tracking here.
			if orchestrator.IsReplicatedService(store.GetService(tx, task.ServiceID)) {
				r.restartTasks[task.ID] = struct{}{}
			}
		}
	})
	if findErr != nil {
		log.G(ctx).WithError(findErr).Errorf("failed to list tasks to remove")
	}
}
开发者ID:yongtang,项目名称:swarmkit,代码行数:23,代码来源:tasks.go
示例11: moveTasksToOrphaned
// moveTasksToOrphaned raises the status of every task on the given node to
// at least ORPHANED and persists the updates in a single batch.
func (d *Dispatcher) moveTasksToOrphaned(nodeID string) error {
	_, batchErr := d.store.Batch(func(batch *store.Batch) error {
		var (
			nodeTasks []*api.Task
			findErr   error
		)
		d.store.View(func(tx store.ReadTx) {
			nodeTasks, findErr = store.FindTasks(tx, store.ByNodeID(nodeID))
		})
		if findErr != nil {
			return findErr
		}
		for _, task := range nodeTasks {
			// Never lower a state that is already at or past ORPHANED.
			if task.Status.State < api.TaskStateOrphaned {
				task.Status.State = api.TaskStateOrphaned
			}
			task := task
			if err := batch.Update(func(tx store.Tx) error {
				return store.UpdateTask(tx, task)
			}); err != nil {
				return err
			}
		}
		return nil
	})
	return batchErr
}
开发者ID:yongtang,项目名称:swarmkit,代码行数:37,代码来源:dispatcher.go
示例12: getRunnableSlots
// getRunnableSlots returns a map of slots that have at least one task with
// a desired state above NEW and lesser or equal to RUNNING.
func getRunnableSlots(s *store.MemoryStore, serviceID string) (map[uint64]slot, error) {
	var (
		serviceTasks []*api.Task
		findErr      error
	)
	s.View(func(tx store.ReadTx) {
		serviceTasks, findErr = store.FindTasks(tx, store.ByServiceID(serviceID))
	})
	if findErr != nil {
		return nil, findErr
	}

	runnable := make(map[uint64]slot)
	for _, task := range serviceTasks {
		// Technically the check below could just be
		// task.DesiredState <= api.TaskStateRunning, but ignoring tasks
		// with DesiredState == NEW simplifies the drainer unit tests.
		if task.DesiredState > api.TaskStateNew && task.DesiredState <= api.TaskStateRunning {
			runnable[task.Slot] = append(runnable[task.Slot], task)
		}
	}
	return runnable, nil
}
开发者ID:CWSpear,项目名称:docker,代码行数:26,代码来源:services.go
示例13: Tasks
// Tasks is a stream of tasks state for node. Each message contains full list
// of tasks which should be run on node, if task is not present in that list,
// it should be terminated.
func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServer) error {
// Authenticate the calling node from the stream's TLS context.
nodeInfo, err := ca.RemoteNode(stream.Context())
if err != nil {
return err
}
nodeID := nodeInfo.NodeID
if err := d.isRunningLocked(); err != nil {
return err
}
fields := logrus.Fields{
"node.id": nodeID,
"node.session": r.SessionID,
"method": "(*Dispatcher).Tasks",
}
if nodeInfo.ForwardedBy != nil {
fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
}
log.G(stream.Context()).WithFields(fields).Debugf("")
// The session must still be valid for this node.
if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
tasksMap := make(map[string]*api.Task)
// Atomically load the node's current tasks and subscribe to subsequent
// create/update/delete events for tasks on this node.
nodeTasks, cancel, err := store.ViewAndWatch(
d.store,
func(readTx store.ReadTx) error {
tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
if err != nil {
return err
}
for _, t := range tasks {
tasksMap[t.ID] = t
}
return nil
},
state.EventCreateTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
)
if err != nil {
return err
}
defer cancel()
// Send a full snapshot, then block for the next event; repeat until the
// stream or the dispatcher context ends.
for {
if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
var tasks []*api.Task
for _, t := range tasksMap {
// dispatcher only sends tasks that have been assigned to a node
if t != nil && t.Status.State >= api.TaskStateAssigned {
tasks = append(tasks, t)
}
}
if err := stream.Send(&api.TasksMessage{Tasks: tasks}); err != nil {
return err
}
select {
case event := <-nodeTasks:
// Fold the event into the snapshot; the updated full list is
// sent on the next loop iteration.
switch v := event.(type) {
case state.EventCreateTask:
tasksMap[v.Task.ID] = v.Task
case state.EventUpdateTask:
tasksMap[v.Task.ID] = v.Task
case state.EventDeleteTask:
delete(tasksMap, v.Task.ID)
}
case <-stream.Context().Done():
return stream.Context().Err()
case <-d.ctx.Done():
return d.ctx.Err()
}
}
}
开发者ID:ypjin,项目名称:swarmkit,代码行数:87,代码来源:dispatcher.go
示例14: tick
// tick processes the reaper's accumulated dirty (service, slot/node) entries:
// for each one it collects the historic tasks of the slot (replicated) or of
// the service on the node (global) and deletes the oldest non-running tasks
// beyond the configured retention count. The dirty set is cleared on exit.
func (tr *TaskReaper) tick() {
if len(tr.dirty) == 0 {
return
}
defer func() {
tr.dirty = make(map[instanceTuple]struct{})
}()
var deleteTasks []string
tr.store.View(func(tx store.ReadTx) {
for dirty := range tr.dirty {
service := store.GetService(tx, dirty.serviceID)
if service == nil {
continue
}
taskHistory := tr.taskHistory
// Negative history means unlimited retention: reap nothing.
if taskHistory < 0 {
continue
}
var historicTasks []*api.Task
switch service.Spec.GetMode().(type) {
case *api.ServiceSpec_Replicated:
var err error
historicTasks, err = store.FindTasks(tx, store.BySlot(dirty.serviceID, dirty.instance))
if err != nil {
continue
}
case *api.ServiceSpec_Global:
// Global services have no slots; gather this service's tasks
// on the dirty node instead.
tasksByNode, err := store.FindTasks(tx, store.ByNodeID(dirty.nodeID))
if err != nil {
continue
}
for _, t := range tasksByNode {
if t.ServiceID == dirty.serviceID {
historicTasks = append(historicTasks, t)
}
}
}
if int64(len(historicTasks)) <= taskHistory {
continue
}
// TODO(aaronl): This could filter for non-running tasks and use quickselect
// instead of sorting the whole slice.
sort.Sort(tasksByTimestamp(historicTasks))
for _, t := range historicTasks {
if t.DesiredState <= api.TaskStateRunning {
// Don't delete running tasks
continue
}
deleteTasks = append(deleteTasks, t.ID)
// Each deletion raises the bar: stop once enough tasks remain.
taskHistory++
if int64(len(historicTasks)) <= taskHistory {
break
}
}
}
})
if len(deleteTasks) > 0 {
// NOTE(review): the Batch error is silently discarded here, so failed
// deletions go unreported until the next tick — confirm intended.
tr.store.Batch(func(batch *store.Batch) error {
for _, taskID := range deleteTasks {
batch.Update(func(tx store.Tx) error {
return store.DeleteTask(tx, taskID)
})
}
return nil
})
}
}
开发者ID:BrickXu,项目名称:docker,代码行数:83,代码来源:task_reaper.go
示例15: reconcileServices
// reconcileServices brings each listed global service to its desired state:
// every eligible node runs one copy of the task, nodes that completed their
// task or no longer meet constraints have their tasks removed, and tasks on
// unknown/drained nodes are cleaned up.
func (g *Orchestrator) reconcileServices(ctx context.Context, serviceIDs []string) {
// serviceID -> set of nodes whose task completed under the restart policy.
nodeCompleted := make(map[string]map[string]struct{})
// serviceID -> nodeID -> running tasks of that service on that node.
nodeTasks := make(map[string]map[string][]*api.Task)
g.store.View(func(tx store.ReadTx) {
for _, serviceID := range serviceIDs {
tasks, err := store.FindTasks(tx, store.ByServiceID(serviceID))
if err != nil {
log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServices failed finding tasks for service %s", serviceID)
continue
}
// a node may have completed this service
nodeCompleted[serviceID] = make(map[string]struct{})
// nodeID -> task list
nodeTasks[serviceID] = make(map[string][]*api.Task)
for _, t := range tasks {
if isTaskRunning(t) {
// Collect all running instances of this service
nodeTasks[serviceID][t.NodeID] = append(nodeTasks[serviceID][t.NodeID], t)
} else {
// for finished tasks, check restartPolicy
if isTaskCompleted(t, orchestrator.RestartCondition(t)) {
nodeCompleted[serviceID][t.NodeID] = struct{}{}
}
}
}
}
})
_, err := g.store.Batch(func(batch *store.Batch) error {
// NOTE(review): updateTasks is declared outside the serviceID loop and
// never reset, so slots accumulated for earlier services are re-sent
// with each later service's Update call — verify this is intended.
var updateTasks []orchestrator.Slot
for _, serviceID := range serviceIDs {
if _, exists := nodeTasks[serviceID]; !exists {
continue
}
service := g.globalServices[serviceID]
for nodeID, node := range g.nodes {
meetsConstraints := constraint.NodeMatches(service.constraints, node)
ntasks := nodeTasks[serviceID][nodeID]
// Consume the entry so the leftover map holds only unknown nodes.
delete(nodeTasks[serviceID], nodeID)
// if restart policy considers this node has finished its task
// it should remove all running tasks
if _, exists := nodeCompleted[serviceID][nodeID]; exists || !meetsConstraints {
g.removeTasks(ctx, batch, ntasks)
continue
}
if node.Spec.Availability == api.NodeAvailabilityPause {
// the node is paused, so we won't add or update
// any tasks
continue
}
// this node needs to run 1 copy of the task
if len(ntasks) == 0 {
g.addTask(ctx, batch, service.Service, nodeID)
} else {
updateTasks = append(updateTasks, ntasks)
}
}
if len(updateTasks) > 0 {
g.updater.Update(ctx, g.cluster, service.Service, updateTasks)
}
// Remove any tasks assigned to nodes not found in g.nodes.
// These must be associated with nodes that are drained, or
// nodes that no longer exist.
for _, ntasks := range nodeTasks[serviceID] {
g.removeTasks(ctx, batch, ntasks)
}
}
return nil
})
if err != nil {
log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServices transaction failed")
}
}
开发者ID:msabansal,项目名称:docker,代码行数:82,代码来源:global.go
示例16: Assignments
// Assignments is a stream of assignments for a node. Each message contains
// either full list of tasks and secrets for the node, or an incremental update.
func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error {
nodeInfo, err := ca.RemoteNode(stream.Context())
if err != nil {
return err
}
nodeID := nodeInfo.NodeID
dctx, err := d.isRunningLocked()
if err != nil {
return err
}
fields := logrus.Fields{
"node.id": nodeID,
"node.session": r.SessionID,
"method": "(*Dispatcher).Assignments",
}
if nodeInfo.ForwardedBy != nil {
fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
}
log := log.G(stream.Context()).WithFields(fields)
log.Debugf("")
if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
var (
sequence int64
appliesTo string
initial api.AssignmentsMessage
)
tasksMap := make(map[string]*api.Task)
tasksUsingSecret := make(map[string]map[string]struct{})
sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
sequence++
msg.AppliesTo = appliesTo
msg.ResultsIn = strconv.FormatInt(sequence, 10)
appliesTo = msg.ResultsIn
msg.Type = assignmentType
if err := stream.Send(&msg); err != nil {
return err
}
return nil
}
// returns a slice of new secrets to send down
addSecretsForTask := func(readTx store.ReadTx, t *api.Task) []*api.Secret {
container := t.Spec.GetContainer()
if container == nil {
return nil
}
var newSecrets []*api.Secret
for _, secretRef := range container.Secrets {
// Empty ID prefix will return all secrets. Bail if there is no SecretID
if secretRef.SecretID == "" {
log.Debugf("invalid secret reference")
continue
}
secretID := secretRef.SecretID
log := log.WithFields(logrus.Fields{
"secret.id": secretID,
"secret.name": secretRef.SecretName,
})
if len(tasksUsingSecret[secretID]) == 0 {
tasksUsingSecret[secretID] = make(map[string]struct{})
secrets, err := store.FindSecrets(readTx, store.ByIDPrefix(secretID))
if err != nil {
log.WithError(err).Errorf("error retrieving secret")
continue
}
if len(secrets) != 1 {
log.Debugf("secret not found")
continue
}
// If the secret was found and there was one result
// (there should never be more than one because of the
// uniqueness constraint), add this secret to our
// initial set that we send down.
newSecrets = append(newSecrets, secrets[0])
}
tasksUsingSecret[secretID][t.ID] = struct{}{}
}
return newSecrets
}
// TODO(aaronl): Also send node secrets that should be exposed to
// this node.
nodeTasks, cancel, err := store.ViewAndWatch(
d.store,
func(readTx store.ReadTx) error {
tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
//.........这里部分代码省略.........
开发者ID:yongtang,项目名称:swarmkit,代码行数:101,代码来源:dispatcher.go
示例17: Tasks
// Tasks is a stream of tasks state for node. Each message contains full list
// of tasks which should be run on node, if task is not present in that list,
// it should be terminated.
func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServer) error {
nodeInfo, err := ca.RemoteNode(stream.Context())
if err != nil {
return err
}
nodeID := nodeInfo.NodeID
dctx, err := d.isRunningLocked()
if err != nil {
return err
}
fields := logrus.Fields{
"node.id": nodeID,
"node.session": r.SessionID,
"method": "(*Dispatcher).Tasks",
}
if nodeInfo.ForwardedBy != nil {
fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
}
log.G(stream.Context()).WithFields(fields).Debugf("")
if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
tasksMap := make(map[string]*api.Task)
nodeTasks, cancel, err := store.ViewAndWatch(
d.store,
func(readTx store.ReadTx) error {
tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
if err != nil {
return err
}
for _, t := range tasks {
tasksMap[t.ID] = t
}
return nil
},
state.EventCreateTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
)
if err != nil {
return err
}
defer cancel()
for {
if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
var tasks []*api.Task
for _, t := range tasksMap {
// dispatcher only sends tasks that have been assigned to a node
if t != nil && t.Status.State >= api.TaskStateAssigned {
tasks = append(tasks, t)
}
}
if err := stream.Send(&api.TasksMessage{Tasks: tasks}); err != nil {
return err
}
// bursty events should be processed in batches and sent out snapshot
var (
modificationCnt int
batchingTimer *time.Timer
batchingTimeout <-chan time.Time
)
batchingLoop:
for modificationCnt < modificationBatchLimit {
select {
case event := <-nodeTasks:
switch v := event.(type) {
case state.EventCreateTask:
tasksMap[v.Task.ID] = v.Task
modificationCnt++
case state.EventUpdateTask:
if oldTask, exists := tasksMap[v.Task.ID]; exists {
// States ASSIGNED and below are set by the orchestrator/scheduler,
// not the agent, so tasks in these states need to be sent to the
// agent even if nothing else has changed.
if equality.TasksEqualStable(oldTask, v.Task) && v.Task.Status.State > api.TaskStateAssigned {
// this update should not trigger action at agent
tasksMap[v.Task.ID] = v.Task
continue
}
}
tasksMap[v.Task.ID] = v.Task
modificationCnt++
case state.EventDeleteTask:
//.........这里部分代码省略.........
开发者ID:yongtang,项目名称:swarmkit,代码行数:101,代码来源:dispatcher.go
示例18: doNetworkInit
//.........这里部分代码省略.........
node.Attachment.Network = ingressNetwork.Copy()
if err := a.allocateNode(ctx, nc, node); err != nil {
log.G(ctx).Errorf("Failed to allocate network resources for node %s during init: %v", node.ID, err)
}
}
// Allocate services in the store so far before we process watched events.
var services []*api.Service
a.store.View(func(tx store.ReadTx) {
services, err = store.FindServices(tx, store.All)
})
if err != nil {
return fmt.Errorf("error listing all services in store while trying to allocate during init: %v", err)
}
for _, s := range services {
if s.Spec.Endpoint == nil {
continue
}
if na.IsServiceAllocated(s) {
continue
}
if err := a.allocateService(ctx, nc, s); err != nil {
log.G(ctx).Errorf("failed allocating service %s during init: %v", s.ID, err)
}
}
// Allocate tasks in the store so far before we started watching.
var tasks []*api.Task
a.store.View(func(tx store.ReadTx) {
tasks, err = store.FindTasks(tx, store.All)
})
if err != nil {
return fmt.Errorf("error listing all tasks in store while trying to allocate during init: %v", err)
}
if _, err := a.store.Batch(func(batch *store.Batch) error {
for _, t := range tasks {
if taskDead(t) {
continue
}
var s *api.Service
if t.ServiceID != "" {
a.store.View(func(tx store.ReadTx) {
s = store.GetService(tx, t.ServiceID)
})
}
// Populate network attachments in the task
// based on service spec.
a.taskCreateNetworkAttachments(t, s)
if taskReadyForNetworkVote(t, s, nc) {
if t.Status.State >= api.TaskStateAllocated {
continue
}
if a.taskAllocateVote(networkVoter, t.ID) {
// If the task is not attached to any network, network
// allocators job is done. Immediately cast a vote so
// that the task can be moved to ALLOCATED state as
// soon as possible.
开发者ID:CadeLaRen,项目名称:docker-3,代码行数:67,代码来源:network.go
示例19: shutdownNoncompliantTasks
// shutdownNoncompliantTasks marks for shutdown any task on the node that no
// longer satisfies its placement constraints or whose resource reservations
// exceed what the node currently offers.
func (ce *ConstraintEnforcer) shutdownNoncompliantTasks(node *api.Node) {
// If the availability is "drain", the orchestrator will
// shut down all tasks.
// If the availability is "pause", we shouldn't touch
// the tasks on this node.
if node.Spec.Availability != api.NodeAvailabilityActive {
return
}
var (
tasks []*api.Task
err error
)
ce.store.View(func(tx store.ReadTx) {
tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID))
})
if err != nil {
// NOTE(review): on error this only logs and falls through with a nil
// task list, making the function a no-op — confirm intended.
log.L.WithError(err).Errorf("failed to list tasks for node ID %s", node.ID)
}
var availableMemoryBytes, availableNanoCPUs int64
if node.Description != nil && node.Description.Resources != nil {
availableMemoryBytes = node.Description.Resources.MemoryBytes
availableNanoCPUs = node.Description.Resources.NanoCPUs
}
removeTasks := make(map[string]*api.Task)
// TODO(aaronl): The set of tasks removed will be
// nondeterministic because it depends on the order of
// the slice returned from FindTasks. We could do
// a separate pass over the tasks for each type of
// resource, and sort by the size of the reservation
// to remove the most resource-intensive tasks.
for _, t := range tasks {
// Only tasks desired to be between ASSIGNED and RUNNING are live
// enough to matter here.
if t.DesiredState < api.TaskStateAssigned || t.DesiredState > api.TaskStateRunning {
continue
}
// Ensure that the task still meets scheduling
// constraints.
if t.Spec.Placement != nil && len(t.Spec.Placement.Constraints) != 0 {
constraints, _ := constraint.Parse(t.Spec.Placement.Constraints)
if !constraint.NodeMatches(constraints, node) {
removeTasks[t.ID] = t
continue
}
}
// Ensure that the task assigned to the node
// still satisfies the resource limits.
if t.Spec.Resources != nil && t.Spec.Resources.Reservations != nil {
if t.Spec.Resources.Reservations.MemoryBytes > availableMemoryBytes {
removeTasks[t.ID] = t
continue
}
if t.Spec.Resources.Reservations.NanoCPUs > availableNanoCPUs {
removeTasks[t.ID] = t
continue
}
// Tasks that fit consume the node's remaining budget.
availableMemoryBytes -= t.Spec.Resources.Reservations.MemoryBytes
availableNanoCPUs -= t.Spec.Resources.Reservations.NanoCPUs
}
}
if len(removeTasks) != 0 {
_, err := ce.store.Batch(func(batch *store.Batch) error {
for _, t := range removeTasks {
err := batch.Update(func(tx store.Tx) error {
// Re-read the task: it may have been deleted or already
// moved past RUNNING since the view above.
t = store.GetTask(tx, t.ID)
if t == nil || t.DesiredState > api.TaskStateRunning {
return nil
}
t.DesiredState = api.TaskStateShutdown
return store.UpdateTask(tx, t)
})
if err != nil {
log.L.WithError(err).Errorf("failed to shut down task %s", t.ID)
}
}
return nil
})
if err != nil {
log.L.WithError(err).Errorf("failed to shut down tasks")
}
}
}
开发者ID:JMesser81,项目名称:docker,代码行数:91,代码来源:constraint_enforcer.go
示例20: initTasks
// initTasks restores restart bookkeeping at startup: it schedules restarts
// for runnable tasks stranded on invalid nodes, deletes tasks whose service
// no longer exists, and re-arms or immediately fires the restart delay for
// replicated-service tasks left in the READY state.
func (r *Orchestrator) initTasks(ctx context.Context, readTx store.ReadTx) error {
tasks, err := store.FindTasks(readTx, store.All)
if err != nil {
return err
}
for _, t := range tasks {
if t.NodeID != "" {
n := store.GetNode(readTx, t.NodeID)
// Runnable tasks on invalid nodes are queued for restart.
if invalidNode(n) && t.Status.State <= api.TaskStateRunning && t.DesiredState <= api.TaskStateRunning {
r.restartTasks[t.ID] = struct{}{}
}
}
}
_, err = r.store.Batch(func(batch *store.Batch) error {
for _, t := range tasks {
if t.ServiceID == "" {
continue
}
// TODO(aluzzardi): We should NOT retrieve the service here.
service := store.GetService(readTx, t.ServiceID)
if service == nil {
// Service was deleted
err := batch.Update(func(tx store.Tx) error {
return store.DeleteTask(tx, t.ID)
})
if err != nil {
log.G(ctx).WithError(err).Error("failed to set task desired state to dead")
}
continue
}
// TODO(aluzzardi): This is shady. We should have a more generic condition.
if t.DesiredState != api.TaskStateReady || !orchestrator.IsReplicatedService(service) {
continue
}
restartDelay := orchestrator.DefaultRestartDelay
if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
var err error
restartDelay, err = gogotypes.DurationFromProto(t.Spec.Restart.Delay)
if err != nil {
log.G(ctx).WithError(err).Error("invalid restart delay")
restartDelay = orchestrator.DefaultRestartDelay
}
}
if restartDelay != 0 {
timestamp, err := gogotypes.TimestampFromProto(t.Status.Timestamp)
if err == nil {
// Clamp the delay to the time remaining since the task's
// last status change.
restartTime := timestamp.Add(restartDelay)
calculatedRestartDelay := restartTime.Sub(time.Now())
if calculatedRestartDelay < restartDelay {
restartDelay = calculatedRestartDelay
}
if restartDelay > 0 {
_ = batch.Update(func(tx store.Tx) error {
// Re-read the task: its state may have changed since
// the snapshot above.
t := store.GetTask(tx, t.ID)
// TODO(aluzzardi): This is shady as well. We should have a more generic condition.
if t == nil || t.DesiredState != api.TaskStateReady {
return nil
}
r.restarts.DelayStart(ctx, tx, nil, t.ID, restartDelay, true)
return nil
})
continue
}
} else {
log.G(ctx).WithError(err).Error("invalid status timestamp")
}
}
// Start now
err := batch.Update(func(tx store.Tx) error {
return r.restarts.StartNow(tx, t.ID)
})
if err != nil {
log.G(ctx).WithError(err).WithField("task.id", t.ID).Error("moving task out of delayed state failed")
}
}
return nil
})
return err
}
开发者ID:yongtang,项目名称:swarmkit,代码行数:83,代码来源:tasks.go
注:本文中的github.com/docker/swarmkit/manager/state/store.FindTasks函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论