本文整理汇总了Golang中github.com/docker/swarmkit/manager/state/store.UpdateTask函数的典型用法代码示例。如果您正苦于以下问题:Golang UpdateTask函数的具体用法?Golang UpdateTask怎么用?Golang UpdateTask使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了UpdateTask函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Golang代码示例。
示例1: useExistingTask
// useExistingTask keeps the given existing task as the slot's active task:
// it shuts down every other task in the slot and, if the existing task is not
// yet desired to be running, moves its desired state to RUNNING.
func (u *Updater) useExistingTask(ctx context.Context, slot slot, existing *api.Task) {
	// Collect every task in the slot other than the one we want to keep.
	var removeTasks []*api.Task
	for _, t := range slot {
		if t != existing {
			removeTasks = append(removeTasks, t)
		}
	}
	// Only open a store batch if there is actually something to change.
	if len(removeTasks) != 0 || existing.DesiredState != api.TaskStateRunning {
		_, err := u.store.Batch(func(batch *store.Batch) error {
			// Shut down the superfluous tasks first.
			u.removeOldTasks(ctx, batch, removeTasks)
			if existing.DesiredState != api.TaskStateRunning {
				err := batch.Update(func(tx store.Tx) error {
					// Re-read the task inside the transaction so we operate
					// on the latest stored version.
					t := store.GetTask(tx, existing.ID)
					if t == nil {
						return fmt.Errorf("task %s not found while trying to start it", existing.ID)
					}
					if t.DesiredState >= api.TaskStateRunning {
						return fmt.Errorf("task %s was already started when reached by updater", existing.ID)
					}
					t.DesiredState = api.TaskStateRunning
					return store.UpdateTask(tx, t)
				})
				if err != nil {
					// Starting failed; log and continue — the batch as a
					// whole is still committed.
					log.G(ctx).WithError(err).Errorf("starting task %s failed", existing.ID)
				}
			}
			return nil
		})
		if err != nil {
			log.G(ctx).WithError(err).Error("updater batch transaction failed")
		}
	}
}
开发者ID:HuKeping,项目名称:docker,代码行数:34,代码来源:updater.go
示例2: removeOldTasks
// removeOldTasks shuts down the given tasks and returns one of the tasks that
// was shut down, or an error.
func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, removeTasks []*api.Task) (*api.Task, error) {
	var lastErr error
	var removedTask *api.Task
	for _, candidate := range removeTasks {
		updateErr := batch.Update(func(tx store.Tx) error {
			// Operate on the freshest copy of the task from the store.
			current := store.GetTask(tx, candidate.ID)
			switch {
			case current == nil:
				return fmt.Errorf("task %s not found while trying to shut it down", candidate.ID)
			case current.DesiredState > api.TaskStateRunning:
				return fmt.Errorf("task %s was already shut down when reached by updater", candidate.ID)
			}
			current.DesiredState = api.TaskStateShutdown
			return store.UpdateTask(tx, current)
		})
		if updateErr != nil {
			lastErr = updateErr
			continue
		}
		removedTask = candidate
	}
	// Report a successfully removed task if there was one; otherwise
	// surface the last error encountered.
	if removedTask != nil {
		return removedTask, nil
	}
	return nil, lastErr
}
开发者ID:CWSpear,项目名称:docker,代码行数:31,代码来源:updater.go
示例3: StartNow
// StartNow moves the task into the RUNNING state so it will proceed to start
// up.
func (r *Supervisor) StartNow(tx store.Tx, taskID string) error {
	task := store.GetTask(tx, taskID)
	if task == nil {
		// Task no longer exists; nothing to do.
		return nil
	}
	if task.DesiredState >= api.TaskStateRunning {
		// Already at or past RUNNING; leave it alone.
		return nil
	}
	task.DesiredState = api.TaskStateRunning
	return store.UpdateTask(tx, task)
}
开发者ID:Mic92,项目名称:docker,代码行数:10,代码来源:restart.go
示例4: commitAllocatedTask
// commitAllocatedTask writes the network allocation results carried by t back
// to the store inside the given batch.
//
// If the stored copy changed since t was read (ErrSequenceConflict), the
// allocation results are re-applied on top of the latest stored version and
// the update is retried once.
func (a *Allocator) commitAllocatedTask(ctx context.Context, batch *store.Batch, t *api.Task) error {
	return batch.Update(func(tx store.Tx) error {
		err := store.UpdateTask(tx, t)
		if err == store.ErrSequenceConflict {
			storeTask := store.GetTask(tx, t.ID)
			if storeTask == nil {
				// The task was deleted while we were allocating; there is
				// nothing left to commit. Without this check the code below
				// would dereference nil.
				return errors.Errorf("task %s not found while committing allocation", t.ID)
			}
			// Re-apply our allocation results on top of the latest version.
			taskUpdateNetworks(storeTask, t.Networks)
			taskUpdateEndpoint(storeTask, t.Endpoint)
			// Only carry over the status if the stored task has not yet
			// progressed to PENDING.
			if storeTask.Status.State < api.TaskStatePending {
				storeTask.Status = t.Status
			}
			err = store.UpdateTask(tx, storeTask)
		}
		return errors.Wrapf(err, "failed updating state in store transaction for task %s", t.ID)
	})
}
开发者ID:Mic92,项目名称:docker,代码行数:17,代码来源:network.go
示例5: updateTask
// updateTask atomically replaces `original` with the freshly created task
// `updated`: the old task's desired state is set to SHUTDOWN, the new task is
// created in the same transaction, and the function then blocks until the new
// task reports a state >= RUNNING (or the updater is stopped).
func (u *Updater) updateTask(ctx context.Context, original, updated *api.Task) error {
	log.G(ctx).Debugf("replacing %s with %s", original.ID, updated.ID)
	// Kick off the watch before even creating the updated task. This is in order to avoid missing any event.
	taskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{
		Task:   &api.Task{ID: updated.ID},
		Checks: []state.TaskCheckFunc{state.TaskCheckID},
	})
	defer cancel()
	var delayStartCh <-chan struct{}
	// Atomically create the updated task and bring down the old one.
	err := u.store.Update(func(tx store.Tx) error {
		// Re-read the old task inside the transaction to avoid racing
		// with concurrent modifications.
		t := store.GetTask(tx, original.ID)
		if t == nil {
			return fmt.Errorf("task %s not found while trying to update it", original.ID)
		}
		if t.DesiredState > api.TaskStateRunning {
			return fmt.Errorf("task %s was already shut down when reached by updater", original.ID)
		}
		t.DesiredState = api.TaskStateShutdown
		if err := store.UpdateTask(tx, t); err != nil {
			return err
		}
		if err := store.CreateTask(tx, updated); err != nil {
			return err
		}
		// Wait for the old task to stop or time out, and then set the new one
		// to RUNNING.
		delayStartCh = u.restarts.DelayStart(ctx, tx, original, updated.ID, 0, true)
		return nil
	})
	if err != nil {
		return err
	}
	// Block until the restart supervisor has promoted the new task.
	<-delayStartCh
	// Wait for the new task to come up.
	// TODO(aluzzardi): Consider adding a timeout here.
	for {
		select {
		case e := <-taskUpdates:
			updated = e.(state.EventUpdateTask).Task
			if updated.Status.State >= api.TaskStateRunning {
				return nil
			}
		case <-u.stopChan:
			// Updater is shutting down; stop waiting without error.
			return nil
		}
	}
}
开发者ID:Chandra-TechPassionate,项目名称:docker,代码行数:55,代码来源:updater.go
示例6: processTaskUpdates
// processTaskUpdates drains the queued task status updates (d.taskUpdates)
// and applies them to the store in a single batch. Stale or backwards state
// transitions are skipped; individual failures are logged, never fatal.
func (d *Dispatcher) processTaskUpdates() {
	// Swap out the pending-update map under the lock so producers can keep
	// queuing new updates while this snapshot is committed.
	d.taskUpdatesLock.Lock()
	if len(d.taskUpdates) == 0 {
		d.taskUpdatesLock.Unlock()
		return
	}
	taskUpdates := d.taskUpdates
	d.taskUpdates = make(map[string]*api.TaskStatus)
	d.taskUpdatesLock.Unlock()
	log := log.G(d.ctx).WithFields(logrus.Fields{
		"method": "(*Dispatcher).processTaskUpdates",
	})
	_, err := d.store.Batch(func(batch *store.Batch) error {
		for taskID, status := range taskUpdates {
			err := batch.Update(func(tx store.Tx) error {
				logger := log.WithField("task.id", taskID)
				task := store.GetTask(tx, taskID)
				if task == nil {
					// Task was deleted in the meantime; drop the update.
					logger.Errorf("task unavailable")
					return nil
				}
				logger = logger.WithField("state.transition", fmt.Sprintf("%v->%v", task.Status.State, status.State))
				if task.Status == *status {
					logger.Debug("task status identical, ignoring")
					return nil
				}
				if task.Status.State > status.State {
					// Status may only move forward; ignore regressions.
					logger.Debug("task status invalid transition")
					return nil
				}
				task.Status = *status
				if err := store.UpdateTask(tx, task); err != nil {
					// Log and swallow: one bad update must not abort the
					// rest of the batch.
					logger.WithError(err).Error("failed to update task status")
					return nil
				}
				logger.Debug("task status updated")
				return nil
			})
			if err != nil {
				log.WithError(err).Error("dispatcher transaction failed")
			}
		}
		return nil
	})
	if err != nil {
		log.WithError(err).Error("dispatcher batch failed")
	}
}
开发者ID:ypjin,项目名称:swarmkit,代码行数:54,代码来源:dispatcher.go
示例7: removeTask
// removeTask sets the desired state of the given task to SHUTDOWN so the
// agent will stop it. The change is performed inside the supplied batch.
func (g *GlobalOrchestrator) removeTask(ctx context.Context, batch *store.Batch, t *api.Task) {
	// set existing task DesiredState to TaskStateShutdown
	// TODO(aaronl): optimistic update?
	// Save the ID up front: the original code reassigned the parameter t
	// inside the closure, so a nil GetTask result combined with a batch
	// error would panic on t.ID in the log statement below.
	taskID := t.ID
	err := batch.Update(func(tx store.Tx) error {
		// Re-read the task inside the transaction; it may have been
		// deleted or modified since the caller read it.
		task := store.GetTask(tx, taskID)
		if task == nil {
			// Already gone; nothing to shut down.
			return nil
		}
		task.DesiredState = api.TaskStateShutdown
		return store.UpdateTask(tx, task)
	})
	if err != nil {
		log.G(ctx).WithError(err).Errorf("global orchestrator: removeTask failed to remove %s", taskID)
	}
}
开发者ID:CadeLaRen,项目名称:docker-3,代码行数:15,代码来源:global.go
示例8: removeTasks
// removeTasks sets the desired state of each given task to SHUTDOWN so the
// agents will stop them. Failures are logged and do not abort the loop.
func (r *ReplicatedOrchestrator) removeTasks(ctx context.Context, batch *store.Batch, service *api.Service, tasks []*api.Task) {
	for _, t := range tasks {
		// Save the ID up front: the original code reassigned the loop
		// variable t inside the closure, so a nil GetTask result combined
		// with a batch error would panic on t.ID in the log below.
		taskID := t.ID
		err := batch.Update(func(tx store.Tx) error {
			// TODO(aaronl): optimistic update?
			task := store.GetTask(tx, taskID)
			if task == nil {
				// Already gone; nothing to shut down.
				return nil
			}
			task.DesiredState = api.TaskStateShutdown
			return store.UpdateTask(tx, task)
		})
		if err != nil {
			log.G(ctx).WithError(err).Errorf("removing task %s failed", taskID)
		}
	}
}
开发者ID:amitshukla,项目名称:docker,代码行数:16,代码来源:services.go
示例9: removeOldTasks
// removeOldTasks marks each of the given stale tasks for shutdown. Failures
// on individual tasks are logged and do not stop the loop.
func (u *Updater) removeOldTasks(ctx context.Context, batch *store.Batch, removeTasks []*api.Task) {
	for _, stale := range removeTasks {
		shutdownErr := batch.Update(func(tx store.Tx) error {
			// Operate on the freshest copy of the task from the store.
			current := store.GetTask(tx, stale.ID)
			if current == nil {
				return fmt.Errorf("task %s not found while trying to shut it down", stale.ID)
			}
			if current.DesiredState > api.TaskStateRunning {
				return fmt.Errorf("task %s was already shut down when reached by updater", stale.ID)
			}
			current.DesiredState = api.TaskStateShutdown
			return store.UpdateTask(tx, current)
		})
		if shutdownErr != nil {
			log.G(ctx).WithError(shutdownErr).Errorf("shutting down stale task %s failed", stale.ID)
		}
	}
}
开发者ID:HuKeping,项目名称:docker,代码行数:18,代码来源:updater.go
示例10: moveTasksToOrphaned
// moveTasksToOrphaned marks every task assigned to the given node as ORPHANED
// (unless it has already progressed past that state) and persists the changes
// in a single store batch.
func (d *Dispatcher) moveTasksToOrphaned(nodeID string) error {
	_, err := d.store.Batch(func(batch *store.Batch) error {
		var (
			tasks   []*api.Task
			findErr error
		)
		// Look up all tasks currently assigned to this node.
		d.store.View(func(tx store.ReadTx) {
			tasks, findErr = store.FindTasks(tx, store.ByNodeID(nodeID))
		})
		if findErr != nil {
			return findErr
		}
		for _, task := range tasks {
			// Don't regress tasks that are already past ORPHANED.
			if task.Status.State < api.TaskStateOrphaned {
				task.Status.State = api.TaskStateOrphaned
			}
			updateErr := batch.Update(func(tx store.Tx) error {
				return store.UpdateTask(tx, task)
			})
			if updateErr != nil {
				return updateErr
			}
		}
		return nil
	})
	return err
}
开发者ID:yongtang,项目名称:swarmkit,代码行数:37,代码来源:dispatcher.go
示例11: TestUpdaterRollback
func TestUpdaterRollback(t *testing.T) {
ctx := context.Background()
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
orchestrator := NewReplicatedOrchestrator(s)
defer orchestrator.Stop()
var (
failImage1 uint32
failImage2 uint32
)
watchCreate, cancelCreate := state.Watch(s.WatchQueue(), state.EventCreateTask{})
defer cancelCreate()
watchServiceUpdate, cancelServiceUpdate := state.Watch(s.WatchQueue(), state.EventUpdateService{})
defer cancelServiceUpdate()
// Fail new tasks the updater tries to run
watchUpdate, cancelUpdate := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancelUpdate()
go func() {
failedLast := false
for {
select {
case e := <-watchUpdate:
task := e.(state.EventUpdateTask).Task
if task.DesiredState == task.Status.State {
continue
}
if task.DesiredState == api.TaskStateRunning && task.Status.State != api.TaskStateFailed && task.Status.State != api.TaskStateRunning {
err := s.Update(func(tx store.Tx) error {
task = store.GetTask(tx, task.ID)
// Never fail two image2 tasks in a row, so there's a mix of
// failed and successful tasks for the rollback.
if task.Spec.GetContainer().Image == "image1" && atomic.LoadUint32(&failImage1) == 1 {
task.Status.State = api.TaskStateFailed
failedLast = true
} else if task.Spec.GetContainer().Image == "image2" && atomic.LoadUint32(&failImage2) == 1 && !failedLast {
task.Status.State = api.TaskStateFailed
failedLast = true
} else {
task.Status.State = task.DesiredState
failedLast = false
}
return store.UpdateTask(tx, task)
})
assert.NoError(t, err)
} else if task.DesiredState > api.TaskStateRunning {
err := s.Update(func(tx store.Tx) error {
task = store.GetTask(tx, task.ID)
task.Status.State = task.DesiredState
return store.UpdateTask(tx, task)
})
assert.NoError(t, err)
}
}
}
}()
// Create a service with four replicas specified before the orchestrator
// is started. This should result in two tasks when the orchestrator
// starts up.
err := s.Update(func(tx store.Tx) error {
s1 := &api.Service{
ID: "id1",
Spec: api.ServiceSpec{
Annotations: api.Annotations{
Name: "name1",
},
Task: api.TaskSpec{
Runtime: &api.TaskSpec_Container{
Container: &api.ContainerSpec{
Image: "image1",
},
},
Restart: &api.RestartPolicy{
Condition: api.RestartOnNone,
},
},
Mode: &api.ServiceSpec_Replicated{
Replicated: &api.ReplicatedService{
Replicas: 4,
},
},
Update: &api.UpdateConfig{
FailureAction: api.UpdateConfig_ROLLBACK,
Parallelism: 1,
Delay: *ptypes.DurationProto(10 * time.Millisecond),
Monitor: ptypes.DurationProto(500 * time.Millisecond),
MaxFailureRatio: 0.4,
},
},
}
assert.NoError(t, store.CreateService(tx, s1))
return nil
})
//.........这里部分代码省略.........
开发者ID:docker,项目名称:swarmkit,代码行数:101,代码来源:update_test.go
示例12: TestOrchestratorRestartDelay
// TestOrchestratorRestartDelay verifies that with RestartOnAny and a 100ms
// restart delay, a failed task's replacement is first created with desired
// state READY and only promoted to RUNNING after the delay has elapsed.
func TestOrchestratorRestartDelay(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()
	watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
	defer cancel()
	// Create a service with two instances specified before the orchestrator is
	// started. This should result in two tasks when the orchestrator
	// starts up.
	err := s.Update(func(tx store.Tx) error {
		j1 := &api.Service{
			ID: "id1",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name1",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
					Restart: &api.RestartPolicy{
						Condition: api.RestartOnAny,
						Delay:     ptypes.DurationProto(100 * time.Millisecond),
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 2,
					},
				},
			},
		}
		assert.NoError(t, store.CreateService(tx, j1))
		return nil
	})
	assert.NoError(t, err)
	// Start the orchestrator.
	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()
	// Both replicas should be created in the NEW state.
	observedTask1 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")
	observedTask2 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")
	// Fail the first task. Confirm that it gets restarted.
	updatedTask1 := observedTask1.Copy()
	updatedTask1.Status = api.TaskStatus{State: api.TaskStateFailed}
	before := time.Now()
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.UpdateTask(tx, updatedTask1))
		return nil
	})
	assert.NoError(t, err)
	// Drain the store events triggered by the failure.
	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	// The replacement appears in NEW with desired state READY while the
	// restart delay is still pending.
	observedTask3 := watchTaskCreate(t, watch)
	expectCommit(t, watch)
	assert.Equal(t, observedTask3.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask3.DesiredState, api.TaskStateReady)
	assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1")
	observedTask4 := watchTaskUpdate(t, watch)
	after := time.Now()
	// At least 100 ms should have elapsed. Only check the lower bound,
	// because the system may be slow and it could have taken longer.
	if after.Sub(before) < 100*time.Millisecond {
		t.Fatalf("restart delay should have elapsed. Got: %v", after.Sub(before))
	}
	// After the delay the replacement's desired state becomes RUNNING.
	assert.Equal(t, observedTask4.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask4.DesiredState, api.TaskStateRunning)
	assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1")
}
开发者ID:ypjin,项目名称:swarmkit,代码行数:88,代码来源:restart_test.go
示例13: procUnallocatedTasksNetwork
// procUnallocatedTasksNetwork attempts network allocation for every task in
// nc.unallocatedTasks and commits the results to the store, retrying the
// commit batch up to 3 times for tasks that did not make it into a committed
// batch.
func (a *Allocator) procUnallocatedTasksNetwork(ctx context.Context, nc *networkContext) {
	tasks := make([]*api.Task, 0, len(nc.unallocatedTasks))
	// First pass: allocate resources for each task; collect the allocated
	// copies for the commit bookkeeping below.
	committed, err := a.store.Batch(func(batch *store.Batch) error {
		for _, t := range nc.unallocatedTasks {
			var allocatedT *api.Task
			err := batch.Update(func(tx store.Tx) error {
				var err error
				allocatedT, err = a.allocateTask(ctx, nc, tx, t)
				return err
			})
			if err != nil {
				// Allocation can legitimately fail (e.g. dependencies not
				// yet allocated); leave the task in unallocatedTasks.
				log.G(ctx).WithError(err).Error("task allocation failure")
				continue
			}
			tasks = append(tasks, allocatedT)
		}
		return nil
	})
	if err != nil {
		log.G(ctx).WithError(err).Error("failed a store batch operation while processing unallocated tasks")
	}
	var retryCnt int
	// Batch may commit only a prefix of the updates; `committed` counts how
	// many made it in. Retry the uncommitted tail a bounded number of times.
	for len(tasks) != 0 {
		var err error
		// The first `committed` tasks reached the store; they are no longer
		// unallocated.
		for _, t := range tasks[:committed] {
			delete(nc.unallocatedTasks, t.ID)
		}
		tasks = tasks[committed:]
		if len(tasks) == 0 {
			break
		}
		updatedTasks := make([]*api.Task, 0, len(tasks))
		committed, err = a.store.Batch(func(batch *store.Batch) error {
			for _, t := range tasks {
				err := batch.Update(func(tx store.Tx) error {
					return store.UpdateTask(tx, t)
				})
				if err != nil {
					log.G(ctx).WithError(err).Error("allocated task store update failure")
					continue
				}
				updatedTasks = append(updatedTasks, t)
			}
			return nil
		})
		if err != nil {
			log.G(ctx).WithError(err).Error("failed a store batch operation while processing unallocated tasks")
		}
		tasks = updatedTasks
		// Bail out promptly if the allocator is shutting down.
		select {
		case <-ctx.Done():
			return
		default:
		}
		retryCnt++
		if retryCnt >= 3 {
			log.G(ctx).Errorf("failed to complete batch update of allocated tasks after 3 retries")
			break
		}
	}
}
开发者ID:maxim28,项目名称:docker,代码行数:76,代码来源:network.go
示例14: doNetworkInit
//.........这里部分代码省略.........
node.Attachment.Network = nc.ingressNetwork.Copy()
if err := a.allocateNode(ctx, nc, node); err != nil {
log.G(ctx).Errorf("Failed to allocate network resources for node %s during init: %v", node.ID, err)
}
}
// Allocate services in the store so far before we process watched events.
var services []*api.Service
a.store.View(func(tx store.ReadTx) {
services, err = store.FindServices(tx, store.All)
})
if err != nil {
return fmt.Errorf("error listing all services in store while trying to allocate during init: %v", err)
}
for _, s := range services {
if nc.nwkAllocator.IsServiceAllocated(s) {
continue
}
if err := a.allocateService(ctx, nc, s); err != nil {
log.G(ctx).Errorf("failed allocating service %s during init: %v", s.ID, err)
}
}
// Allocate tasks in the store so far before we started watching.
var tasks []*api.Task
a.store.View(func(tx store.ReadTx) {
tasks, err = store.FindTasks(tx, store.All)
})
if err != nil {
return fmt.Errorf("error listing all tasks in store while trying to allocate during init: %v", err)
}
if _, err := a.store.Batch(func(batch *store.Batch) error {
for _, t := range tasks {
if taskDead(t) {
continue
}
var s *api.Service
if t.ServiceID != "" {
a.store.View(func(tx store.ReadTx) {
s = store.GetService(tx, t.ServiceID)
})
}
// Populate network attachments in the task
// based on service spec.
a.taskCreateNetworkAttachments(t, s)
if taskReadyForNetworkVote(t, s, nc) {
if t.Status.State >= api.TaskStateAllocated {
continue
}
if a.taskAllocateVote(networkVoter, t.ID) {
// If the task is not attached to any network, network
// allocators job is done. Immediately cast a vote so
// that the task can be moved to ALLOCATED state as
// soon as possible.
if err := batch.Update(func(tx store.Tx) error {
storeT := store.GetTask(tx, t.ID)
if storeT == nil {
return fmt.Errorf("task %s not found while trying to update state", t.ID)
}
updateTaskStatus(storeT, api.TaskStateAllocated, "allocated")
if err := store.UpdateTask(tx, storeT); err != nil {
return fmt.Errorf("failed updating state in store transaction for task %s: %v", storeT.ID, err)
}
return nil
}); err != nil {
log.G(ctx).WithError(err).Error("error updating task network")
}
}
continue
}
err := batch.Update(func(tx store.Tx) error {
_, err := a.allocateTask(ctx, nc, tx, t)
return err
})
if err != nil {
log.G(ctx).Errorf("failed allocating task %s during init: %v", t.ID, err)
nc.unallocatedTasks[t.ID] = t
}
}
return nil
}); err != nil {
return err
}
a.netCtx = nc
return nil
}
开发者ID:maxim28,项目名称:docker,代码行数:101,代码来源:network.go
示例15: allocateTask
// allocateTask performs network allocation for a single task on top of the
// latest stored version of that task, and moves the task to ALLOCATED once
// this voter's vote completes. It returns the (possibly updated) store copy.
func (a *Allocator) allocateTask(ctx context.Context, nc *networkContext, tx store.Tx, t *api.Task) (*api.Task, error) {
	taskUpdated := false
	// Get the latest task state from the store before updating.
	storeT := store.GetTask(tx, t.ID)
	if storeT == nil {
		return nil, fmt.Errorf("could not find task %s while trying to update network allocation", t.ID)
	}
	// We might be here even if a task allocation has already
	// happened but wasn't successfully committed to store. In such
	// cases skip allocation and go straight ahead to updating the
	// store.
	if !nc.nwkAllocator.IsTaskAllocated(t) {
		if t.ServiceID != "" {
			s := store.GetService(tx, t.ServiceID)
			if s == nil {
				return nil, fmt.Errorf("could not find service %s", t.ServiceID)
			}
			// The owning service must be fully allocated before its tasks.
			if !nc.nwkAllocator.IsServiceAllocated(s) {
				return nil, fmt.Errorf("service %s to which this task %s belongs has pending allocations", s.ID, t.ID)
			}
			taskUpdateEndpoint(t, s.Endpoint)
		}
		// Refresh each attachment with the allocated network object; every
		// referenced network must itself be allocated already.
		for _, na := range t.Networks {
			n := store.GetNetwork(tx, na.Network.ID)
			if n == nil {
				return nil, fmt.Errorf("failed to retrieve network %s while allocating task %s", na.Network.ID, t.ID)
			}
			if !nc.nwkAllocator.IsAllocated(n) {
				return nil, fmt.Errorf("network %s attached to task %s not allocated yet", n.ID, t.ID)
			}
			na.Network = n
		}
		if err := nc.nwkAllocator.AllocateTask(t); err != nil {
			return nil, fmt.Errorf("failed during networktask allocation for task %s: %v", t.ID, err)
		}
		if nc.nwkAllocator.IsTaskAllocated(t) {
			// Copy the allocation results onto the fresh store copy.
			taskUpdateNetworks(storeT, t.Networks)
			taskUpdateEndpoint(storeT, t.Endpoint)
			taskUpdated = true
		}
	}
	// Update the network allocations and moving to
	// ALLOCATED state on top of the latest store state.
	if a.taskAllocateVote(networkVoter, t.ID) {
		if storeT.Status.State < api.TaskStateAllocated {
			updateTaskStatus(storeT, api.TaskStateAllocated, "allocated")
			taskUpdated = true
		}
	}
	// Only write back when something actually changed.
	if taskUpdated {
		if err := store.UpdateTask(tx, storeT); err != nil {
			return nil, fmt.Errorf("failed updating state in store transaction for task %s: %v", storeT.ID, err)
		}
	}
	return storeT, nil
}
开发者ID:maxim28,项目名称:docker,代码行数:67,代码来源:network.go
示例16: TestOrchestratorRestartWindow
func TestOrchestratorRestartWindow(t *testing.T) {
ctx := context.Background()
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
orchestrator := NewReplicatedOrchestrator(s)
defer orchestrator.Stop()
watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
defer cancel()
// Create a service with two instances specified before the orchestrator is
// started. This should result in two tasks when the orchestrator
// starts up.
err := s.Update(func(tx store.Tx) error {
j1 := &api.Service{
ID: "id1",
Spec: api.ServiceSpec{
Annotations: api.Annotations{
Name: "name1",
},
Mode: &api.ServiceSpec_Replicated{
Replicated: &api.ReplicatedService{
Replicas: 2,
},
},
Task: api.TaskSpec{
Restart: &api.RestartPolicy{
Condition: api.RestartOnAny,
Delay: ptypes.DurationProto(100 * time.Millisecond),
MaxAttempts: 1,
Window: ptypes.DurationProto(500 * time.Millisecond),
},
},
},
}
assert.NoError(t, store.CreateService(tx, j1))
return nil
})
assert.NoError(t, err)
// Start the orchestrator.
go func() {
assert.NoError(t, orchestrator.Run(ctx))
}()
observedTask1 := watchTaskCreate(t, watch)
assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")
observedTask2 := watchTaskCreate(t, watch)
assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")
// Fail the first task. Confirm that it gets restarted.
updatedTask1 := observedTask1.Copy()
updatedTask1.Status = api.TaskStatus{State: api.TaskStateFailed}
before := time.Now()
err = s.Update(func(tx store.Tx) error {
assert.NoError(t, store.UpdateTask(tx, updatedTask1))
return nil
})
assert.NoError(t, err)
expectCommit(t, watch)
expectTaskUpdate(t, watch)
expectCommit(t, watch)
expectTaskUpdate(t, watch)
observedTask3 := watchTaskCreate(t, watch)
expectCommit(t, watch)
assert.Equal(t, observedTask3.Status.State, api.TaskStateNew)
assert.Equal(t, observedTask3.DesiredState, api.TaskStateReady)
assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1")
observedTask4 := watchTaskUpdate(t, watch)
after := time.Now()
// At least 100 ms should have elapsed. Only check the lower bound,
// because the system may be slow and it could have taken longer.
if after.Sub(before) < 100*time.Millisecond {
t.Fatal("restart delay should have elapsed")
}
assert.Equal(t, observedTask4.Status.State, api.TaskStateNew)
assert.Equal(t, observedTask4.DesiredState, api.TaskStateRunning)
assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1")
// Fail the second task. Confirm that it gets restarted.
updatedTask2 := observedTask2.Copy()
updatedTask2.Status = api.TaskStatus{State: api.TaskStateFailed}
before = time.Now()
err = s.Update(func(tx store.Tx) error {
assert.NoError(t, store.UpdateTask(tx, updatedTask2))
return nil
})
assert.NoError(t, err)
expectCommit(t, watch)
expectTaskUpdate(t, watch)
expectCommit(t, watch)
expectTaskUpdate(t, watch)
//.........这里部分代码省略.........
开发者ID:ypjin,项目名称:swarmkit,代码行数:101,代码来源:restart_test.go
示例17: TestTaskHistory
func TestTaskHistory(t *testing.T) {
ctx := context.Background()
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
assert.NoError(t, s.Update(func(tx store.Tx) error {
store.CreateCluster(tx, &api.Cluster{
ID: identity.NewID(),
Spec: api.ClusterSpec{
Annotations: api.Annotations{
Name: store.DefaultClusterName,
},
Orchestration: api.OrchestrationConfig{
TaskHistoryRetentionLimit: 2,
},
},
})
return nil
}))
taskReaper := NewTaskReaper(s)
defer taskReaper.Stop()
orchestrator := NewReplicatedOrchestrator(s)
defer orchestrator.Stop()
watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
defer cancel()
// Create a service with two instances specified before the orchestrator is
// started. This should result in two tasks when the orchestrator
// starts up.
err := s.Update(func(tx store.Tx) error {
j1 := &api.Service{
ID: "id1",
Spec: api.ServiceSpec{
Annotations: api.Annotations{
Name: "name1",
},
Mode: &api.ServiceSpec_Replicated{
Replicated: &api.ReplicatedService{
Replicas: 2,
},
},
Task: api.TaskSpec{
Restart: &api.RestartPolicy{
Condition: api.RestartOnAny,
Delay: ptypes.DurationProto(0),
},
},
},
}
assert.NoError(t, store.CreateService(tx, j1))
return nil
})
assert.NoError(t, err)
// Start the orchestrator.
go func() {
assert.NoError(t, orchestrator.Run(ctx))
}()
go taskReaper.Run()
observedTask1 := watchTaskCreate(t, watch)
assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")
observedTask2 := watchTaskCreate(t, watch)
assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")
// Fail both tasks. They should both get restarted.
updatedTask1 := observedTask1.Copy()
updatedTask1.Status.State = api.TaskStateFailed
updatedTask1.ServiceAnnotations = api.Annotations{Name: "original"}
updatedTask2 := observedTask2.Copy()
updatedTask2.Status.State = api.TaskStateFailed
updatedTask2.ServiceAnnotations = api.Annotations{Name: "original"}
err = s.Update(func(tx store.Tx) error {
assert.NoError(t, store.UpdateTask(tx, updatedTask1))
assert.NoError(t, store.UpdateTask(tx, updatedTask2))
return nil
})
expectCommit(t, watch)
expectTaskUpdate(t, watch)
expectTaskUpdate(t, watch)
expectCommit(t, watch)
expectTaskUpdate(t, watch)
observedTask3 := watchTaskCreate(t, watch)
assert.Equal(t, observedTask3.Status.State, api.TaskStateNew)
assert.Equal(t, observedTask3.ServiceAnnotations.Name, "name1")
expectTaskUpdate(t, watch)
observedTask4 := watchTaskCreate(t, watch)
assert.Equal(t, observedTask4.Status.State, api.TaskStateNew)
assert.Equal(t, observedTask4.ServiceAnnotations.Name, "name1")
// Fail these replacement tasks. Since TaskHistory is set to 2, this
// should cause the oldest tasks for each instance to get deleted.
//.........这里部分代码省略.........
开发者ID:ypjin,项目名称:swarmkit,代码行数:101,代码来源:task_reaper_test.go
示例18: shutdownNoncompliantTasks
// shutdownNoncompliantTasks marks for shutdown every task on the given node
// that no longer satisfies its scheduling constraints or no longer fits
// within the node's advertised resources. Only nodes with availability
// "active" are considered.
func (ce *ConstraintEnforcer) shutdownNoncompliantTasks(node *api.Node) {
	// If the availability is "drain", the orchestrator will
	// shut down all tasks.
	// If the availability is "pause", we shouldn't touch
	// the tasks on this node.
	if node.Spec.Availability != api.NodeAvailabilityActive {
		return
	}
	var (
		tasks []*api.Task
		err   error
	)
	ce.store.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID))
	})
	if err != nil {
		// NOTE(review): on error, tasks is nil so the loops below are
		// no-ops; the function continues instead of returning early.
		log.L.WithError(err).Errorf("failed to list tasks for node ID %s", node.ID)
	}
	// Running budget of resources still available on the node.
	var availableMemoryBytes, availableNanoCPUs int64
	if node.Description != nil && node.Description.Resources != nil {
		availableMemoryBytes = node.Description.Resources.MemoryBytes
		availableNanoCPUs = node.Description.Resources.NanoCPUs
	}
	removeTasks := make(map[string]*api.Task)
	// TODO(aaronl): The set of tasks removed will be
	// nondeterministic because it depends on the order of
	// the slice returned from FindTasks. We could do
	// a separate pass over the tasks for each type of
	// resource, and sort by the size of the reservation
	// to remove the most resource-intensive tasks.
	for _, t := range tasks {
		// Only consider tasks that are meant to be running on this node.
		if t.DesiredState < api.TaskStateAssigned || t.DesiredState > api.TaskStateRunning {
			continue
		}
		// Ensure that the task still meets scheduling
		// constraints.
		if t.Spec.Placement != nil && len(t.Spec.Placement.Constraints) != 0 {
			constraints, _ := constraint.Parse(t.Spec.Placement.Constraints)
			if !constraint.NodeMatches(constraints, node) {
				removeTasks[t.ID] = t
				continue
			}
		}
		// Ensure that the task assigned to the node
		// still satisfies the resource limits.
		if t.Spec.Resources != nil && t.Spec.Resources.Reservations != nil {
			if t.Spec.Resources.Reservations.MemoryBytes > availableMemoryBytes {
				removeTasks[t.ID] = t
				continue
			}
			if t.Spec.Resources.Reservations.NanoCPUs > availableNanoCPUs {
				removeTasks[t.ID] = t
				continue
			}
			// Task fits; deduct its reservation from the remaining budget.
			availableMemoryBytes -= t.Spec.Resources.Reservations.MemoryBytes
			availableNanoCPUs -= t.Spec.Resources.Reservations.NanoCPUs
		}
	}
	if len(removeTasks) != 0 {
		_, err := ce.store.Batch(func(batch *store.Batch) error {
			for _, t := range removeTasks {
				err := batch.Update(func(tx store.Tx) error {
					// NOTE(review): t is reassigned here; if GetTask returns
					// nil and batch.Update itself fails, the t.ID in the log
					// below would dereference nil — confirm.
					t = store.GetTask(tx, t.ID)
					if t == nil || t.DesiredState > api.TaskStateRunning {
						// Already gone or already shutting down.
						return nil
					}
					t.DesiredState = api.TaskStateShutdown
					return store.UpdateTask(tx, t)
				})
				if err != nil {
					log.L.WithError(err).Errorf("failed to shut down task %s", t.ID)
				}
			}
			return nil
		})
		if err != nil {
			log.L.WithError(err).Errorf("failed to shut down tasks")
		}
	}
}
开发者ID:JMesser81,项目名称:docker,代码行数:91,代码来源:constraint_enforcer.go
示例19: TestOrchestratorRestartOnNone
// TestOrchestratorRestartOnNone verifies that with the RestartOnNone policy,
// neither a failed task nor a completed task gets replaced by the
// orchestrator.
func TestOrchestratorRestartOnNone(t *testing.T) {
	ctx := context.Background()
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	orchestrator := NewReplicatedOrchestrator(s)
	defer orchestrator.Stop()
	watch, cancel := state.Watch(s.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
	defer cancel()
	// Create a service with two instances specified before the orchestrator is
	// started. This should result in two tasks when the orchestrator
	// starts up.
	err := s.Update(func(tx store.Tx) error {
		j1 := &api.Service{
			ID: "id1",
			Spec: api.ServiceSpec{
				Annotations: api.Annotations{
					Name: "name1",
				},
				Task: api.TaskSpec{
					Runtime: &api.TaskSpec_Container{
						Container: &api.ContainerSpec{},
					},
					Restart: &api.RestartPolicy{
						Condition: api.RestartOnNone,
					},
				},
				Mode: &api.ServiceSpec_Replicated{
					Replicated: &api.ReplicatedService{
						Replicas: 2,
					},
				},
			},
		}
		assert.NoError(t, store.CreateService(tx, j1))
		return nil
	})
	assert.NoError(t, err)
	// Start the orchestrator.
	go func() {
		assert.NoError(t, orchestrator.Run(ctx))
	}()
	// Both replicas should be created in the NEW state.
	observedTask1 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")
	observedTask2 := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")
	// Fail the first task. Confirm that it does not get restarted.
	updatedTask1 := observedTask1.Copy()
	updatedTask1.Status.State = api.TaskStateFailed
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.UpdateTask(tx, updatedTask1))
		return nil
	})
	assert.NoError(t, err)
	// Drain the events triggered by the failure.
	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	expectCommit(t, watch)
	// No replacement task should appear within the grace window.
	select {
	case <-watch:
		t.Fatal("got unexpected event")
	case <-time.After(100 * time.Millisecond):
	}
	// Mark the second task as completed. Confirm that it does not get restarted.
	updatedTask2 := observedTask2.Copy()
	updatedTask2.Status = api.TaskStatus{State: api.TaskStateCompleted}
	err = s.Update(func(tx store.Tx) error {
		assert.NoError(t, store.UpdateTask(tx, updatedTask2))
		return nil
	})
	assert.NoError(t, err)
	expectTaskUpdate(t, watch)
	expectCommit(t, watch)
	expectTaskUpdate(t, watch)
	expectCommit(t, watch)
	// Again, no restart should be observed.
	select {
	case <-watch:
		t.Fatal("got unexpected event")
	case <-time.After(100 * time.Millisecond):
	}
}
开发者ID:ypjin,项目名称:swarmkit,代码行数:93,代码来源:restart_test.go
示例20: TestSchedulerFaultyNode
// TestSchedulerFaultyNode verifies that the scheduler avoids a node after
// repeated task failures: replacement tasks initially go to the empty node
// (id2), but once id2 accumulates failures the scheduler treats it as
// potentially faulty and subsequent tasks land on id1 instead.
func TestSchedulerFaultyNode(t *testing.T) {
	ctx := context.Background()
	// Template for the repeatedly failing service task.
	taskTemplate := &api.Task{
		ServiceID:    "service1",
		DesiredState: api.TaskStateRunning,
		ServiceAnnotations: api.Annotations{
			Name: "name1",
		},
		Status: api.TaskStatus{
			State: api.TaskStatePending,
		},
	}
	node1 := &api.Node{
		ID: "id1",
		Spec: api.NodeSpec{
			Annotations: api.Annotations{
				Name: "id1",
			},
		},
		Status: api.NodeStatus{
			State: api.NodeStatus_READY,
		},
	}
	node2 := &api.Node{
		ID: "id2",
		Spec: api.NodeSpec{
			Annotations: api.Annotations{
				Name: "id2",
			},
		},
		Status: api.NodeStatus{
			State: api.NodeStatus_READY,
		},
	}
	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()
	err := s.Update(func(tx store.Tx) error {
		// Add initial nodes, and one task assigned to node id1
		assert.NoError(t, store.CreateNode(tx, node1))
		assert.NoError(t, store.CreateNode(tx, node2))
		task1 := taskTemplate.Copy()
		task1.ID = "id1"
		task1.NodeID = "id1"
		task1.Status.State = api.TaskStateRunning
		assert.NoError(t, store.CreateTask(tx, task1))
		return nil
	})
	assert.NoError(t, err)
	scheduler := New(s)
	watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
	defer cancel()
	go func() {
		assert.NoError(t, scheduler.Run(ctx))
	}()
	defer scheduler.Stop()
	for i := 0; i != 8; i++ {
		// Simulate a task failure cycle
		newTask := taskTemplate.Copy()
		newTask.ID = identity.NewID()
		err = s.Update(func(tx store.Tx) error {
			assert.NoError(t, store.CreateTask(tx, newTask))
			return nil
		})
		assert.NoError(t, err)
		assignment := watchAssignment(t, watch)
		assert.Equal(t, newTask.ID, assignment.ID)
		if i < 5 {
			// The first 5 attempts should be assigned to node id2 because
			// it has no replicas of the service.
			assert.Equal(t, "id2", assignment.NodeID)
		} else {
			// The next ones should be assigned to id1, since we'll
			// flag id2 as potentially faulty.
			assert.Equal(t, "id1", assignment.NodeID)
		}
		// Fail the task so the next iteration schedules a replacement.
		err = s.Update(func(tx store.Tx) error {
			newTask := store.GetTask(tx, newTask.ID)
			require.NotNil(t, newTask)
			newTask.Status.State = api.TaskStateFailed
			assert.NoError(t, store.UpdateTask(tx, newTask))
			return nil
		})
		assert.NoError(t, err)
	}
}
开发者ID:docker,项目名称:swarmkit,代码行数:100,代码来源:scheduler_test.go
注:本文中的github.com/docker/swarmkit/manager/state/store.UpdateTask函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论