本文整理汇总了Golang中github.com/docker/swarmkit/log.G函数的典型用法代码示例。如果您正苦于以下问题:Golang G函数的具体用法?Golang G怎么用?Golang G使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了G函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Golang代码示例。
示例1: useExistingTask
// useExistingTask keeps `existing` as the task serving this slot: every other
// task in the slot is removed, and, if the kept task is not already destined
// to run, its desired state is advanced to RUNNING. All store writes happen
// inside a single batch; failures are logged rather than returned.
func (u *Updater) useExistingTask(ctx context.Context, slot slot, existing *api.Task) {
    // Everything in the slot except the task we are keeping gets removed.
    var removeTasks []*api.Task
    for _, t := range slot {
        if t != existing {
            removeTasks = append(removeTasks, t)
        }
    }
    // Only touch the store when there is actual work to do: tasks to remove
    // or a desired-state bump for the kept task.
    if len(removeTasks) != 0 || existing.DesiredState != api.TaskStateRunning {
        _, err := u.store.Batch(func(batch *store.Batch) error {
            u.removeOldTasks(ctx, batch, removeTasks)
            if existing.DesiredState != api.TaskStateRunning {
                err := batch.Update(func(tx store.Tx) error {
                    // Re-read the task inside the transaction; it may have
                    // been deleted or advanced since we last saw it.
                    t := store.GetTask(tx, existing.ID)
                    if t == nil {
                        return fmt.Errorf("task %s not found while trying to start it", existing.ID)
                    }
                    if t.DesiredState >= api.TaskStateRunning {
                        return fmt.Errorf("task %s was already started when reached by updater", existing.ID)
                    }
                    t.DesiredState = api.TaskStateRunning
                    return store.UpdateTask(tx, t)
                })
                // Failure to start the kept task does not abort the batch;
                // the removals above still commit.
                if err != nil {
                    log.G(ctx).WithError(err).Errorf("starting task %s failed", existing.ID)
                }
            }
            return nil
        })
        if err != nil {
            log.G(ctx).WithError(err).Error("updater batch transaction failed")
        }
    }
}
开发者ID:HuKeping,项目名称:docker,代码行数:34,代码来源:updater.go
示例2: deleteServiceTasks
// deleteServiceTasks removes every task belonging to the given service from
// the store. Lookup and per-task deletion failures are logged, not returned.
func deleteServiceTasks(ctx context.Context, s *store.MemoryStore, service *api.Service) {
    var (
        serviceTasks []*api.Task
        viewErr      error
    )
    s.View(func(tx store.ReadTx) {
        serviceTasks, viewErr = store.FindTasks(tx, store.ByServiceID(service.ID))
    })
    if viewErr != nil {
        log.G(ctx).WithError(viewErr).Errorf("failed to list tasks")
        return
    }

    _, batchErr := s.Batch(func(batch *store.Batch) error {
        for _, task := range serviceTasks {
            // Each deletion runs in its own update; a failed delete is
            // logged and swallowed so the remaining tasks still go away.
            updateErr := batch.Update(func(tx store.Tx) error {
                if err := store.DeleteTask(tx, task.ID); err != nil {
                    log.G(ctx).WithError(err).Errorf("failed to delete task")
                }
                return nil
            })
            if updateErr != nil {
                return updateErr
            }
        }
        return nil
    })
    if batchErr != nil {
        log.G(ctx).WithError(batchErr).Errorf("task search transaction failed")
    }
}
开发者ID:CWSpear,项目名称:docker,代码行数:31,代码来源:replicated.go
示例3: rollbackUpdate
// rollbackUpdate marks the update of serviceID as rolling back and restores
// the service's previous spec so a subsequent updater pass can apply it.
// message is recorded as the update status message (typically the reason the
// rollback was triggered). Errors are logged, not returned.
func (u *Updater) rollbackUpdate(ctx context.Context, serviceID, message string) {
    log.G(ctx).Debugf("starting rollback of service %s", serviceID)

    var service *api.Service
    err := u.store.Update(func(tx store.Tx) error {
        service = store.GetService(tx, serviceID)
        if service == nil {
            // Service was deleted out from under us; nothing to roll back.
            return nil
        }
        if service.UpdateStatus == nil {
            // The service was updated since we started this update
            return nil
        }

        service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_STARTED
        service.UpdateStatus.Message = message

        if service.PreviousSpec == nil {
            return errors.New("cannot roll back service because no previous spec is available")
        }
        // Swap the previous spec back in and clear it so we can't roll back
        // twice to the same snapshot.
        service.Spec = *service.PreviousSpec
        service.PreviousSpec = nil

        return store.UpdateService(tx, service)
    })

    if err != nil {
        log.G(ctx).WithError(err).Errorf("failed to start rollback of service %s", serviceID)
    }
    // Fix: dropped the redundant bare `return` that previously trailed the
    // function body.
}
开发者ID:CWSpear,项目名称:docker,代码行数:31,代码来源:updater.go
示例4: removeTasksFromNode
// removeTasksFromNode removes every global-service task assigned to the
// given node. Tasks belonging to non-global services are left untouched.
func (g *GlobalOrchestrator) removeTasksFromNode(ctx context.Context, node *api.Node) {
    var (
        nodeTasks []*api.Task
        findErr   error
    )
    g.store.View(func(tx store.ReadTx) {
        nodeTasks, findErr = store.FindTasks(tx, store.ByNodeID(node.ID))
    })
    if findErr != nil {
        log.G(ctx).WithError(findErr).Errorf("global orchestrator: removeTasksFromNode failed finding tasks")
        return
    }

    _, batchErr := g.store.Batch(func(batch *store.Batch) error {
        for _, task := range nodeTasks {
            // GlobalOrchestrator only removes tasks from globalServices
            if _, ok := g.globalServices[task.ServiceID]; ok {
                g.removeTask(ctx, batch, task)
            }
        }
        return nil
    })
    if batchErr != nil {
        log.G(ctx).WithError(batchErr).Errorf("global orchestrator: removeTasksFromNode failed")
    }
}
开发者ID:CadeLaRen,项目名称:docker-3,代码行数:26,代码来源:global.go
示例5: deallocateVIP
// deallocateVIP releases the IPAM address backing the given virtual IP and
// removes the VIP's endpoint bookkeeping from the local network state.
// Returns an error if the network state, IPAM driver, or address cannot be
// resolved, or if the IPAM release itself fails.
func (na *NetworkAllocator) deallocateVIP(vip *api.Endpoint_VirtualIP) error {
    localNet := na.getNetwork(vip.NetworkID)
    if localNet == nil {
        return fmt.Errorf("networkallocator: could not find local network state")
    }

    ipam, _, err := na.resolveIPAM(localNet.nw)
    if err != nil {
        // Fix: this path runs during release, not allocation — the old
        // message said "while allocating".
        return fmt.Errorf("failed to resolve IPAM while releasing: %v", err)
    }

    // Retrieve the poolID and immediately nuke
    // out the mapping.
    poolID := localNet.endpoints[vip.Addr]
    delete(localNet.endpoints, vip.Addr)

    ip, _, err := net.ParseCIDR(vip.Addr)
    if err != nil {
        log.G(context.TODO()).Errorf("Could not parse VIP address %s while releasing", vip.Addr)
        return err
    }

    if err := ipam.ReleaseAddress(poolID, ip); err != nil {
        log.G(context.TODO()).Errorf("IPAM failure while releasing VIP address %s: %v", vip.Addr, err)
        return err
    }

    return nil
}
开发者ID:Chandra-TechPassionate,项目名称:docker,代码行数:29,代码来源:networkallocator.go
示例6: getIDs
// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
//
// NOTE(review): despite "ordered set" above, the result is built by ranging
// over a map and so comes back in random order — confirm callers sort it if
// they need determinism.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
    ids := make(map[uint64]bool)
    if snap != nil {
        // Seed the set with the members recorded in the snapshot's ConfState.
        for _, id := range snap.Metadata.ConfState.Nodes {
            ids[id] = true
        }
    }
    for _, e := range ents {
        if e.Type != raftpb.EntryConfChange {
            continue
        }
        if snap != nil && e.Index < snap.Metadata.Index {
            // Entry predates the snapshot, so it is already reflected above.
            continue
        }
        var cc raftpb.ConfChange
        if err := cc.Unmarshal(e.Data); err != nil {
            log.G(context.Background()).Panicf("unmarshal configuration change should never fail: %v", err)
        }
        switch cc.Type {
        case raftpb.ConfChangeAddNode:
            ids[cc.NodeID] = true
        case raftpb.ConfChangeRemoveNode:
            delete(ids, cc.NodeID)
        case raftpb.ConfChangeUpdateNode:
            // do nothing
        default:
            log.G(context.Background()).Panic("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
        }
    }
    // Pre-size the result slice to avoid repeated growth while copying the
    // set out (was: var sids []uint64).
    sids := make([]uint64, 0, len(ids))
    for id := range ids {
        sids = append(sids, id)
    }
    return sids
}
开发者ID:ygf11,项目名称:docker,代码行数:40,代码来源:raft.go
示例7: pauseUpdate
// pauseUpdate records that the update (or in-flight rollback) of serviceID
// has been paused, storing message as the human-readable reason.
func (u *Updater) pauseUpdate(ctx context.Context, serviceID, message string) {
    log.G(ctx).Debugf("pausing update of service %s", serviceID)

    updateErr := u.store.Update(func(tx store.Tx) error {
        service := store.GetService(tx, serviceID)
        if service == nil {
            return nil
        }
        if service.UpdateStatus == nil {
            // The service was updated since we started this update
            return nil
        }

        // A rollback in flight pauses into the rollback-specific state.
        switch service.UpdateStatus.State {
        case api.UpdateStatus_ROLLBACK_STARTED:
            service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_PAUSED
        default:
            service.UpdateStatus.State = api.UpdateStatus_PAUSED
        }
        service.UpdateStatus.Message = message

        return store.UpdateService(tx, service)
    })
    if updateErr != nil {
        log.G(ctx).WithError(updateErr).Errorf("failed to pause update of service %s", serviceID)
    }
}
开发者ID:CWSpear,项目名称:docker,代码行数:27,代码来源:updater.go
示例8: start
// start begins the session and returns the first SessionMessage.
func (s *session) start(ctx context.Context) error {
    log.G(ctx).Debugf("(*session).start")

    client := api.NewDispatcherClient(s.agent.config.Conn)

    // Ask the executor to describe this node; the description is sent to the
    // dispatcher as part of the session request.
    description, err := s.agent.config.Executor.Describe(ctx)
    if err != nil {
        log.G(ctx).WithError(err).WithField("executor", s.agent.config.Executor).
            Errorf("node description unavailable")
        return err
    }
    // Override hostname
    if s.agent.config.Hostname != "" {
        description.Hostname = s.agent.config.Hostname
    }

    stream, err := client.Session(ctx, &api.SessionRequest{
        Description: description,
    })
    if err != nil {
        return err
    }

    // The first message on the stream carries the session ID used by all
    // subsequent RPCs in this session.
    msg, err := stream.Recv()
    if err != nil {
        return err
    }

    s.sessionID = msg.SessionID
    s.session = stream

    return s.handleSessionMessage(ctx, msg)
}
开发者ID:tkopczynski,项目名称:docker,代码行数:34,代码来源:session.go
示例9: procUnallocatedServices
// procUnallocatedServices retries allocation for services that previously
// failed allocation, then commits the successful ones to the store in a
// single batch. Only services whose commit actually landed (per Batch's
// committed count) are removed from the unallocated set; the rest will be
// retried on a later pass.
func (a *Allocator) procUnallocatedServices(ctx context.Context) {
    nc := a.netCtx
    var allocatedServices []*api.Service
    for _, s := range nc.unallocatedServices {
        if !nc.nwkAllocator.IsServiceAllocated(s) {
            if err := a.allocateService(ctx, s); err != nil {
                // Leave it in the unallocated set for the next pass.
                log.G(ctx).WithError(err).Debugf("Failed allocation of unallocated service %s", s.ID)
                continue
            }
            allocatedServices = append(allocatedServices, s)
        }
    }
    if len(allocatedServices) == 0 {
        return
    }

    committed, err := a.store.Batch(func(batch *store.Batch) error {
        for _, s := range allocatedServices {
            if err := a.commitAllocatedService(ctx, batch, s); err != nil {
                log.G(ctx).WithError(err).Debugf("Failed to commit allocation of unallocated service %s", s.ID)
                continue
            }
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Error("Failed to commit allocation of unallocated services")
    }

    // `committed` counts how many batch updates made it in even when Batch
    // returns an error, so only that prefix is considered done.
    for _, s := range allocatedServices[:committed] {
        delete(nc.unallocatedServices, s.ID)
    }
}
开发者ID:Mic92,项目名称:docker,代码行数:35,代码来源:network.go
示例10: reconcileServiceOneNode
// reconcileServiceOneNode checks one service on one node, ensuring the node
// runs exactly one task for the service — or none at all once the restart
// policy considers the node's work completed. Both serviceID and nodeID must
// refer to entries the orchestrator is already tracking; unknown IDs are
// ignored.
func (g *GlobalOrchestrator) reconcileServiceOneNode(ctx context.Context, serviceID string, nodeID string) {
    _, exists := g.nodes[nodeID]
    if !exists {
        return
    }
    service, exists := g.globalServices[serviceID]
    if !exists {
        return
    }
    // whether the node has completed this service (fixed typo: "servie")
    completed := false
    // tasks for this node and service
    var (
        tasks []*api.Task
        err   error
    )
    g.store.View(func(tx store.ReadTx) {
        var tasksOnNode []*api.Task
        tasksOnNode, err = store.FindTasks(tx, store.ByNodeID(nodeID))
        if err != nil {
            return
        }
        for _, t := range tasksOnNode {
            // only interested in one service
            if t.ServiceID != serviceID {
                continue
            }
            if isTaskRunning(t) {
                tasks = append(tasks, t)
            } else {
                // A non-running task that counts as completed under the
                // restart policy marks the whole node as done.
                if isTaskCompleted(t, restartCondition(t)) {
                    completed = true
                }
            }
        }
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("global orchestrator: reconcile failed finding tasks")
        return
    }

    _, err = g.store.Batch(func(batch *store.Batch) error {
        // if restart policy considers this node has finished its task
        // it should remove all running tasks
        if completed {
            g.removeTasks(ctx, batch, service, tasks)
            return nil
        }

        // this node needs to run 1 copy of the task
        if len(tasks) == 0 {
            g.addTask(ctx, batch, service, nodeID)
        } else {
            // Keep the first running task, drop any duplicates.
            g.removeTasks(ctx, batch, service, tasks[1:])
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServiceOneNode batch failed")
    }
}
开发者ID:CadeLaRen,项目名称:docker-3,代码行数:61,代码来源:global.go
示例11: updateCluster
// updateCluster is called when there are cluster changes, and it ensures that the local RootCA is
// always aware of changes in clusterExpiry and the Root CA key material.
// It also refreshes the join tokens and the set of external CFSSL CA URLs
// from the new cluster state. Failures are logged, never returned.
func (s *Server) updateCluster(ctx context.Context, cluster *api.Cluster) {
    s.mu.Lock()
    s.joinTokens = cluster.RootCA.JoinTokens.Copy()
    s.mu.Unlock()
    var err error

    // If the cluster has a RootCA, let's try to update our SecurityConfig to reflect the latest values
    rCA := cluster.RootCA
    if len(rCA.CACert) != 0 && len(rCA.CAKey) != 0 {
        expiry := DefaultNodeCertExpiration
        if cluster.Spec.CAConfig.NodeCertExpiry != nil {
            // NodeCertExpiry exists, let's try to parse the duration out of it
            clusterExpiry, err := ptypes.Duration(cluster.Spec.CAConfig.NodeCertExpiry)
            if err != nil {
                log.G(ctx).WithFields(logrus.Fields{
                    "cluster.id": cluster.ID,
                    "method":     "(*Server).updateCluster",
                }).WithError(err).Warn("failed to parse certificate expiration, using default")
            } else {
                // We were able to successfully parse the expiration out of the cluster.
                expiry = clusterExpiry
            }
        } else {
            // Fix: no expiration was specified at all, so there is nothing
            // to parse and `err` is guaranteed nil here. The old code logged
            // a bogus "failed to parse" message with WithError(nil) attached.
            log.G(ctx).WithFields(logrus.Fields{
                "cluster.id": cluster.ID,
                "method":     "(*Server).updateCluster",
            }).Warn("no certificate expiration specified, using default")
        }
        // Attempt to update our local RootCA with the new parameters
        err = s.securityConfig.UpdateRootCA(rCA.CACert, rCA.CAKey, expiry)
        if err != nil {
            log.G(ctx).WithFields(logrus.Fields{
                "cluster.id": cluster.ID,
                "method":     "(*Server).updateCluster",
            }).WithError(err).Error("updating Root CA failed")
        } else {
            log.G(ctx).WithFields(logrus.Fields{
                "cluster.id": cluster.ID,
                "method":     "(*Server).updateCluster",
            }).Debugf("Root CA updated successfully")
        }
    }

    // Update our security config with the list of External CA URLs
    // from the new cluster state.
    //
    // TODO(aaronl): In the future, this will be abstracted with an
    // ExternalCA interface that has different implementations for
    // different CA types. At the moment, only CFSSL is supported.
    var cfsslURLs []string
    for _, ca := range cluster.Spec.CAConfig.ExternalCAs {
        if ca.Protocol == api.ExternalCA_CAProtocolCFSSL {
            cfsslURLs = append(cfsslURLs, ca.URL)
        }
    }
    s.securityConfig.externalCA.UpdateURLs(cfsslURLs...)
}
开发者ID:Chandra-TechPassionate,项目名称:docker,代码行数:62,代码来源:server.go
示例12: Init
// Init prepares the worker for assignments. It walks the task database:
// tasks no longer assigned to this worker are deleted, while assigned tasks
// have their persisted status merged back in and are (re)started. Per-task
// cleanup failures are logged and skipped so one bad record cannot block
// initialization.
func (w *worker) Init(ctx context.Context) error {
    w.mu.Lock()
    defer w.mu.Unlock()

    ctx = log.WithModule(ctx, "worker")

    // TODO(stevvooe): Start task cleanup process.

    // read the tasks from the database and start any task managers that may be needed.
    return w.db.Update(func(tx *bolt.Tx) error {
        return WalkTasks(tx, func(task *api.Task) error {
            if !TaskAssigned(tx, task.ID) {
                // NOTE(stevvooe): If tasks can survive worker restart, we need
                // to startup the controller and ensure they are removed. For
                // now, we can simply remove them from the database.
                if err := DeleteTask(tx, task.ID); err != nil {
                    log.G(ctx).WithError(err).Errorf("error removing task %v", task.ID)
                }
                return nil
            }

            status, err := GetTaskStatus(tx, task.ID)
            if err != nil {
                // Skip tasks with unreadable status rather than failing Init.
                log.G(ctx).WithError(err).Error("unable to read tasks status")
                return nil
            }

            task.Status = *status // merges the status into the task, ensuring we start at the right point.
            return w.startTask(ctx, tx, task)
        })
    })
}
开发者ID:xlgao-zju,项目名称:docker,代码行数:33,代码来源:worker.go
示例13: worker
// worker consumes tasks from queue and replaces each with a fresh task built
// from the updater's new service spec, honoring the configured update delay
// between tasks. It returns early if the updater is stopped mid-delay.
func (u *Updater) worker(ctx context.Context, queue <-chan *api.Task) {
    for task := range queue {
        replacement := newTask(u.cluster, u.newService, task.Slot)
        replacement.DesiredState = api.TaskStateReady
        if isGlobalService(u.newService) {
            // Global tasks stay pinned to the node of the task they replace.
            replacement.NodeID = task.NodeID
        }
        if err := u.updateTask(ctx, task, replacement); err != nil {
            log.G(ctx).WithError(err).WithField("task.id", task.ID).Error("update failed")
        }

        updateSpec := u.newService.Spec.Update
        if updateSpec == nil || (updateSpec.Delay.Seconds == 0 && updateSpec.Delay.Nanos == 0) {
            continue
        }
        delay, err := ptypes.Duration(&updateSpec.Delay)
        if err != nil {
            log.G(ctx).WithError(err).Error("invalid update delay")
            continue
        }
        select {
        case <-time.After(delay):
        case <-u.stopChan:
            return
        }
    }
}
开发者ID:Chandra-TechPassionate,项目名称:docker,代码行数:26,代码来源:updater.go
示例14: events
// events issues a call to the events API and returns a channel with all
// events. The stream of events can be shutdown by cancelling the context.
func (c *containerAdapter) events(ctx context.Context) <-chan events.Message {
    log.G(ctx).Debugf("waiting on events")
    // Subscribing yields a buffer of past events plus a live channel l.
    buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter())
    eventsq := make(chan events.Message, len(buffer))

    // Replay buffered events first; the channel is sized so this never blocks.
    for _, event := range buffer {
        eventsq <- event
    }

    // Pump live events until the context is cancelled; the subscription is
    // released when the goroutine exits.
    go func() {
        defer c.backend.UnsubscribeFromEvents(l)

        for {
            select {
            case ev := <-l:
                jev, ok := ev.(events.Message)
                if !ok {
                    // Skip anything on the bus that isn't an events.Message.
                    log.G(ctx).Warnf("unexpected event message: %q", ev)
                    continue
                }
                // Forward, but never block past context cancellation.
                select {
                case eventsq <- jev:
                case <-ctx.Done():
                    return
                }
            case <-ctx.Done():
                return
            }
        }
    }()

    return eventsq
}
开发者ID:HuKeping,项目名称:docker,代码行数:35,代码来源:adapter.go
示例15: UpdateTaskStatus
// UpdateTaskStatus attempts to send a task status update over the current session,
// blocking until the operation is completed.
//
// If an error is returned, the operation should be retried.
func (a *Agent) UpdateTaskStatus(ctx context.Context, taskID string, status *api.TaskStatus) error {
    log.G(ctx).WithField("task.id", taskID).Debugf("(*Agent).UpdateTaskStatus")
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

    // Buffered so the goroutine below can always deliver its result even if
    // we have already returned via ctx.Done().
    errs := make(chan error, 1)
    if err := a.withSession(ctx, func(session *session) error {
        // Send in the background so withSession is not blocked on the RPC.
        go func() {
            err := session.sendTaskStatus(ctx, taskID, status)
            if err != nil {
                if err == errTaskUnknown {
                    err = nil // dispatcher no longer cares about this task.
                } else {
                    log.G(ctx).WithError(err).Error("sending task status update failed")
                }
            } else {
                log.G(ctx).Debug("task status reported")
            }

            errs <- err
        }()

        return nil
    }); err != nil {
        return err
    }

    select {
    case err := <-errs:
        return err
    case <-ctx.Done():
        return ctx.Err()
    }
}
开发者ID:Chandra-TechPassionate,项目名称:docker,代码行数:38,代码来源:agent.go
示例16: procUnallocatedNetworks
// procUnallocatedNetworks retries allocation for networks that previously
// failed allocation, then commits the successful ones in a single batch.
// Only networks whose commit landed are dropped from the unallocated set.
func (a *Allocator) procUnallocatedNetworks(ctx context.Context) {
    nc := a.netCtx

    var newlyAllocated []*api.Network
    for _, network := range nc.unallocatedNetworks {
        if nc.nwkAllocator.IsAllocated(network) {
            continue
        }
        if err := a.allocateNetwork(ctx, network); err != nil {
            // Leave it in the unallocated set for the next pass.
            log.G(ctx).WithError(err).Debugf("Failed allocation of unallocated network %s", network.ID)
            continue
        }
        newlyAllocated = append(newlyAllocated, network)
    }
    if len(newlyAllocated) == 0 {
        return
    }

    committed, err := a.store.Batch(func(batch *store.Batch) error {
        for _, network := range newlyAllocated {
            if err := a.commitAllocatedNetwork(ctx, batch, network); err != nil {
                log.G(ctx).WithError(err).Debugf("Failed to commit allocation of unallocated network %s", network.ID)
                continue
            }
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Error("Failed to commit allocation of unallocated networks")
    }

    // Batch reports how many updates were committed; only that prefix is
    // considered allocated.
    for _, network := range newlyAllocated[:committed] {
        delete(nc.unallocatedNetworks, network.ID)
    }
}
开发者ID:Mic92,项目名称:docker,代码行数:35,代码来源:network.go
示例17: completeUpdate
// completeUpdate marks the update (or rollback) of serviceID as finished,
// recording a completion message and timestamp on the service.
func (u *Updater) completeUpdate(ctx context.Context, serviceID string) {
    log.G(ctx).Debugf("update of service %s complete", serviceID)

    err := u.store.Update(func(tx store.Tx) error {
        service := store.GetService(tx, serviceID)
        if service == nil {
            return nil
        }
        if service.UpdateStatus == nil {
            // The service was changed since we started this update
            return nil
        }

        // A rollback in flight completes into the rollback-specific state.
        rollingBack := service.UpdateStatus.State == api.UpdateStatus_ROLLBACK_STARTED
        if rollingBack {
            service.UpdateStatus.State = api.UpdateStatus_ROLLBACK_COMPLETED
            service.UpdateStatus.Message = "rollback completed"
        } else {
            service.UpdateStatus.State = api.UpdateStatus_COMPLETED
            service.UpdateStatus.Message = "update completed"
        }
        service.UpdateStatus.CompletedAt = ptypes.MustTimestampProto(time.Now())

        return store.UpdateService(tx, service)
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("failed to mark update of service %s complete", serviceID)
    }
}
开发者ID:CWSpear,项目名称:docker,代码行数:28,代码来源:updater.go
示例18: start
// start begins the session and returns the first SessionMessage.
func (s *session) start(ctx context.Context) error {
    log.G(ctx).Debugf("(*session).start")

    description, err := s.agent.config.Executor.Describe(ctx)
    if err != nil {
        log.G(ctx).WithError(err).WithField("executor", s.agent.config.Executor).
            Errorf("node description unavailable")
        return err
    }
    // Override hostname
    if s.agent.config.Hostname != "" {
        description.Hostname = s.agent.config.Hostname
    }

    // Buffered so the session goroutine can always deliver its result even
    // after we have timed out and stopped listening.
    errChan := make(chan error, 1)
    var (
        msg    *api.SessionMessage
        stream api.Dispatcher_SessionClient
    )

    // Note: we don't defer cancellation of this context, because the
    // streaming RPC is used after this function returned. We only cancel
    // it in the timeout case to make sure the goroutine completes.
    sessionCtx, cancelSession := context.WithCancel(ctx)

    // Need to run Session in a goroutine since there's no way to set a
    // timeout for an individual Recv call in a stream.
    go func() {
        client := api.NewDispatcherClient(s.conn)

        stream, err = client.Session(sessionCtx, &api.SessionRequest{
            Description: description,
            SessionID:   s.sessionID,
        })
        if err != nil {
            errChan <- err
            return
        }

        msg, err = stream.Recv()
        errChan <- err
    }()

    select {
    case err := <-errChan:
        if err != nil {
            return err
        }
        // msg and stream were written by the goroutine before it sent on
        // errChan, so they are safe to read below.
    case <-time.After(dispatcherRPCTimeout):
        cancelSession()
        return errors.New("session initiation timed out")
    }

    s.sessionID = msg.SessionID
    s.session = stream

    return s.handleSessionMessage(ctx, msg)
}
开发者ID:HuKeping,项目名称:docker,代码行数:58,代码来源:session.go
示例19: Stop
// Stop stops the manager. It immediately closes all open connections and
// active RPCs as well as stopping the scheduler.
func (m *Manager) Stop(ctx context.Context) {
    log.G(ctx).Info("Stopping manager")

    // It's not safe to start shutting down while the manager is still
    // starting up.
    <-m.started

    // the mutex stops us from trying to stop while we're already stopping, or
    // from returning before we've finished stopping.
    m.mu.Lock()
    defer m.mu.Unlock()

    select {
    // check to see that we've already stopped
    case <-m.stopped:
        return
    default:
        // do nothing, we're stopping for the first time
    }

    // once we start stopping, send a signal that we're doing so. this tells
    // Run that we've started stopping, when it gets the error from errServe
    // it also prevents the loop from processing any more stuff.
    close(m.stopped)

    // Stop each subsystem in turn; the nil checks cover components that were
    // never started on this manager instance.
    m.Dispatcher.Stop()
    m.caserver.Stop()

    if m.allocator != nil {
        m.allocator.Stop()
    }
    if m.replicatedOrchestrator != nil {
        m.replicatedOrchestrator.Stop()
    }
    if m.globalOrchestrator != nil {
        m.globalOrchestrator.Stop()
    }
    if m.taskReaper != nil {
        m.taskReaper.Stop()
    }
    if m.scheduler != nil {
        m.scheduler.Stop()
    }
    if m.keyManager != nil {
        m.keyManager.Stop()
    }
    if m.connSelector != nil {
        m.connSelector.Stop()
    }
    m.RaftNode.Shutdown()
    // some time after this point, Run will receive an error from one of these
    m.server.Stop()
    m.localserver.Stop()

    log.G(ctx).Info("Manager shut down")
    // mutex is released and Run can return now
}
开发者ID:maxim28,项目名称:docker,代码行数:59,代码来源:manager.go
示例20: reconcile
// reconcile drives the actual number of running tasks for a replicated
// service toward the replica count in its spec, scaling up or down (and
// updating existing tasks) inside a single store batch. Errors are logged,
// not returned.
func (r *ReplicatedOrchestrator) reconcile(ctx context.Context, service *api.Service) {
    var (
        tasks []*api.Task
        err   error
    )
    r.store.View(func(tx store.ReadTx) {
        tasks, err = store.FindTasks(tx, store.ByServiceID(service.ID))
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("reconcile failed finding tasks")
        return
    }

    // Collect the tasks that are (or are about to be) running, and record
    // which slots they occupy.
    runningTasks := make([]*api.Task, 0, len(tasks))
    runningInstances := make(map[uint64]struct{}) // this could be a bitfield...
    for _, t := range tasks {
        // Technically the check below could just be
        // t.DesiredState <= api.TaskStateRunning, but ignoring tasks
        // with DesiredState == NEW simplifies the drainer unit tests.
        if t.DesiredState > api.TaskStateNew && t.DesiredState <= api.TaskStateRunning {
            runningTasks = append(runningTasks, t)
            runningInstances[t.Slot] = struct{}{}
        }
    }

    numTasks := len(runningTasks)

    // NOTE(review): this type assertion assumes reconcile is only ever
    // called for replicated-mode services — it would panic otherwise.
    deploy := service.Spec.GetMode().(*api.ServiceSpec_Replicated)
    specifiedInstances := int(deploy.Replicated.Replicas)

    // TODO(aaronl): Add support for restart delays.

    _, err = r.store.Batch(func(batch *store.Batch) error {
        switch {
        case specifiedInstances > numTasks:
            log.G(ctx).Debugf("Service %s was scaled up from %d to %d instances", service.ID, numTasks, specifiedInstances)
            // Update all current tasks then add missing tasks
            r.updater.Update(ctx, service, runningTasks)
            r.addTasks(ctx, batch, service, runningInstances, specifiedInstances-numTasks)

        case specifiedInstances < numTasks:
            // Update up to N tasks then remove the extra
            log.G(ctx).Debugf("Service %s was scaled down from %d to %d instances", service.ID, numTasks, specifiedInstances)
            r.updater.Update(ctx, service, runningTasks[:specifiedInstances])
            r.removeTasks(ctx, batch, service, runningTasks[specifiedInstances:])

        case specifiedInstances == numTasks:
            // Simple update, no scaling - update all tasks.
            r.updater.Update(ctx, service, runningTasks)
        }
        return nil
    })
    if err != nil {
        log.G(ctx).WithError(err).Errorf("reconcile batch failed")
    }
}
开发者ID:amitshukla,项目名称:docker,代码行数:56,代码来源:services.go
注:本文中的github.com/docker/swarmkit/log.G函数示例整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论