This article collects typical usage examples of the Golang function github.com/openshift/origin/pkg/deploy/util.LabelForDeployment. If you have been wondering what LabelForDeployment does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following shows 20 code examples of LabelForDeployment, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
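Before the examples, it helps to see what the function itself looks like. The snippet below is a minimal sketch, assuming the helper simply formats the deployment's namespace and name; the import path reflects the Kubernetes vendoring of this code's era, and the authoritative definition lives in pkg/deploy/util in the origin repository:
package util

import (
	"fmt"

	kapi "k8s.io/kubernetes/pkg/api"
)

// LabelForDeployment returns a human-readable "namespace/name" identifier for
// the ReplicationController backing a deployment, for use in logs and errors.
func LabelForDeployment(deployment *kapi.ReplicationController) string {
	return fmt.Sprintf("%s/%s", deployment.Namespace, deployment.Name)
}
Every example below calls it the same way: as a compact identifier inside fmt.Errorf and glog messages.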
Example 1: executeExecNewPod
// executeExecNewPod executes an ExecNewPod hook by creating a new pod based on
// the hook parameters and deployment. The pod is then watched synchronously
// until it completes; if the pod fails, an error is returned.
//
// The hook pod inherits the following from the container the hook refers to:
//
// * Environment (hook keys take precedence)
// * Working directory
// * Resources
func (e *HookExecutor) executeExecNewPod(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, label string) error {
	// Build a pod spec from the hook config and deployment
	podSpec, err := makeHookPod(hook, deployment, label)
	if err != nil {
		return err
	}
	// Try to create the pod.
	pod, err := e.PodClient.CreatePod(deployment.Namespace, podSpec)
	if err != nil {
		if !kerrors.IsAlreadyExists(err) {
			return fmt.Errorf("couldn't create lifecycle pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}
	} else {
		glog.V(0).Infof("Created lifecycle pod %s for deployment %s", pod.Name, deployutil.LabelForDeployment(deployment))
	}
	stopChannel := make(chan struct{})
	defer close(stopChannel)
	nextPod := e.PodClient.PodWatch(pod.Namespace, pod.Name, pod.ResourceVersion, stopChannel)
	glog.V(0).Infof("Waiting for hook pod %s/%s to complete", pod.Namespace, pod.Name)
	for {
		pod := nextPod()
		switch pod.Status.Phase {
		case kapi.PodSucceeded:
			return nil
		case kapi.PodFailed:
			// Wrap the message so a stray "%" in it can't be misread as a format verb.
			return fmt.Errorf("%s", pod.Status.Message)
		}
	}
}
Author: nitintutlani | Project: origin | Lines: 41 | Source: lifecycle.go
Example 2: cancelDeployerPods
func (c *DeploymentController) cancelDeployerPods(deployment *kapi.ReplicationController) error {
	deployerPods, err := c.podClient.getDeployerPodsFor(deployment.Namespace, deployment.Name)
	if err != nil {
		return fmt.Errorf("couldn't fetch deployer pods for %s while trying to cancel deployment: %v", deployutil.LabelForDeployment(deployment), err)
	}
	glog.V(4).Infof("Cancelling %d deployer pods for deployment %s", len(deployerPods), deployutil.LabelForDeployment(deployment))
	zeroDelay := int64(1)
	anyCancelled := false
	for _, deployerPod := range deployerPods {
		// Set the ActiveDeadlineSeconds on the pod so it's terminated very soon.
		if deployerPod.Spec.ActiveDeadlineSeconds == nil || *deployerPod.Spec.ActiveDeadlineSeconds != zeroDelay {
			deployerPod.Spec.ActiveDeadlineSeconds = &zeroDelay
			if _, err := c.podClient.updatePod(deployerPod.Namespace, &deployerPod); err != nil {
				if config, decodeErr := c.decodeConfig(deployment); decodeErr == nil {
					c.recorder.Eventf(config, kapi.EventTypeWarning, "FailedCancellation", "Error cancelling deployer pod %s for deployment %s: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err)
				} else {
					c.recorder.Eventf(deployment, kapi.EventTypeWarning, "FailedCancellation", "Error cancelling deployer pod %s for deployment %s: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err)
				}
				return fmt.Errorf("couldn't cancel deployer pod %s for deployment %s: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err)
			}
			anyCancelled = true
			glog.V(4).Infof("Cancelled deployer pod %s for deployment %s", deployerPod.Name, deployutil.LabelForDeployment(deployment))
		}
	}
	if anyCancelled {
		if config, decodeErr := c.decodeConfig(deployment); decodeErr == nil && len(deployerPods) > 0 {
			c.recorder.Eventf(config, kapi.EventTypeNormal, "Cancelled", "Cancelled deployer pods for deployment %s", deployutil.LabelForDeployment(deployment))
		} else if len(deployerPods) > 0 {
			c.recorder.Eventf(deployment, kapi.EventTypeNormal, "Cancelled", "Cancelled deployer pods")
		}
	}
	return nil
}
Author: iconoeugen | Project: origin | Lines: 33 | Source: controller.go
Example 3: Accept
// Accept implements UpdateAcceptor.
func (c *AcceptNewlyObservedReadyPods) Accept(deployment *kapi.ReplicationController) error {
	// Make a pod store to poll and ensure it gets cleaned up.
	podStore, stopStore := c.getDeploymentPodStore(deployment)
	defer close(stopStore)
	// Start checking for pod updates.
	glog.V(0).Infof("Waiting %.f seconds for pods owned by deployment %q to become ready (checking every %.f seconds; %d pods previously accepted)", c.timeout.Seconds(), deployutil.LabelForDeployment(deployment), c.interval.Seconds(), c.acceptedPods.Len())
	err := wait.Poll(c.interval, c.timeout, func() (done bool, err error) {
		// Check for pod readiness.
		unready := kutil.NewStringSet()
		for _, obj := range podStore.List() {
			pod := obj.(*kapi.Pod)
			// Skip previously accepted pods; we only want to verify newly observed
			// and unaccepted pods.
			if c.acceptedPods.Has(pod.Name) {
				continue
			}
			if kapi.IsPodReady(pod) {
				// If the pod is ready, track it as accepted.
				c.acceptedPods.Insert(pod.Name)
			} else {
				// Otherwise, track it as unready.
				unready.Insert(pod.Name)
			}
		}
		// Check to see if we're done.
		if unready.Len() == 0 {
			glog.V(0).Infof("All pods ready for %s", deployutil.LabelForDeployment(deployment))
			return true, nil
		}
		// Otherwise, try again later.
		glog.V(4).Infof("Still waiting for %d pods to become ready for deployment %s", unready.Len(), deployutil.LabelForDeployment(deployment))
		return false, nil
	})
	// Handle acceptance failure.
	if err != nil {
		if err == wait.ErrWaitTimeout {
			return fmt.Errorf("pods for deployment %q took longer than %.f seconds to become ready", deployutil.LabelForDeployment(deployment), c.timeout.Seconds())
		}
		return fmt.Errorf("pod readiness check failed for deployment %q: %v", deployutil.LabelForDeployment(deployment), err)
	}
	return nil
}
Author: nitintutlani | Project: origin | Lines: 45 | Source: lifecycle.go
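Example 3 above leans on wait.Poll from the Kubernetes wait utility package. As a reminder of its contract, here is a small self-contained sketch; the import path and the three-second condition are illustrative assumptions of this code's era, not part of the example above:
package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/wait"
)

func main() {
	start := time.Now()
	// Poll every second, for at most ten seconds. The condition func returns
	// (done, err): true stops polling successfully, a non-nil error aborts it,
	// and false/nil means "check again after the next interval".
	err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
		return time.Since(start) > 3*time.Second, nil // hypothetical readiness condition
	})
	if err == wait.ErrWaitTimeout {
		fmt.Println("condition never became true before the timeout")
		return
	}
	fmt.Println("condition met")
}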
Example 4: executeExecNewPod
// executeExecNewPod executes an ExecNewPod hook by creating a new pod based on
// the hook parameters and deployment. The pod is then watched synchronously
// until it completes; if the pod fails, an error is returned.
//
// The hook pod inherits the following from the container the hook refers to:
//
// * Environment (hook keys take precedence)
// * Working directory
// * Resources
func (e *HookExecutor) executeExecNewPod(hook *deployapi.LifecycleHook, deployment *kapi.ReplicationController, label string) error {
	config, err := deployutil.DecodeDeploymentConfig(deployment, e.codec)
	if err != nil {
		return err
	}
	// Build a pod spec from the hook config and deployment
	podSpec, err := makeHookPod(hook, deployment, &config.Template.Strategy, label)
	if err != nil {
		return err
	}
	// Try to create the pod.
	pod, err := e.podClient.CreatePod(deployment.Namespace, podSpec)
	if err != nil {
		if !kerrors.IsAlreadyExists(err) {
			return fmt.Errorf("couldn't create lifecycle pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}
	} else {
		glog.V(0).Infof("Created lifecycle pod %s/%s for deployment %s", pod.Namespace, pod.Name, deployutil.LabelForDeployment(deployment))
	}
	stopChannel := make(chan struct{})
	defer close(stopChannel)
	nextPod := e.podClient.PodWatch(pod.Namespace, pod.Name, pod.ResourceVersion, stopChannel)
	// Wait for the hook pod to reach a terminal phase. Start reading logs as
	// soon as the pod enters a usable phase.
	var updatedPod *kapi.Pod
	var once sync.Once
	wg := &sync.WaitGroup{}
	wg.Add(1)
	glog.V(0).Infof("Watching logs for hook pod %s/%s while awaiting completion", pod.Namespace, pod.Name)
waitLoop:
	for {
		updatedPod = nextPod()
		switch updatedPod.Status.Phase {
		case kapi.PodRunning:
			go once.Do(func() { e.readPodLogs(pod, wg) })
		case kapi.PodSucceeded, kapi.PodFailed:
			go once.Do(func() { e.readPodLogs(pod, wg) })
			break waitLoop
		}
	}
	// The pod is finished; wait for all logs to be consumed before returning.
	wg.Wait()
	if updatedPod.Status.Phase == kapi.PodFailed {
		// Wrap the message so a stray "%" in it can't be misread as a format verb.
		return fmt.Errorf("%s", updatedPod.Status.Message)
	}
	return nil
}
Author: urashidmalik | Project: origin | Lines: 60 | Source: lifecycle.go
Example 5: cancelDeployerPods
func (c *DeploymentController) cancelDeployerPods(deployment *kapi.ReplicationController) error {
	deployerPods, err := c.podClient.getDeployerPodsFor(deployment.Namespace, deployment.Name)
	if err != nil {
		return fmt.Errorf("couldn't fetch deployer pods for %s while trying to cancel deployment: %v", deployutil.LabelForDeployment(deployment), err)
	}
	glog.V(4).Infof("Cancelling %d deployer pods for deployment %s", len(deployerPods), deployutil.LabelForDeployment(deployment))
	zeroDelay := int64(1)
	cleanedAll := len(deployerPods) > 0
	for _, deployerPod := range deployerPods {
		// Set the ActiveDeadlineSeconds on the pod so it's terminated very soon.
		if deployerPod.Spec.ActiveDeadlineSeconds == nil || *deployerPod.Spec.ActiveDeadlineSeconds != zeroDelay {
			deployerPod.Spec.ActiveDeadlineSeconds = &zeroDelay
			if _, err := c.podClient.updatePod(deployerPod.Namespace, &deployerPod); err != nil {
				cleanedAll = false
				utilruntime.HandleError(fmt.Errorf("couldn't cancel deployer pod %s for deployment %s: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err))
			}
			glog.V(4).Infof("Cancelled deployer pod %s for deployment %s", deployerPod.Name, deployutil.LabelForDeployment(deployment))
		}
	}
	if cleanedAll {
		c.emitDeploymentEvent(deployment, kapi.EventTypeNormal, "Cancelled", "Cancelled all deployer pods")
	}
	return nil
}
Author: RomainVabre | Project: origin | Lines: 24 | Source: controller.go
Example 6: updateDeploymentConfig
func (c *DeploymentTriggerController) updateDeploymentConfig(old, cur interface{}) {
	newDc := cur.(*deployapi.DeploymentConfig)
	oldDc := old.(*deployapi.DeploymentConfig)
	// A periodic relist will send update events for all known deployment configs.
	if newDc.ResourceVersion == oldDc.ResourceVersion {
		return
	}
	// No need to enqueue deployment configs that have no triggers or are paused.
	if len(newDc.Spec.Triggers) == 0 || newDc.Spec.Paused {
		return
	}
	// We don't want to compete with the main deployment config controller. Let's process this
	// config once it's synced. Note that this does not eliminate conflicts between the two
	// controllers because the main controller is constantly updating deployment configs as
	// owning replication controllers and pods are updated.
	if !deployutil.HasSynced(newDc, newDc.Generation) {
		return
	}
	// Enqueue the deployment config if it hasn't been deployed yet.
	if newDc.Status.LatestVersion == 0 {
		c.enqueueDeploymentConfig(newDc)
		return
	}
	// Compare deployment config templates before enqueueing. This reduces the number of times
	// we will try to instantiate a deployment config, at the expense of duplicating some of the
	// work that the instantiate endpoint is already doing, but that is acceptable.
	shouldInstantiate := true
	latestRc, err := c.rcLister.ReplicationControllers(newDc.Namespace).Get(deployutil.LatestDeploymentNameForConfig(newDc))
	if err != nil {
		// If we get an error here it may be due to the rc cache lagging behind. In such a case
		// just defer to the api server (instantiate REST) where we will retry this.
		glog.V(2).Infof("Cannot get latest rc for dc %s:%d (%v) - will defer to instantiate", deployutil.LabelForDeploymentConfig(newDc), newDc.Status.LatestVersion, err)
	} else {
		initial, err := deployutil.DecodeDeploymentConfig(latestRc, c.codec)
		if err != nil {
			glog.V(2).Infof("Cannot decode dc from replication controller %s: %v", deployutil.LabelForDeployment(latestRc), err)
			return
		}
		shouldInstantiate = !reflect.DeepEqual(newDc.Spec.Template, initial.Spec.Template)
	}
	if !shouldInstantiate {
		return
	}
	c.enqueueDeploymentConfig(newDc)
}
Author: xgwang-zte | Project: origin | Lines: 47 | Source: factory.go
Example 7: cleanupDeployerPods
func (c *DeploymentController) cleanupDeployerPods(deployment *kapi.ReplicationController) error {
	deployerPods, err := c.podClient.getDeployerPodsFor(deployment.Namespace, deployment.Name)
	if err != nil {
		return fmt.Errorf("couldn't fetch deployer pods for %q: %v", deployutil.LabelForDeployment(deployment), err)
	}
	cleanedAll := true
	for _, deployerPod := range deployerPods {
		if err := c.podClient.deletePod(deployerPod.Namespace, deployerPod.Name); err != nil && !kerrors.IsNotFound(err) {
			// if the pod deletion failed, then log the error and continue
			// we will try to delete any remaining deployer pods and return an error later
			utilruntime.HandleError(fmt.Errorf("couldn't delete completed deployer pod %q for deployment %q: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err))
			cleanedAll = false
		}
	}
	if !cleanedAll {
		return actionableError(fmt.Sprintf("couldn't clean up all deployer pods for %s", deployment.Name))
	}
	return nil
}
Author: sgallagher | Project: origin | Lines: 21 | Source: controller.go
Example 8: cleanupDeployerPods
func (c *DeploymentController) cleanupDeployerPods(deployment *kapi.ReplicationController) error {
	selector := deployutil.DeployerPodSelector(deployment.Name)
	deployerList, err := c.podStore.Pods(deployment.Namespace).List(selector)
	if err != nil {
		return fmt.Errorf("couldn't fetch deployer pods for %q: %v", deployutil.LabelForDeployment(deployment), err)
	}
	cleanedAll := true
	gracePeriod := int64(10)
	for _, deployerPod := range deployerList.Items {
		if err := c.pn.Pods(deployerPod.Namespace).Delete(deployerPod.Name, &kapi.DeleteOptions{GracePeriodSeconds: &gracePeriod}); err != nil && !kerrors.IsNotFound(err) {
			// if the pod deletion failed, then log the error and continue
			// we will try to delete any remaining deployer pods and return an error later
			utilruntime.HandleError(fmt.Errorf("couldn't delete completed deployer pod %q for deployment %q: %v", deployerPod.Name, deployutil.LabelForDeployment(deployment), err))
			cleanedAll = false
		}
	}
	if !cleanedAll {
		return actionableError(fmt.Sprintf("couldn't clean up all deployer pods for %s", deployment.Name))
	}
	return nil
}
Author: knobunc | Project: origin | Lines: 23 | Source: controller.go
Example 9: Handle
// Handle processes a deployment and either creates a deployer pod or responds
// to a terminal deployment status. Since this controller started using caches,
// the provided rc MUST be deep-copied beforehand (see work() in factory.go).
func (c *DeploymentController) Handle(deployment *kapi.ReplicationController) error {
	currentStatus := deployutil.DeploymentStatusFor(deployment)
	nextStatus := currentStatus
	deploymentScaled := false
	deployerPodName := deployutil.DeployerPodNameForDeployment(deployment.Name)
	deployer, deployerErr := c.getPod(deployment.Namespace, deployerPodName)
	switch currentStatus {
	case deployapi.DeploymentStatusNew:
		// If the deployment has been cancelled, don't create a deployer pod.
		// Instead try to delete any deployer pods found and transition the
		// deployment to Pending so that the deployment config controller
		// continues to see the deployment as in-flight. Eventually the deletion
		// of the deployer pod should cause a requeue of this deployment and
		// then it can be transitioned to Failed by this controller.
		if deployutil.IsDeploymentCancelled(deployment) {
			nextStatus = deployapi.DeploymentStatusPending
			if err := c.cleanupDeployerPods(deployment); err != nil {
				return err
			}
			break
		}
		// If the pod already exists, it's possible that a previous CreatePod
		// succeeded but the deployment state update failed and now we're re-
		// entering. Ensure that the pod is the one we created by verifying the
		// annotation on it, and throw a retryable error.
		if deployerErr != nil && !kerrors.IsNotFound(deployerErr) {
			return fmt.Errorf("couldn't fetch existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), deployerErr)
		}
		if deployerErr == nil && deployer != nil {
			// Do a stronger check to validate that the existing deployer pod is
			// actually for this deployment, and if not, fail this deployment.
			//
			// TODO: Investigate checking the container image of the running pod and
			// comparing with the intended deployer pod image. If we do so, we'll need
			// to ensure that changes to 'unrelated' pods don't result in updates to
			// the deployment. So, the image check will have to be done in other areas
			// of the code as well.
			if deployutil.DeploymentNameFor(deployer) != deployment.Name {
				nextStatus = deployapi.DeploymentStatusFailed
				deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedUnrelatedDeploymentExists
				c.emitDeploymentEvent(deployment, kapi.EventTypeWarning, "FailedCreate", fmt.Sprintf("Error creating deployer pod since another pod with the same name (%q) exists", deployer.Name))
				glog.V(2).Infof("Couldn't create deployer pod for %s since an unrelated pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), deployer.Name)
			} else {
				// Update to pending relative to the existing validated deployer pod.
				deployment.Annotations[deployapi.DeploymentPodAnnotation] = deployer.Name
				nextStatus = deployapi.DeploymentStatusPending
				glog.V(4).Infof("Detected existing deployer pod %s for deployment %s", deployer.Name, deployutil.LabelForDeployment(deployment))
			}
			// Don't try and re-create the deployer pod.
			break
		}
		if _, ok := deployment.Annotations[deployapi.DeploymentIgnorePodAnnotation]; ok {
			return nil
		}
		// Generate a deployer pod spec.
		deployerPod, err := c.makeDeployerPod(deployment)
		if err != nil {
			return fatalError(fmt.Sprintf("couldn't make deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err))
		}
		// Create the deployer pod.
		deploymentPod, err := c.pn.Pods(deployment.Namespace).Create(deployerPod)
		// Retry on error.
		if err != nil {
			return actionableError(fmt.Sprintf("couldn't create deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err))
		}
		deployment.Annotations[deployapi.DeploymentPodAnnotation] = deploymentPod.Name
		nextStatus = deployapi.DeploymentStatusPending
		glog.V(4).Infof("Created deployer pod %s for deployment %s", deploymentPod.Name, deployutil.LabelForDeployment(deployment))
	case deployapi.DeploymentStatusPending, deployapi.DeploymentStatusRunning:
		switch {
		case kerrors.IsNotFound(deployerErr):
			nextStatus = deployapi.DeploymentStatusFailed
			// If the deployment is cancelled here then we deleted the deployer in a previous
			// resync of the deployment.
			if !deployutil.IsDeploymentCancelled(deployment) {
				deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
				deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedDeployerPodNoLongerExists
				c.emitDeploymentEvent(deployment, kapi.EventTypeWarning, "Failed", fmt.Sprintf("Deployer pod %q has gone missing", deployerPodName))
				glog.V(4).Infof("Failing deployment %q because its deployer pod %q disappeared", deployutil.LabelForDeployment(deployment), deployerPodName)
			}
		case deployerErr != nil:
			// We'll try again later on resync. Continue to process cancellations.
			glog.V(4).Infof("Error getting deployer pod %s for deployment %s: %v", deployerPodName, deployutil.LabelForDeployment(deployment), deployerErr)
		default: /* err == nil */
			// If the deployment has been cancelled, delete any deployer pods
			// found and transition the deployment to Pending so that the
			// deployment config controller continues to see the deployment
			// as in-flight. Eventually the deletion of the deployer pod should
			// cause a requeue of this deployment and then it can be transitioned
			// ... (the rest of this code is omitted) ...
Author: juanvallejo | Project: origin | Lines: 101 | Source: controller.go
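The doc comment on Handle stresses that callers working from a shared cache must pass a deep copy, since Handle mutates the deployment's annotations. A hedged sketch of that caller-side step follows; handleCopied is an invented name, and kapi.Scheme.DeepCopy is assumed to be the copy mechanism of this code's era (as referenced by work() in factory.go):
// handleCopied is a hypothetical wrapper showing the deep-copy contract:
// copy the cached RC first so Handle's annotation writes can't corrupt
// the shared informer cache.
func (c *DeploymentController) handleCopied(cached *kapi.ReplicationController) error {
	obj, err := kapi.Scheme.DeepCopy(cached)
	if err != nil {
		return err
	}
	copied := obj.(*kapi.ReplicationController)
	return c.Handle(copied)
}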
Example 10: Handle
// Handle processes change triggers for config.
func (c *DeploymentConfigChangeController) Handle(config *deployapi.DeploymentConfig) error {
	hasChangeTrigger := false
	for _, trigger := range config.Triggers {
		if trigger.Type == deployapi.DeploymentTriggerOnConfigChange {
			hasChangeTrigger = true
			break
		}
	}
	if !hasChangeTrigger {
		glog.V(4).Infof("Ignoring DeploymentConfig %s; no change triggers detected", deployutil.LabelForDeploymentConfig(config))
		return nil
	}
	if config.LatestVersion == 0 {
		_, _, err := c.generateDeployment(config)
		if err != nil {
			if kerrors.IsConflict(err) {
				return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
			}
			// TODO: This needs to be handled by setting some state within the API so
			// users know why the trigger isn't doing anything.
			// https://github.com/openshift/origin/issues/3526
			return nil
		}
		glog.V(4).Infof("Created initial Deployment for DeploymentConfig %s", deployutil.LabelForDeploymentConfig(config))
		return nil
	}
	latestDeploymentName := deployutil.LatestDeploymentNameForConfig(config)
	deployment, err := c.changeStrategy.getDeployment(config.Namespace, latestDeploymentName)
	if err != nil {
		// If there's no deployment for the latest config, we have no basis of
		// comparison. It's the responsibility of the deployment config controller
		// to make the deployment for the config, so return early.
		if kerrors.IsNotFound(err) {
			glog.V(2).Infof("Ignoring change for DeploymentConfig %s; no existing Deployment found", deployutil.LabelForDeploymentConfig(config))
			return nil
		}
		return fmt.Errorf("couldn't retrieve Deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	deployedConfig, err := c.decodeConfig(deployment)
	if err != nil {
		return fatalError(fmt.Sprintf("error decoding DeploymentConfig from Deployment %s for DeploymentConfig %s: %v", deployutil.LabelForDeployment(deployment), deployutil.LabelForDeploymentConfig(config), err))
	}
	newSpec, oldSpec := config.Template.ControllerTemplate.Template.Spec, deployedConfig.Template.ControllerTemplate.Template.Spec
	if kapi.Semantic.DeepEqual(oldSpec, newSpec) {
		glog.V(2).Infof("Ignoring DeploymentConfig change for %s (latestVersion=%d); same as Deployment %s", deployutil.LabelForDeploymentConfig(config), config.LatestVersion, deployutil.LabelForDeployment(deployment))
		return nil
	}
	fromVersion, toVersion, err := c.generateDeployment(config)
	if err != nil {
		if kerrors.IsConflict(err) {
			return fatalError(fmt.Sprintf("DeploymentConfig %s updated since retrieval; aborting trigger: %v", deployutil.LabelForDeploymentConfig(config), err))
		}
		return fmt.Errorf("couldn't generate deployment for DeploymentConfig %s: %v", deployutil.LabelForDeploymentConfig(config), err)
	}
	glog.V(4).Infof("Updated DeploymentConfig %s from version %d to %d for existing deployment %s", deployutil.LabelForDeploymentConfig(config), fromVersion, toVersion, deployutil.LabelForDeployment(deployment))
	return nil
}
Author: ppitonak | Project: origin | Lines: 64 | Source: controller.go
Example 11: reconcileDeployments
// reconcileDeployments reconciles existing deployment replica counts which
// could have diverged outside the deployment process (e.g. due to auto or
// manual scaling, or partial deployments). The active deployment is the last
// successful deployment, not necessarily the latest in terms of the config
// version. The active deployment replica count should follow the config, and
// all other deployments should be scaled to zero.
//
// Previously, scaling behavior was that the config replica count was used
// only for initial deployments and the active deployment had to be scaled up
// directly. To continue supporting that old behavior we must detect when the
// deployment has been directly manipulated, and if so, preserve the directly
// updated value and sync the config with the deployment.
func (c *DeploymentConfigController) reconcileDeployments(existingDeployments []kapi.ReplicationController, config *deployapi.DeploymentConfig) error {
	latestIsDeployed, latestDeployment := deployutil.LatestDeploymentInfo(config, existingDeployments)
	if !latestIsDeployed {
		// We shouldn't be reconciling if the latest deployment hasn't been
		// created; this is enforced on the calling side, but double checking
		// can't hurt.
		return c.updateStatus(config, existingDeployments)
	}
	activeDeployment := deployutil.ActiveDeployment(existingDeployments)
	// Compute the replica count for the active deployment (even if the active
	// deployment doesn't exist). The active replica count is the value that
	// should be assigned to the config, to allow the replica propagation to
	// flow downward from the config.
	//
	// By default we'll assume the config replicas should be used to update the
	// active deployment except in special cases (like first sync or externally
	// updated deployments.)
	activeReplicas := config.Spec.Replicas
	source := "the deploymentConfig itself (no change)"
	activeDeploymentExists := activeDeployment != nil
	activeDeploymentIsLatest := activeDeploymentExists && activeDeployment.Name == latestDeployment.Name
	latestDesiredReplicas, latestHasDesiredReplicas := deployutil.DeploymentDesiredReplicas(latestDeployment)
	switch {
	case activeDeploymentExists && activeDeploymentIsLatest:
		// The active/latest deployment follows the config unless this is its first
		// sync or if an external change to the deployment replicas is detected.
		lastActiveReplicas, hasLastActiveReplicas := deployutil.DeploymentReplicas(activeDeployment)
		if !hasLastActiveReplicas || lastActiveReplicas != activeDeployment.Spec.Replicas {
			activeReplicas = activeDeployment.Spec.Replicas
			source = fmt.Sprintf("the latest/active deployment %q which was scaled directly or has not previously been synced", deployutil.LabelForDeployment(activeDeployment))
		}
	case activeDeploymentExists && !activeDeploymentIsLatest:
		// The active/non-latest deployment follows the config if it was
		// previously synced; if this is the first sync, infer what the config
		// value should be based on either the latest desired or whatever the
		// deployment is currently scaled to.
		_, hasLastActiveReplicas := deployutil.DeploymentReplicas(activeDeployment)
		if hasLastActiveReplicas {
			break
		}
		if latestHasDesiredReplicas {
			activeReplicas = latestDesiredReplicas
			source = fmt.Sprintf("the desired replicas of latest deployment %q which has not been previously synced", deployutil.LabelForDeployment(latestDeployment))
		} else if activeDeployment.Spec.Replicas > 0 {
			activeReplicas = activeDeployment.Spec.Replicas
			source = fmt.Sprintf("the active deployment %q which has not been previously synced", deployutil.LabelForDeployment(activeDeployment))
		}
	case !activeDeploymentExists && latestHasDesiredReplicas:
		// If there's no active deployment, use the latest desired, if available.
		activeReplicas = latestDesiredReplicas
		source = fmt.Sprintf("the desired replicas of latest deployment %q with no active deployment", deployutil.LabelForDeployment(latestDeployment))
	}
	// Bring the config in sync with the deployment. Once we know the config
	// accurately represents the desired replica count of the active deployment,
	// we can safely reconcile deployments.
	//
	// If the deployment config is test, never update the deployment config based
	// on deployments, since test behavior overrides user scaling.
	switch {
	case config.Spec.Replicas == activeReplicas:
	case config.Spec.Test:
		glog.V(4).Infof("Detected changed replicas for test deploymentConfig %q, ignoring that change", deployutil.LabelForDeploymentConfig(config))
	default:
		copied, err := deployutil.DeploymentConfigDeepCopy(config)
		if err != nil {
			return err
		}
		oldReplicas := copied.Spec.Replicas
		copied.Spec.Replicas = activeReplicas
		config, err = c.dn.DeploymentConfigs(copied.Namespace).Update(copied)
		if err != nil {
			return err
		}
		glog.V(4).Infof("Synced deploymentConfig %q replicas from %d to %d based on %s", deployutil.LabelForDeploymentConfig(config), oldReplicas, activeReplicas, source)
	}
	// Reconcile deployments. The active deployment follows the config, and all
	// other deployments should be scaled to zero.
	var updatedDeployments []kapi.ReplicationController
	for i := range existingDeployments {
		deployment := existingDeployments[i]
		toAppend := deployment
		isActiveDeployment := activeDeployment != nil && deployment.Name == activeDeployment.Name
		// ... (the rest of this code is omitted) ...
Author: rootfs | Project: origin | Lines: 101 | Source: controller.go
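The listing above is cut off before the loop that actually applies the rule stated in its comments: the active deployment follows the config, everything else scales to zero. Purely as an illustration of that rule — not the omitted upstream code — a continuation could look roughly like this; scaleDeployment is an invented helper:
	// Illustrative continuation only: enforce the reconciliation rule.
	for i := range existingDeployments {
		deployment := existingDeployments[i]
		desired := 0
		if activeDeployment != nil && deployment.Name == activeDeployment.Name {
			desired = activeReplicas
		}
		if deployment.Spec.Replicas != desired {
			// scaleDeployment is hypothetical: scale the RC and return the updated copy.
			updated, err := c.scaleDeployment(&deployment, desired)
			if err != nil {
				return err
			}
			updatedDeployments = append(updatedDeployments, *updated)
			continue
		}
		updatedDeployments = append(updatedDeployments, deployment)
	}
	return c.updateStatus(config, updatedDeployments)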
Example 12: Get
// Get returns a streamer resource with the contents of the deployment log
func (r *REST) Get(ctx kapi.Context, name string, opts runtime.Object) (runtime.Object, error) {
	// Ensure we have a namespace in the context
	namespace, ok := kapi.NamespaceFrom(ctx)
	if !ok {
		return nil, errors.NewBadRequest("namespace parameter required.")
	}
	// Validate DeploymentLogOptions
	deployLogOpts, ok := opts.(*deployapi.DeploymentLogOptions)
	if !ok {
		return nil, errors.NewBadRequest("did not get an expected options.")
	}
	if errs := validation.ValidateDeploymentLogOptions(deployLogOpts); len(errs) > 0 {
		return nil, errors.NewInvalid("deploymentLogOptions", "", errs)
	}
	// Fetch deploymentConfig and check latest version; if 0, there are no deployments
	// for this config
	config, err := r.ConfigGetter.DeploymentConfigs(namespace).Get(name)
	if err != nil {
		return nil, errors.NewNotFound("deploymentConfig", name)
	}
	desiredVersion := config.Status.LatestVersion
	if desiredVersion == 0 {
		return nil, errors.NewBadRequest(fmt.Sprintf("no deployment exists for deploymentConfig %q", config.Name))
	}
	// Support retrieving logs for older deployments
	switch {
	case deployLogOpts.Version == nil:
		// Latest
	case *deployLogOpts.Version <= 0 || int(*deployLogOpts.Version) > config.Status.LatestVersion:
		// Invalid version
		return nil, errors.NewBadRequest(fmt.Sprintf("invalid version for deploymentConfig %q: %d", config.Name, *deployLogOpts.Version))
	default:
		desiredVersion = int(*deployLogOpts.Version)
	}
	// Get desired deployment
	targetName := deployutil.DeploymentNameForConfigVersion(config.Name, desiredVersion)
	target, err := r.DeploymentGetter.ReplicationControllers(namespace).Get(targetName)
	if err != nil {
		return nil, err
	}
	// Check for deployment status; if it is new or pending, we will wait for it. If it is complete,
	// the deployment completed successfully and the deployer pod will be deleted so we will return a
	// success message. If it is running or failed, retrieve the log from the deployer pod.
	status := deployutil.DeploymentStatusFor(target)
	switch status {
	case deployapi.DeploymentStatusNew, deployapi.DeploymentStatusPending:
		if deployLogOpts.NoWait {
			glog.V(4).Infof("Deployment %s is in %s state. No logs to retrieve yet.", deployutil.LabelForDeployment(target), status)
			return &genericrest.LocationStreamer{}, nil
		}
		glog.V(4).Infof("Deployment %s is in %s state, waiting for it to start...", deployutil.LabelForDeployment(target), status)
		latest, ok, err := registry.WaitForRunningDeployment(r.DeploymentGetter, target, r.Timeout)
		if err != nil {
			return nil, errors.NewBadRequest(fmt.Sprintf("unable to wait for deployment %s to run: %v", deployutil.LabelForDeployment(target), err))
		}
		if !ok {
			return nil, errors.NewTimeoutError(fmt.Sprintf("timed out waiting for deployment %s to start after %s", deployutil.LabelForDeployment(target), r.Timeout), 1)
		}
		if deployutil.DeploymentStatusFor(latest) == deployapi.DeploymentStatusComplete {
			// Deployer pod has been deleted, no logs to retrieve
			glog.V(4).Infof("Deployment %s was successful so the deployer pod is deleted. No logs to retrieve.", deployutil.LabelForDeployment(target))
			return &genericrest.LocationStreamer{}, nil
		}
	case deployapi.DeploymentStatusComplete:
		// Deployer pod has been deleted, no logs to retrieve
		glog.V(4).Infof("Deployment %s was successful so the deployer pod is deleted. No logs to retrieve.", deployutil.LabelForDeployment(target))
		return &genericrest.LocationStreamer{}, nil
	}
	// Set up the URL of the deployer pod
	deployPodName := deployutil.DeployerPodNameForDeployment(target.Name)
	logOpts := deployapi.DeploymentToPodLogOptions(deployLogOpts)
	location, transport, err := pod.LogLocation(r.PodGetter, r.ConnectionInfo, ctx, deployPodName, logOpts)
	if err != nil {
		return nil, errors.NewBadRequest(err.Error())
	}
	return &genericrest.LocationStreamer{
		Location:        location,
		Transport:       transport,
		ContentType:     "text/plain",
		Flush:           deployLogOpts.Follow,
		ResponseChecker: genericrest.NewGenericHttpResponseChecker("Pod", deployPodName),
	}, nil
}
Author: erinboyd | Project: origin | Lines: 92 | Source: rest.go
Example 13: Deploy
func (s *RollingDeploymentStrategy) Deploy(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int) error {
	config, err := deployutil.DecodeDeploymentConfig(to, s.codec)
	if err != nil {
		return fmt.Errorf("couldn't decode DeploymentConfig from deployment %s: %v", deployutil.LabelForDeployment(to), err)
	}
	params := config.Template.Strategy.RollingParams
	updateAcceptor := s.getUpdateAcceptor(time.Duration(*params.TimeoutSeconds) * time.Second)
	// If there's no prior deployment, delegate to another strategy since the
	// rolling updater only supports transitioning between two deployments.
	//
	// Hook support is duplicated here for now. When the rolling updater can
	// handle initial deployments, all of this code can go away.
	if from == nil {
		// Execute any pre-hook.
		if params.Pre != nil {
			err := s.hookExecutor.Execute(params.Pre, to, "prehook")
			if err != nil {
				return fmt.Errorf("pre hook failed: %s", err)
			}
			glog.Infof("Pre hook finished")
		}
		// Execute the delegate strategy.
		err := s.initialStrategy.DeployWithAcceptor(from, to, desiredReplicas, updateAcceptor)
		if err != nil {
			return err
		}
		// Execute any post-hook. Errors are logged and ignored.
		if params.Post != nil {
			err := s.hookExecutor.Execute(params.Post, to, "posthook")
			if err != nil {
				util.HandleError(fmt.Errorf("post hook failed: %s", err))
			} else {
				glog.Infof("Post hook finished")
			}
		}
		// All done.
		return nil
	}
	// Prepare for a rolling update.
	// Execute any pre-hook.
	if params.Pre != nil {
		err := s.hookExecutor.Execute(params.Pre, to, "prehook")
		if err != nil {
			return fmt.Errorf("pre hook failed: %s", err)
		}
		glog.Infof("Pre hook finished")
	}
	// HACK: Assign the source ID annotation that the rolling updater expects,
	// unless it already exists on the deployment.
	//
	// Related upstream issue:
	// https://github.com/kubernetes/kubernetes/pull/7183
	err = wait.Poll(s.apiRetryPeriod, s.apiRetryTimeout, func() (done bool, err error) {
		existing, err := s.client.ReplicationControllers(to.Namespace).Get(to.Name)
		if err != nil {
			msg := fmt.Sprintf("couldn't look up deployment %s: %s", deployutil.LabelForDeployment(to), err)
			if kerrors.IsNotFound(err) {
				return false, fmt.Errorf("%s", msg)
			}
			// Try again.
			glog.Info(msg)
			return false, nil
		}
		if _, hasSourceId := existing.Annotations[sourceIdAnnotation]; !hasSourceId {
			existing.Annotations[sourceIdAnnotation] = fmt.Sprintf("%s:%s", from.Name, from.ObjectMeta.UID)
			if _, err := s.client.ReplicationControllers(existing.Namespace).Update(existing); err != nil {
				msg := fmt.Sprintf("couldn't assign source annotation to deployment %s: %v", deployutil.LabelForDeployment(existing), err)
				if kerrors.IsNotFound(err) {
					return false, fmt.Errorf("%s", msg)
				}
				// Try again.
				glog.Info(msg)
				return false, nil
			}
		}
		return true, nil
	})
	if err != nil {
		return err
	}
	to, err = s.client.ReplicationControllers(to.Namespace).Get(to.Name)
	if err != nil {
		return err
	}
	// HACK: There's a validation in the rolling updater which assumes that when
	// an existing RC is supplied, it will have >0 replicas - a validation which
	// is then disregarded, as the desired count is obtained from the annotation
	// on the RC. For now, fake it out by just setting replicas to 1.
	//
	// Related upstream issue:
	// https://github.com/kubernetes/kubernetes/pull/7183
	to.Spec.Replicas = 1
	// ... (the rest of this code is omitted) ...
Author: ncantor | Project: origin | Lines: 101 | Source: rolling.go
Example 14: Handle
// Handle processes a deployment and either creates a deployer pod or responds
// to a terminal deployment status.
func (c *DeploymentController) Handle(deployment *kapi.ReplicationController) error {
	currentStatus := deployutil.DeploymentStatusFor(deployment)
	nextStatus := currentStatus
	deploymentScaled := false
	switch currentStatus {
	case deployapi.DeploymentStatusNew:
		// If the deployment has been cancelled, don't create a deployer pod, and
		// transition to failed immediately.
		if deployutil.IsDeploymentCancelled(deployment) {
			nextStatus = deployapi.DeploymentStatusFailed
			break
		}
		// Generate a deployer pod spec.
		podTemplate, err := c.makeDeployerPod(deployment)
		if err != nil {
			return fatalError(fmt.Sprintf("couldn't make deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err))
		}
		// Create the deployer pod.
		deploymentPod, err := c.podClient.createPod(deployment.Namespace, podTemplate)
		if err == nil {
			deployment.Annotations[deployapi.DeploymentPodAnnotation] = deploymentPod.Name
			nextStatus = deployapi.DeploymentStatusPending
			glog.V(4).Infof("Created pod %s for deployment %s", deploymentPod.Name, deployutil.LabelForDeployment(deployment))
			break
		}
		// Retry on error.
		if !kerrors.IsAlreadyExists(err) {
			c.recorder.Eventf(deployment, kapi.EventTypeWarning, "FailedCreate", "Error creating deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
			return fmt.Errorf("couldn't create deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}
		// If the pod already exists, it's possible that a previous CreatePod
		// succeeded but the deployment state update failed and now we're re-
		// entering. Ensure that the pod is the one we created by verifying the
		// annotation on it, and throw a retryable error.
		existingPod, err := c.podClient.getPod(deployment.Namespace, deployutil.DeployerPodNameForDeployment(deployment.Name))
		if err != nil {
			c.recorder.Eventf(deployment, kapi.EventTypeWarning, "FailedCreate", "Error getting existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
			return fmt.Errorf("couldn't fetch existing deployer pod for %s: %v", deployutil.LabelForDeployment(deployment), err)
		}
		// Do a stronger check to validate that the existing deployer pod is
		// actually for this deployment, and if not, fail this deployment.
		//
		// TODO: Investigate checking the container image of the running pod and
		// comparing with the intended deployer pod image. If we do so, we'll need
		// to ensure that changes to 'unrelated' pods don't result in updates to
		// the deployment. So, the image check will have to be done in other areas
		// of the code as well.
		if deployutil.DeploymentNameFor(existingPod) != deployment.Name {
			nextStatus = deployapi.DeploymentStatusFailed
			deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedUnrelatedDeploymentExists
			c.recorder.Eventf(deployment, kapi.EventTypeWarning, "FailedCreate", "Error creating deployer pod for %s since another pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), existingPod.Name)
			glog.V(2).Infof("Couldn't create deployer pod for %s since an unrelated pod with the same name (%q) exists", deployutil.LabelForDeployment(deployment), existingPod.Name)
			break
		}
		// Update to pending relative to the existing validated deployer pod.
		deployment.Annotations[deployapi.DeploymentPodAnnotation] = existingPod.Name
		nextStatus = deployapi.DeploymentStatusPending
		glog.V(4).Infof("Detected existing deployer pod %s for deployment %s", existingPod.Name, deployutil.LabelForDeployment(deployment))
	case deployapi.DeploymentStatusPending, deployapi.DeploymentStatusRunning:
		// If the deployer pod has vanished, consider the deployment a failure.
		deployerPodName := deployutil.DeployerPodNameForDeployment(deployment.Name)
		if _, err := c.podClient.getPod(deployment.Namespace, deployerPodName); err != nil {
			if kerrors.IsNotFound(err) {
				nextStatus = deployapi.DeploymentStatusFailed
				deployment.Annotations[deployapi.DeploymentStatusAnnotation] = string(nextStatus)
				deployment.Annotations[deployapi.DeploymentStatusReasonAnnotation] = deployapi.DeploymentFailedDeployerPodNoLongerExists
				c.recorder.Eventf(deployment, kapi.EventTypeWarning, "failed", "Deployer pod %q has gone missing", deployerPodName)
				glog.V(4).Infof("Failing deployment %q because its deployer pod %q disappeared", deployutil.LabelForDeployment(deployment), deployerPodName)
				break
			} else {
				// We'll try again later on resync. Continue to process cancellations.
				glog.V(2).Infof("Error getting deployer pod %s for deployment %s: %#v", deployerPodName, deployutil.LabelForDeployment(deployment), err)
			}
		}
		// If the deployment is cancelled, terminate any deployer/hook pods.
		// NOTE: Do not mark the deployment as Failed just yet.
		// The deployment will be marked as Failed by the deployer pod controller
		// when the deployer pod failure state is picked up.
		// Also, it will scale down the failed deployment and scale back up
		// the last successfully completed deployment.
		if deployutil.IsDeploymentCancelled(deployment) {
			deployerPods, err := c.podClient.getDeployerPodsFor(deployment.Namespace, deployment.Name)
			if err != nil {
				return fmt.Errorf("couldn't fetch deployer pods for %s while trying to cancel deployment: %v", deployutil.LabelForDeployment(deployment), err)
			}
			glog.V(4).Infof("Cancelling %d deployer pods for deployment %s", len(deployerPods), deployutil.LabelForDeployment(deployment))
			zeroDelay := int64(1)
			for _, deployerPod := range deployerPods {
				// Set the ActiveDeadlineSeconds on the pod so it's terminated very soon.
				if deployerPod.Spec.ActiveDeadlineSeconds == nil || *deployerPod.Spec.ActiveDeadlineSeconds != zeroDelay {
					// ... (the rest of this code is omitted) ...
Author: richm | Project: origin | Lines: 101 | Source: controller.go
Example 15: DeployWithAcceptor
// DeployWithAcceptor scales down from and then scales up to. If
// updateAcceptor is provided and the desired replica count is >1, the first
// replica of to is rolled out and validated before performing the full scale
// up.
//
// This is currently only used in conjunction with the rolling update strategy
// for initial deployments.
func (s *RecreateDeploymentStrategy) DeployWithAcceptor(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
	config, err := deployutil.DecodeDeploymentConfig(to, s.codec)
	if err != nil {
		return fmt.Errorf("couldn't decode config from deployment %s: %v", to.Name, err)
	}
	params := config.Spec.Strategy.RecreateParams
	retryParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)
	waitParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)
	// ... (the rest of this code is omitted) ...