This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/api/v1.IsPodReady. If you are wondering what IsPodReady does, how to call it, or what real-world usage looks like, the curated examples below should help.
A total of 17 code examples of the IsPodReady function are shown below, ordered by popularity.
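Before the examples, a minimal sketch of the semantics relied on throughout this page: IsPodReady reports whether the pod's PodReady condition is ConditionTrue (Example 6 below walks the same condition list). The re-implementation and the constructed pod here are illustrative assumptions, not the library source:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/v1"
)

// isPodReadySketch mirrors what v1.IsPodReady checks: scan the pod's
// status conditions for the PodReady condition and test its status.
func isPodReadySketch(pod *v1.Pod) bool {
    for _, c := range pod.Status.Conditions {
        if c.Type == v1.PodReady {
            return c.Status == v1.ConditionTrue
        }
    }
    return false
}

func main() {
    pod := &v1.Pod{
        Status: v1.PodStatus{
            Conditions: []v1.PodCondition{
                {Type: v1.PodReady, Status: v1.ConditionTrue},
            },
        },
    }
    fmt.Println(v1.IsPodReady(pod), isPodReadySketch(pod)) // true true
}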
Example 1: Less
func (s ActivePods) Less(i, j int) bool {
    // 1. Unassigned < assigned
    // If only one of the pods is unassigned, the unassigned one is smaller
    if s[i].Spec.NodeName != s[j].Spec.NodeName && (len(s[i].Spec.NodeName) == 0 || len(s[j].Spec.NodeName) == 0) {
        return len(s[i].Spec.NodeName) == 0
    }
    // 2. PodPending < PodUnknown < PodRunning
    m := map[v1.PodPhase]int{v1.PodPending: 0, v1.PodUnknown: 1, v1.PodRunning: 2}
    if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
        return m[s[i].Status.Phase] < m[s[j].Status.Phase]
    }
    // 3. Not ready < ready
    // If only one of the pods is not ready, the not ready one is smaller
    if v1.IsPodReady(s[i]) != v1.IsPodReady(s[j]) {
        return !v1.IsPodReady(s[i])
    }
    // TODO: take availability into account when we push minReadySeconds information from deployment into pods,
    // see https://github.com/kubernetes/kubernetes/issues/22065
    // 4. Been ready for empty time < less time < more time
    // If both pods are ready, the latest ready one is smaller
    if v1.IsPodReady(s[i]) && v1.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
        return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
    }
    // 5. Pods with containers with higher restart counts < lower restart counts
    if maxContainerRestarts(s[i]) != maxContainerRestarts(s[j]) {
        return maxContainerRestarts(s[i]) > maxContainerRestarts(s[j])
    }
    // 6. Empty creation time pods < newer pods < older pods
    if !s[i].CreationTimestamp.Equal(s[j].CreationTimestamp) {
        return afterOrZero(s[i].CreationTimestamp, s[j].CreationTimestamp)
    }
    return false
}
Author: jonboulle | Project: kubernetes | Lines: 33 | Source: controller_utils.go
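ActivePods implements sort.Interface, so callers can order a pod slice from most to least preferable to delete. A hedged usage fragment (the pods slice and its contents are assumptions):

// Hypothetical usage: after sorting, pods[0] is the first candidate
// for deletion — unassigned, pending, not-ready, recently-ready, and
// frequently-restarting pods sort toward the front.
pods := []*v1.Pod{ /* ... pods owned by a controller ... */ }
sort.Sort(ActivePods(pods))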
Example 2: updatePod
// When a pod is updated, figure out what controller/s manage it and wake them
// up. If the labels of the pod have changed we need to awaken both the old
// and new controller. old and cur must be *v1.Pod types.
func (rm *ReplicationManager) updatePod(old, cur interface{}) {
    curPod := cur.(*v1.Pod)
    oldPod := old.(*v1.Pod)
    if curPod.ResourceVersion == oldPod.ResourceVersion {
        // Periodic resync will send update events for all known pods.
        // Two different versions of the same pod will always have different RVs.
        return
    }
    glog.V(4).Infof("Pod %s updated, objectMeta %+v -> %+v.", curPod.Name, oldPod.ObjectMeta, curPod.ObjectMeta)
    labelChanged := !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
    if curPod.DeletionTimestamp != nil {
        // When a pod is deleted gracefully, its deletion timestamp is first modified to reflect a grace period,
        // and after such time has passed, the kubelet actually deletes it from the store. We receive an update
        // for modification of the deletion timestamp and expect an rc to create more replicas asap, not wait
        // until the kubelet actually deletes the pod. This is different from the Phase of a pod changing, because
        // an rc never initiates a phase change, and so is never asleep waiting for the same.
        rm.deletePod(curPod)
        if labelChanged {
            // we don't need to check the oldPod.DeletionTimestamp because DeletionTimestamp cannot be unset.
            rm.deletePod(oldPod)
        }
        return
    }
    // Only need to get the old controller if the labels changed.
    // Enqueue the oldRC before the curRC to give curRC a chance to adopt the oldPod.
    if labelChanged {
        // If the old and new rc are the same, the first one that syncs
        // will set expectations preventing any damage from the second.
        if oldRC := rm.getPodController(oldPod); oldRC != nil {
            rm.enqueueController(oldRC)
        }
    }
    changedToReady := !v1.IsPodReady(oldPod) && v1.IsPodReady(curPod)
    if curRC := rm.getPodController(curPod); curRC != nil {
        rm.enqueueController(curRC)
        // TODO: MinReadySeconds in the Pod will generate an Available condition to be added in
        // the Pod status which in turn will trigger a requeue of the owning replication controller
        // thus having its status updated with the newly available replica. For now, we can fake the
        // update by resyncing the controller MinReadySeconds after it is requeued because a Pod
        // transitioned to Ready.
        // Note that this still suffers from #29229, we are just moving the problem one level
        // "closer" to kubelet (from the deployment to the replication controller manager).
        if changedToReady && curRC.Spec.MinReadySeconds > 0 {
            glog.V(2).Infof("ReplicationController %q will be enqueued after %ds for availability check", curRC.Name, curRC.Spec.MinReadySeconds)
            rm.enqueueControllerAfter(curRC, time.Duration(curRC.Spec.MinReadySeconds)*time.Second)
        }
    }
}
Author: abutcher | Project: kubernetes | Lines: 53 | Source: replication_controller.go
Example 3: IsReady
func (cc *ConformanceContainer) IsReady() (bool, error) {
    pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
    if err != nil {
        return false, err
    }
    return v1.IsPodReady(pod), nil
}
Author: alex-mohr | Project: kubernetes | Lines: 7 | Source: container.go
Example 4: scale
func (p *statefulSetTester) scale(ps *apps.StatefulSet, count int32) error {
    name := ps.Name
    ns := ps.Namespace
    p.update(ns, name, func(ps *apps.StatefulSet) { *(ps.Spec.Replicas) = count })
    var petList *v1.PodList
    pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
        petList = p.getPodList(ps)
        if int32(len(petList.Items)) == count {
            return true, nil
        }
        return false, nil
    })
    if pollErr != nil {
        unhealthy := []string{}
        for _, pet := range petList.Items {
            delTs, phase, readiness := pet.DeletionTimestamp, pet.Status.Phase, v1.IsPodReady(&pet)
            if delTs != nil || phase != v1.PodRunning || !readiness {
                unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", pet.Name, delTs, phase, readiness))
            }
        }
        return fmt.Errorf("Failed to scale statefulset to %d in %v. Remaining pods:\n%v", count, statefulsetTimeout, unhealthy)
    }
    return nil
}
Author: abutcher | Project: kubernetes | Lines: 25 | Source: petset.go
Example 5: waitForRunning
func (p *statefulSetTester) waitForRunning(numPets int32, ps *apps.StatefulSet, shouldBeReady bool) {
    pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout,
        func() (bool, error) {
            podList := p.getPodList(ps)
            if int32(len(podList.Items)) < numPets {
                framework.Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPets)
                return false, nil
            }
            if int32(len(podList.Items)) > numPets {
                return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
            }
            for _, p := range podList.Items {
                isReady := v1.IsPodReady(&p)
                desiredReadiness := shouldBeReady == isReady
                framework.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
                if p.Status.Phase != v1.PodRunning || !desiredReadiness {
                    return false, nil
                }
            }
            return true, nil
        })
    if pollErr != nil {
        framework.Failf("Failed waiting for pods to enter running: %v", pollErr)
    }
}
Author: abutcher | Project: kubernetes | Lines: 25 | Source: petset.go
Example 6: podReadyTime
func podReadyTime(pod *v1.Pod) metav1.Time {
    if v1.IsPodReady(pod) {
        for _, c := range pod.Status.Conditions {
            // we only care about pod ready conditions
            if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
                return c.LastTransitionTime
            }
        }
    }
    return metav1.Time{}
}
Author: jonboulle | Project: kubernetes | Lines: 11 | Source: controller_utils.go
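podReadyTime returns the PodReady condition's LastTransitionTime, or the zero metav1.Time for a pod that is not ready. A hedged fragment showing how that turns into a "ready for how long" measurement (the pod variable is an assumption):

// Hypothetical: report how long a pod has been ready.
readySince := podReadyTime(pod)
if !readySince.IsZero() {
    fmt.Printf("pod %s has been ready for %v\n", pod.Name, time.Since(readySince.Time))
}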
Example 7: PodRunningAndReady
// PodRunningAndReady returns true if the pod is running and ready, false if the pod has not
// yet reached those states, ErrPodCompleted if the pod has run to completion, or
// an error in any other case.
func PodRunningAndReady(event watch.Event) (bool, error) {
    switch event.Type {
    case watch.Deleted:
        return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, "")
    }
    switch t := event.Object.(type) {
    case *v1.Pod:
        switch t.Status.Phase {
        case v1.PodFailed, v1.PodSucceeded:
            return false, ErrPodCompleted
        case v1.PodRunning:
            return v1.IsPodReady(t), nil
        }
    }
    return false, nil
}
Author: alex-mohr | Project: kubernetes | Lines: 19 | Source: conditions.go
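Because PodRunningAndReady has the shape of a watch condition function, it can be passed straight to watch.Until, just as several tests on this page do with inline closures. A hedged fragment (the watcher variable and the timeout value are assumptions):

// Hypothetical: block until the watched pod is both Running and Ready,
// or fail with ErrPodCompleted, a not-found error, or a timeout.
event, err := watch.Until(2*time.Minute, watcher, PodRunningAndReady)
if err != nil {
    glog.Errorf("pod never became ready: %v", err)
}
_ = event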
Example 8: updateDaemonSetStatus
func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet) error {
    glog.V(4).Infof("Updating daemon set status")
    nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
    if err != nil {
        return fmt.Errorf("error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
    }
    nodeList, err := dsc.nodeStore.List()
    if err != nil {
        return fmt.Errorf("couldn't get list of nodes when updating daemon set %#v: %v", ds, err)
    }
    var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int
    for _, node := range nodeList.Items {
        wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(&node, ds)
        if err != nil {
            return err
        }
        scheduled := len(nodeToDaemonPods[node.Name]) > 0
        if wantToRun {
            desiredNumberScheduled++
            if scheduled {
                currentNumberScheduled++
                // Sort the daemon pods by creation time, so that the oldest is first.
                daemonPods := nodeToDaemonPods[node.Name]
                sort.Sort(podByCreationTimestamp(daemonPods))
                if v1.IsPodReady(daemonPods[0]) {
                    numberReady++
                }
            }
        } else {
            if scheduled {
                numberMisscheduled++
            }
        }
    }
    err = storeDaemonSetStatus(dsc.kubeClient.Extensions().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady)
    if err != nil {
        return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
    }
    return nil
}
Author: kubernetes | Project: kubernetes | Lines: 46 | Source: daemoncontroller.go
Example 9: isHealthy
// isHealthy returns true if the pod is ready & running. If the pod has the
// "pod.alpha.kubernetes.io/initialized" annotation set to "false", pod state is ignored.
func (d *defaultPetHealthChecker) isHealthy(pod *v1.Pod) bool {
    if pod == nil || pod.Status.Phase != v1.PodRunning {
        return false
    }
    podReady := v1.IsPodReady(pod)
    // User may have specified a pod readiness override through a debug annotation.
    initialized, ok := pod.Annotations[StatefulSetInitAnnotation]
    if ok {
        if initAnnotation, err := strconv.ParseBool(initialized); err != nil {
            glog.V(4).Infof("Failed to parse %v annotation on pod %v: %v", StatefulSetInitAnnotation, pod.Name, err)
        } else if !initAnnotation {
            glog.V(4).Infof("StatefulSet pod %v waiting on annotation %v", pod.Name, StatefulSetInitAnnotation)
            podReady = initAnnotation
        }
    }
    return podReady
}
Author: jonboulle | Project: kubernetes | Lines: 20 | Source: pet.go
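The annotation check gives operators a readiness override: setting the init annotation to "false" makes isHealthy report false even for a Running, Ready pod. A hedged in-memory fragment (how the annotation reaches the API server is out of scope here):

// Hypothetical: mark a pod as uninitialized so isHealthy ignores
// its actual readiness and reports false.
if pod.Annotations == nil {
    pod.Annotations = map[string]string{}
}
pod.Annotations[StatefulSetInitAnnotation] = "false"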
Example 10: countHealthyPods
func countHealthyPods(pods []*v1.Pod, disruptedPods map[string]metav1.Time, currentTime time.Time) (currentHealthy int32) {
Pod:
    for _, pod := range pods {
        // Pod is being deleted.
        if pod.DeletionTimestamp != nil {
            continue
        }
        // Pod is expected to be deleted soon.
        if disruptionTime, found := disruptedPods[pod.Name]; found && disruptionTime.Time.Add(DeletionTimeout).After(currentTime) {
            continue
        }
        if v1.IsPodReady(pod) {
            currentHealthy++
            continue Pod
        }
    }
    return
}
Author: kubernetes | Project: kubernetes | Lines: 19 | Source: disruption.go
Example 11: calculateStatus
func calculateStatus(rs extensions.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) extensions.ReplicaSetStatus {
    newStatus := rs.Status
    // Count the number of pods that have labels matching the labels of the pod
    // template of the replica set; the matching pods may have more labels than
    // are in the template. Because the labels of the podTemplateSpec are a
    // superset of the replica set's selector, all matching pods must be part
    // of filteredPods.
    fullyLabeledReplicasCount := 0
    readyReplicasCount := 0
    availableReplicasCount := 0
    templateLabel := labels.Set(rs.Spec.Template.Labels).AsSelectorPreValidated()
    for _, pod := range filteredPods {
        if templateLabel.Matches(labels.Set(pod.Labels)) {
            fullyLabeledReplicasCount++
        }
        if v1.IsPodReady(pod) {
            readyReplicasCount++
            if v1.IsPodAvailable(pod, rs.Spec.MinReadySeconds, metav1.Now()) {
                availableReplicasCount++
            }
        }
    }
    failureCond := GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
    if manageReplicasErr != nil && failureCond == nil {
        var reason string
        if diff := len(filteredPods) - int(*(rs.Spec.Replicas)); diff < 0 {
            reason = "FailedCreate"
        } else if diff > 0 {
            reason = "FailedDelete"
        }
        cond := NewReplicaSetCondition(extensions.ReplicaSetReplicaFailure, v1.ConditionTrue, reason, manageReplicasErr.Error())
        SetCondition(&newStatus, cond)
    } else if manageReplicasErr == nil && failureCond != nil {
        RemoveCondition(&newStatus, extensions.ReplicaSetReplicaFailure)
    }
    newStatus.Replicas = int32(len(filteredPods))
    newStatus.FullyLabeledReplicas = int32(fullyLabeledReplicasCount)
    newStatus.ReadyReplicas = int32(readyReplicasCount)
    newStatus.AvailableReplicas = int32(availableReplicasCount)
    return newStatus
}
Author: nak3 | Project: kubernetes | Lines: 43 | Source: replica_set_utils.go
Example 12: checkExistingRCRecovers
func checkExistingRCRecovers(f *framework.Framework) {
    By("assert that the pre-existing replication controller recovers")
    podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
    rcSelector := labels.Set{"name": "baz"}.AsSelector()
    By("deleting pods from existing replication controller")
    framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
        options := v1.ListOptions{LabelSelector: rcSelector.String()}
        pods, err := podClient.List(options)
        if err != nil {
            framework.Logf("apiserver returned error, as expected before recovery: %v", err)
            return false, nil
        }
        if len(pods.Items) == 0 {
            return false, nil
        }
        for _, pod := range pods.Items {
            err = podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
            Expect(err).NotTo(HaveOccurred())
        }
        framework.Logf("apiserver has recovered")
        return true, nil
    }))
    By("waiting for replication controller to recover")
    framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
        options := v1.ListOptions{LabelSelector: rcSelector.String()}
        pods, err := podClient.List(options)
        Expect(err).NotTo(HaveOccurred())
        for _, pod := range pods.Items {
            if pod.DeletionTimestamp == nil && v1.IsPodReady(&pod) {
                return true, nil
            }
        }
        return false, nil
    }))
}
Author: alex-mohr | Project: kubernetes | Lines: 37 | Source: etcd_failure.go
Example 13: GetResourceReplicas
// GetResourceReplicas calculates the desired replica count based on a target resource utilization percentage
// of the given resource for pods matching the given selector in the given namespace, and the current replica count
func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUtilization int32, resource v1.ResourceName, namespace string, selector labels.Selector) (replicaCount int32, utilization int32, timestamp time.Time, err error) {
    metrics, timestamp, err := c.metricsClient.GetResourceMetric(resource, namespace, selector)
    if err != nil {
        return 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
    }
    podList, err := c.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
    if err != nil {
        return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
    }
    if len(podList.Items) == 0 {
        return 0, 0, time.Time{}, fmt.Errorf("no pods returned by selector while calculating replica count")
    }
    requests := make(map[string]int64, len(podList.Items))
    readyPodCount := 0
    unreadyPods := sets.NewString()
    missingPods := sets.NewString()
    for _, pod := range podList.Items {
        podSum := int64(0)
        for _, container := range pod.Spec.Containers {
            if containerRequest, ok := container.Resources.Requests[resource]; ok {
                podSum += containerRequest.MilliValue()
            } else {
                return 0, 0, time.Time{}, fmt.Errorf("missing request for %s on container %s in pod %s/%s", resource, container.Name, namespace, pod.Name)
            }
        }
        requests[pod.Name] = podSum
        if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) {
            // save this pod name for later, but pretend it doesn't exist for now
            unreadyPods.Insert(pod.Name)
            delete(metrics, pod.Name)
            continue
        }
        if _, found := metrics[pod.Name]; !found {
            // save this pod name for later, but pretend it doesn't exist for now
            missingPods.Insert(pod.Name)
            continue
        }
        readyPodCount++
    }
    if len(metrics) == 0 {
        return 0, 0, time.Time{}, fmt.Errorf("did not receive metrics for any ready pods")
    }
    usageRatio, utilization, err := metricsclient.GetResourceUtilizationRatio(metrics, requests, targetUtilization)
    if err != nil {
        return 0, 0, time.Time{}, err
    }
    rebalanceUnready := len(unreadyPods) > 0 && usageRatio > 1.0
    if !rebalanceUnready && len(missingPods) == 0 {
        if math.Abs(1.0-usageRatio) <= tolerance {
            // return the current replicas if the change would be too small
            return currentReplicas, utilization, timestamp, nil
        }
        // if we don't have any unready or missing pods, we can calculate the new replica count now
        return int32(math.Ceil(usageRatio * float64(readyPodCount))), utilization, timestamp, nil
    }
    if len(missingPods) > 0 {
        if usageRatio < 1.0 {
            // on a scale-down, treat missing pods as using 100% of the resource request
            for podName := range missingPods {
                metrics[podName] = requests[podName]
            }
        } else if usageRatio > 1.0 {
            // on a scale-up, treat missing pods as using 0% of the resource request
            for podName := range missingPods {
                metrics[podName] = 0
            }
        }
    }
    if rebalanceUnready {
        // on a scale-up, treat unready pods as using 0% of the resource request
        for podName := range unreadyPods {
            metrics[podName] = 0
        }
    }
    // re-run the utilization calculation with our new numbers
    newUsageRatio, _, err := metricsclient.GetResourceUtilizationRatio(metrics, requests, targetUtilization)
    if err != nil {
        return 0, utilization, time.Time{}, err
    }
    if math.Abs(1.0-newUsageRatio) <= tolerance || (usageRatio < 1.0 && newUsageRatio > 1.0) || (usageRatio > 1.0 && newUsageRatio < 1.0) {
        // return the current replicas if the change would be too small,
        // or if the new usage ratio would cause a change in scale direction
        // ......... (remainder of this function omitted) .........
Author: alex-mohr | Project: kubernetes | Lines: 101 | Source: replica_calculator.go
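The core scaling rule in this example is newReplicas = ceil(usageRatio × readyPodCount), where usageRatio is the observed utilization over the target utilization and only Running, Ready pods are counted. A hedged arithmetic sketch with made-up numbers:

package main

import (
    "fmt"
    "math"
)

func main() {
    // Hypothetical numbers: 4 ready pods averaging 80% utilization
    // against a 50% target gives usageRatio 1.6, so scale up to ceil(6.4) = 7.
    usageRatio := 0.80 / 0.50
    newReplicas := int32(math.Ceil(usageRatio * float64(4)))
    fmt.Println(newReplicas) // 7
}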
Example 14:
err = wait.PollImmediate(statefulsetPoll, statefulsetTimeout,
    func() (bool, error) {
        podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: label.String()})
        if err != nil {
            return false, fmt.Errorf("Unable to get list of pods in statefulset %s", label)
        }
        ExpectNoError(err)
        if len(podList.Items) < numPets {
            framework.Logf("Found %d pets, waiting for %d", len(podList.Items), numPets)
            return false, nil
        }
        if len(podList.Items) > numPets {
            return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
        }
        for _, p := range podList.Items {
            isReady := v1.IsPodReady(&p)
            if p.Status.Phase != v1.PodRunning || !isReady {
                framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, v1.PodRunning, p.Status.Phase, isReady)
                return false, nil
            }
        }
        return true, nil
    })
Expect(err).NotTo(HaveOccurred())
By("Finding each node in the nodetool status lines")
forEachPod("app", "cassandra", func(pod v1.Pod) {
    output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
    matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
    if !matched {
        framework.Failf("Cassandra pod ip %s is not reporting Up and Normal 'UN' via nodetool status", pod.Status.PodIP)
Author: gambol99 | Project: kubernetes | Lines: 31 | Source: examples.go
Example 15:
    v1.ObjectMeta{
        Name:            expectedPod.Name,
        ResourceVersion: expectedPod.ResourceVersion,
    },
))
Expect(err).NotTo(HaveOccurred())
By("Verifying the 2nd pod is removed only when it becomes running and ready")
pst.restoreProbe(ps, testProbe)
_, err = watch.Until(statefulsetTimeout, watcher, func(event watch.Event) (bool, error) {
    pod := event.Object.(*v1.Pod)
    if event.Type == watch.Deleted && pod.Name == expectedPodName {
        return false, fmt.Errorf("Pod %v was deleted before entering running", pod.Name)
    }
    framework.Logf("Observed event %v for pod %v. Phase %v, Pod is ready %v",
        event.Type, pod.Name, pod.Status.Phase, v1.IsPodReady(pod))
    if pod.Name != expectedPodName {
        return false, nil
    }
    if pod.Status.Phase == v1.PodRunning && v1.IsPodReady(pod) {
        return true, nil
    }
    return false, nil
})
Expect(err).NotTo(HaveOccurred())
})
It("Scaling should happen in predictable order and halt if any stateful pod is unhealthy", func() {
    psLabels := klabels.Set(labels)
    By("Initializing watcher for selector " + psLabels.String())
    watcher, err := f.ClientSet.Core().Pods(ns).Watch(v1.ListOptions{
Author: abutcher | Project: kubernetes | Lines: 31 | Source: petset.go
Example 16:
if readyTime.Sub(startedTime) < initialDelay {
    framework.Failf("Pod became ready before its %v initial delay", initialDelay)
}
restartCount := getRestartCount(p)
Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
})
It("with readiness probe that fails should never be ready and never restart [Conformance]", func() {
    p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
    Consistently(func() (bool, error) {
        p, err := podClient.Get(p.Name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        return v1.IsPodReady(p), nil
    }, 1*time.Minute, 1*time.Second).ShouldNot(BeTrue(), "pod should not be ready")
    p, err := podClient.Get(p.Name, metav1.GetOptions{})
    framework.ExpectNoError(err)
    isReady, err := testutils.PodRunningReady(p)
    Expect(isReady).NotTo(BeTrue(), "pod should not be ready")
    restartCount := getRestartCount(p)
    Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
})
It("should be restarted with an exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
    runLivenessTest(f, &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
Author: kubernetes | Project: kubernetes | Lines: 31 | Source: container_probe.go
Example 17: syncService
// ......... (earlier part of this function omitted) .........
            if len(pod.Status.PodIP) == 0 {
                glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
                continue
            }
            if pod.DeletionTimestamp != nil {
                glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
                continue
            }
            epp := v1.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
            epa := v1.EndpointAddress{
                IP:       pod.Status.PodIP,
                NodeName: &pod.Spec.NodeName,
                TargetRef: &v1.ObjectReference{
                    Kind:            "Pod",
                    Namespace:       pod.ObjectMeta.Namespace,
                    Name:            pod.ObjectMeta.Name,
                    UID:             pod.ObjectMeta.UID,
                    ResourceVersion: pod.ObjectMeta.ResourceVersion,
                }}
            hostname := getHostname(pod)
            if len(hostname) > 0 &&
                getSubdomain(pod) == service.Name &&
                service.Namespace == pod.Namespace {
                hostRecord := endpoints.HostRecord{
                    HostName: hostname,
                }
                // TODO: stop populating podHostNames annotation in 1.4
                podHostNames[string(pod.Status.PodIP)] = hostRecord
                epa.Hostname = hostname
            }
            if tolerateUnreadyEndpoints || v1.IsPodReady(pod) {
                subsets = append(subsets, v1.EndpointSubset{
                    Addresses: []v1.EndpointAddress{epa},
                    Ports:     []v1.EndpointPort{epp},
                })
                readyEps++
            } else {
                glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
                subsets = append(subsets, v1.EndpointSubset{
                    NotReadyAddresses: []v1.EndpointAddress{epa},
                    Ports:             []v1.EndpointPort{epp},
                })
                notReadyEps++
            }
        }
    }
    subsets = endpoints.RepackSubsets(subsets)
    // See if there's actually an update here.
    currentEndpoints, err := e.client.Core().Endpoints(service.Namespace).Get(service.Name, metav1.GetOptions{})
    if err != nil {
        if errors.IsNotFound(err) {
            currentEndpoints = &v1.Endpoints{
                ObjectMeta: v1.ObjectMeta{
                    Name:   service.Name,
                    Labels: service.Labels,
                },
            }
        } else {
            return err
        }
    }
Author: jonboulle | Project: kubernetes | Lines: 66 | Source: endpoints_controller.go
Note: The k8s.io/kubernetes/pkg/api/v1.IsPodReady examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, refer to each project's License; do not reproduce without permission.