This article collects typical usage examples of the Go function cache.SplitMetaNamespaceKey from k8s.io/kubernetes/pkg/client/cache. If you are wondering what SplitMetaNamespaceKey does, how to call it, or what it looks like in real code, the curated examples below should help.
Ten code examples of SplitMetaNamespaceKey are shown; by default they are ordered by popularity.
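Before the collected examples, here is a minimal, self-contained sketch of the function's contract: it splits a cache key of the form "namespace/name" (the format produced by cache.MetaNamespaceKeyFunc) back into namespace and name, returns an empty namespace for single-segment keys of cluster-scoped objects, and returns an error for anything else. The import path follows this article's legacy package (k8s.io/kubernetes/pkg/client/cache); in newer code bases the same function lives in k8s.io/client-go/tools/cache. The key values are invented for illustration.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	// Namespaced key: "<namespace>/<name>".
	ns, name, err := cache.SplitMetaNamespaceKey("kube-system/kube-dns")
	fmt.Println(ns, name, err) // kube-system kube-dns <nil>

	// Cluster-scoped key: no slash, so the namespace comes back empty.
	ns, name, err = cache.SplitMetaNamespaceKey("node-1")
	fmt.Printf("%q %q %v\n", ns, name, err) // "" "node-1" <nil>

	// Malformed key: more than one slash yields a non-nil error.
	_, _, err = cache.SplitMetaNamespaceKey("a/b/c")
	fmt.Println(err != nil) // true
}

Because the error can only come from a malformed key, the controllers in the examples below treat it as non-retryable.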
Example 1: TestWatchPods
func TestWatchPods(t *testing.T) {
	testJob := newJob(2, 2)
	clientset := fake.NewSimpleClientset(testJob)
	fakeWatch := watch.NewFake()
	clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
	manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
	manager.podStoreSynced = alwaysReady
	manager.jobStoreSynced = alwaysReady
	// Put one job and one pod into the store
	sharedInformerFactory.Jobs().Informer().GetIndexer().Add(testJob)
	received := make(chan struct{})
	// The pod update sent through the fakeWatcher should figure out the managing job and
	// send it into the syncHandler.
	manager.syncHandler = func(key string) error {
		ns, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			t.Errorf("Error getting namespace/name from key %v: %v", key, err)
		}
		job, err := manager.jobLister.Jobs(ns).Get(name)
		if err != nil {
			t.Errorf("Expected to find job under key %v: %v", key, err)
		}
		if !api.Semantic.DeepDerivative(job, testJob) {
			t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
			close(received)
			return nil
		}
		close(received)
		return nil
	}
	// Start only the pod watcher and the workqueue, send a watch event,
	// and make sure it hits the sync method for the right job.
	stopCh := make(chan struct{})
	defer close(stopCh)
	go sharedInformerFactory.Pods().Informer().Run(stopCh)
	go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
	pods := newPodList(1, api.PodRunning, testJob)
	testPod := pods[0]
	testPod.Status.Phase = api.PodFailed
	fakeWatch.Add(&testPod)
	t.Log("Waiting for pod to reach syncHandler")
	<-received
}
Developer: eljefedelrodeodeljefe, Project: kubernetes, Lines: 46, Source: jobcontroller_test.go
Example 2: TestWatchJobs
func TestWatchJobs(t *testing.T) {
	clientset := fake.NewSimpleClientset()
	fakeWatch := watch.NewFake()
	clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil))
	manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
	manager.podStoreSynced = alwaysReady
	manager.jobStoreSynced = alwaysReady
	var testJob batch.Job
	received := make(chan struct{})
	// The update sent through the fakeWatcher should make its way into the workqueue,
	// and eventually into the syncHandler.
	manager.syncHandler = func(key string) error {
		defer close(received)
		ns, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			t.Errorf("Error getting namespace/name from key %v: %v", key, err)
		}
		job, err := manager.jobLister.Jobs(ns).Get(name)
		if err != nil || job == nil {
			t.Errorf("Expected to find job under key %v: %v", key, err)
			return nil
		}
		if !api.Semantic.DeepDerivative(*job, testJob) {
			t.Errorf("Expected %#v, but got %#v", testJob, *job)
		}
		return nil
	}
	// Start only the job watcher and the workqueue, send a watch event,
	// and make sure it hits the sync method.
	stopCh := make(chan struct{})
	defer close(stopCh)
	sharedInformerFactory.Start(stopCh)
	go manager.Run(1, stopCh)
	// We're sending a new job to see if it reaches syncHandler.
	testJob.Namespace = "bar"
	testJob.Name = "foo"
	fakeWatch.Add(&testJob)
	t.Log("Waiting for job to reach syncHandler")
	<-received
}
Developer: eljefedelrodeodeljefe, Project: kubernetes, Lines: 43, Source: jobcontroller_test.go
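Both tests above exercise the same controller convention: informer event handlers enqueue an object under its "namespace/name" string key, and the worker that later pops the key reverses it with SplitMetaNamespaceKey before looking the object up again. Below is a condensed, hypothetical sketch of that round trip; the controller type, its queue field, and the lookup callback are illustrative stand-ins (for example, for manager.jobLister.Jobs(ns).Get(name) in the tests) and are not code from the quoted projects.

package example

import (
	"k8s.io/kubernetes/pkg/client/cache"
	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
	"k8s.io/kubernetes/pkg/util/workqueue"
)

// controller is a deliberately skeletal stand-in for the job and endpoints
// controllers quoted in this article; only the key handling is shown.
type controller struct {
	queue workqueue.Interface
	// lookup stands in for a typed lister call such as jobLister.Jobs(ns).Get(name).
	lookup func(namespace, name string) (interface{}, error)
	handle func(obj interface{}) error
}

// enqueue converts an object into its "namespace/name" key and adds it to the
// work queue; the deletion-handling key function also accepts tombstone objects.
func (c *controller) enqueue(obj interface{}) {
	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
	if err != nil {
		utilruntime.HandleError(err)
		return
	}
	c.queue.Add(key)
}

// sync is the worker side: split the key back into namespace and name, then
// fetch the current state of the object before acting on it.
func (c *controller) sync(key string) error {
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		// A malformed key will never become valid, so drop it instead of retrying.
		utilruntime.HandleError(err)
		return nil
	}
	obj, err := c.lookup(namespace, name)
	if err != nil {
		return err
	}
	return c.handle(obj)
}

The two tests simulate exactly this path by stubbing manager.syncHandler and waiting on the received channel before the test exits.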
Example 3: HandleTimed
// HandleTimed is invoked when a key is ready to be processed.
func (b *scheduled) HandleTimed(key, value interface{}) {
	if !b.enabled {
		b.scheduler.Remove(key, value)
		return
	}
	glog.V(5).Infof("DEBUG: checking %s", key)
	if b.rateLimiter != nil && !b.rateLimiter.TryAccept() {
		glog.V(5).Infof("DEBUG: check of %s exceeded rate limit, will retry later", key)
		return
	}
	namespace, name, _ := cache.SplitMetaNamespaceKey(key.(string))
	if err := b.controller.NextTimedByName(namespace, name); err != nil {
		// the stream cannot be imported
		if err == ErrNotImportable {
			// value must match to be removed, so we avoid races against creation by ensuring that we only
			// remove the stream if the uid and resource version in the scheduler are exactly the same.
			b.scheduler.Remove(key, value)
			return
		}
		utilruntime.HandleError(err)
		return
	}
}
Developer: asiainfoLDP, Project: datafactory, Lines: 24, Source: factory.go
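Unlike the other examples, HandleTimed receives its key as an untyped interface{} from the scheduler, type-asserts it to string inline, and discards the error returned by SplitMetaNamespaceKey. A small hypothetical helper that keeps both checks is sketched below; the name splitSchedulerKey is invented for illustration and does not appear in the original project.

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/client/cache"
)

// splitSchedulerKey turns an untyped scheduler key into a namespace/name pair,
// surfacing a failed type assertion and a malformed key as errors instead of
// silently producing empty strings.
func splitSchedulerKey(key interface{}) (namespace, name string, err error) {
	s, ok := key.(string)
	if !ok {
		return "", "", fmt.Errorf("expected string key, got %T", key)
	}
	return cache.SplitMetaNamespaceKey(s)
}

Discarding the error, as the original does, is reasonable when the keys are known to be produced by MetaNamespaceKeyFunc and are therefore well formed.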
Example 4: syncService
func (e *EndpointController) syncService(key string) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
}()
if !e.podStoreSynced() {
// Sleep so we give the pod reflector goroutine a chance to run.
time.Sleep(PodStoreSyncedPollPeriod)
glog.Infof("Waiting for pods controller to sync, requeuing service %v", key)
e.queue.Add(key)
return
}
obj, exists, err := e.serviceStore.Store.GetByKey(key)
if err != nil || !exists {
// Delete the corresponding endpoint, as the service has been deleted.
// TODO: Please note that this will delete an endpoint when a
// service is deleted. However, if we're down at the time when
// the service is deleted, we will miss that deletion, so this
// doesn't completely solve the problem. See #6877.
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.Errorf("Need to delete endpoint with key %q, but couldn't understand the key: %v", key, err)
// Don't retry, as the key isn't going to magically become understandable.
return
}
err = e.client.Endpoints(namespace).Delete(name, nil)
if err != nil && !errors.IsNotFound(err) {
glog.Errorf("Error deleting endpoint %q: %v", key, err)
e.queue.Add(key) // Retry
}
return
}
service := obj.(*api.Service)
if service.Spec.Selector == nil {
// services without a selector receive no endpoints from this controller;
// these services will receive the endpoints that are created out-of-band via the REST API.
return
}
glog.V(5).Infof("About to update endpoints for service %q", key)
pods, err := e.podStore.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelector())
if err != nil {
// Since we're getting stuff from a local cache, it is
// basically impossible to get this error.
glog.Errorf("Error syncing service %q: %v", key, err)
e.queue.Add(key) // Retry
return
}
subsets := []api.EndpointSubset{}
podHostNames := map[string]endpoints.HostRecord{}
var tolerateUnreadyEndpoints bool
if v, ok := service.Annotations[TolerateUnreadyEndpointsAnnotation]; ok {
b, err := strconv.ParseBool(v)
if err == nil {
tolerateUnreadyEndpoints = b
} else {
glog.Errorf("Failed to parse annotation %v: %v", TolerateUnreadyEndpointsAnnotation, err)
}
}
for i := range pods.Items {
pod := &pods.Items[i]
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
portName := servicePort.Name
portProto := servicePort.Protocol
portNum, err := podutil.FindPort(pod, servicePort)
if err != nil {
glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
continue
}
if len(pod.Status.PodIP) == 0 {
glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
continue
}
if pod.DeletionTimestamp != nil {
glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
continue
}
epp := api.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
epa := api.EndpointAddress{
IP: pod.Status.PodIP,
TargetRef: &api.ObjectReference{
Kind: "Pod",
Namespace: pod.ObjectMeta.Namespace,
Name: pod.ObjectMeta.Name,
UID: pod.ObjectMeta.UID,
ResourceVersion: pod.ObjectMeta.ResourceVersion,
}}
hostname := getHostname(pod)
if len(hostname) > 0 &&
//......... remainder of this function omitted .........
Developer: Cloven, Project: minikube, Lines: 101, Source: endpoints_controller.go
Example 5: syncService
func (e *EndpointController) syncService(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := e.serviceStore.Indexer.GetByKey(key)
if err != nil || !exists {
// Delete the corresponding endpoint, as the service has been deleted.
// TODO: Please note that this will delete an endpoint when a
// service is deleted. However, if we're down at the time when
// the service is deleted, we will miss that deletion, so this
// doesn't completely solve the problem. See #6877.
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Need to delete endpoint with key %q, but couldn't understand the key: %v", key, err))
// Don't retry, as the key isn't going to magically become understandable.
return nil
}
err = e.client.Core().Endpoints(namespace).Delete(name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}
return nil
}
service := obj.(*api.Service)
if service.Spec.Selector == nil {
// services without a selector receive no endpoints from this controller;
// these services will receive the endpoints that are created out-of-band via the REST API.
return nil
}
glog.V(5).Infof("About to update endpoints for service %q", key)
pods, err := e.podStore.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelectorPreValidated())
if err != nil {
// Since we're getting stuff from a local cache, it is
// basically impossible to get this error.
return err
}
subsets := []api.EndpointSubset{}
podHostNames := map[string]endpoints.HostRecord{}
var tolerateUnreadyEndpoints bool
if v, ok := service.Annotations[TolerateUnreadyEndpointsAnnotation]; ok {
b, err := strconv.ParseBool(v)
if err == nil {
tolerateUnreadyEndpoints = b
} else {
utilruntime.HandleError(fmt.Errorf("Failed to parse annotation %v: %v", TolerateUnreadyEndpointsAnnotation, err))
}
}
readyEps := 0
notReadyEps := 0
for i := range pods {
// TODO: Do we need to copy here?
pod := &(*pods[i])
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
portName := servicePort.Name
portProto := servicePort.Protocol
portNum, err := podutil.FindPort(pod, servicePort)
if err != nil {
glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
continue
}
if len(pod.Status.PodIP) == 0 {
glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
continue
}
if pod.DeletionTimestamp != nil {
glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
continue
}
epp := api.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
epa := api.EndpointAddress{
IP: pod.Status.PodIP,
NodeName: &pod.Spec.NodeName,
TargetRef: &api.ObjectReference{
Kind: "Pod",
Namespace: pod.ObjectMeta.Namespace,
Name: pod.ObjectMeta.Name,
UID: pod.ObjectMeta.UID,
ResourceVersion: pod.ObjectMeta.ResourceVersion,
}}
hostname := getHostname(pod)
if len(hostname) > 0 &&
getSubdomain(pod) == service.Name &&
service.Namespace == pod.Namespace {
hostRecord := endpoints.HostRecord{
HostName: hostname,
}
// TODO: stop populating podHostNames annotation in 1.4
podHostNames[string(pod.Status.PodIP)] = hostRecord
//......... remainder of this function omitted .........
Developer: pst, Project: kubernetes, Lines: 101, Source: endpoints_controller.go
Example 6: syncService
func (e *EndpointController) syncService(key string) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := e.serviceStore.Store.GetByKey(key)
if err != nil || !exists {
// Delete the corresponding endpoint, as the service has been deleted.
// TODO: Please note that this will delete an endpoint when a
// service is deleted. However, if we're down at the time when
// the service is deleted, we will miss that deletion, so this
// doesn't completely solve the problem. See #6877.
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.Errorf("Need to delete endpoint with key %q, but couldn't understand the key: %v", key, err)
// Don't retry, as the key isn't going to magically become understandable.
return
}
err = e.client.Endpoints(namespace).Delete(name)
if err != nil && !errors.IsNotFound(err) {
glog.Errorf("Error deleting endpoint %q: %v", key, err)
e.queue.Add(key) // Retry
}
return
}
service := obj.(*api.Service)
if service.Spec.Selector == nil {
// services without a selector receive no endpoints from this controller;
// these services will receive the endpoints that are created out-of-band via the REST API.
return
}
glog.V(5).Infof("About to update endpoints for service %q", key)
pods, err := e.podStore.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelector())
if err != nil {
// Since we're getting stuff from a local cache, it is
// basically impossible to get this error.
glog.Errorf("Error syncing service %q: %v", key, err)
e.queue.Add(key) // Retry
return
}
subsets := []api.EndpointSubset{}
for i := range pods.Items {
pod := &pods.Items[i]
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
portName := servicePort.Name
portProto := servicePort.Protocol
portNum, err := findPort(pod, servicePort)
if err != nil {
glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
continue
}
if len(pod.Status.PodIP) == 0 {
glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
continue
}
if pod.DeletionTimestamp != nil {
glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
continue
}
epp := api.EndpointPort{Name: portName, Port: portNum, Protocol: portProto}
epa := api.EndpointAddress{IP: pod.Status.PodIP, TargetRef: &api.ObjectReference{
Kind: "Pod",
Namespace: pod.ObjectMeta.Namespace,
Name: pod.ObjectMeta.Name,
UID: pod.ObjectMeta.UID,
ResourceVersion: pod.ObjectMeta.ResourceVersion,
}}
if api.IsPodReady(pod) {
subsets = append(subsets, api.EndpointSubset{
Addresses: []api.EndpointAddress{epa},
Ports: []api.EndpointPort{epp},
})
} else {
glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
subsets = append(subsets, api.EndpointSubset{
NotReadyAddresses: []api.EndpointAddress{epa},
Ports: []api.EndpointPort{epp},
})
}
}
}
subsets = endpoints.RepackSubsets(subsets)
// See if there's actually an update here.
currentEndpoints, err := e.client.Endpoints(service.Namespace).Get(service.Name)
if err != nil {
if errors.IsNotFound(err) {
currentEndpoints = &api.Endpoints{
ObjectMeta: api.ObjectMeta{
Name: service.Name,
Labels: service.Labels,
},
}
//......... remainder of this function omitted .........
Developer: binblee, Project: kubernetes, Lines: 101, Source: endpoints_controller.go
Example 7: calculateArguments
// calculateArguments determines the arguments for a give delta and updates the argument store, or returns
// an error. If the object can be transformed into a full JSON object, that is also returned.
func (o *ObserveOptions) calculateArguments(delta cache.Delta) (runtime.Object, []string, []byte, error) {
var arguments []string
var object runtime.Object
var key string
var output []byte
switch t := delta.Object.(type) {
case cache.DeletedFinalStateUnknown:
key = t.Key
if obj, ok := t.Obj.(runtime.Object); ok {
object = obj
args, data, err := o.printer.Print(obj)
if err != nil {
return nil, nil, nil, fmt.Errorf("unable to write arguments: %v", err)
}
arguments = args
output = data
} else {
value, _, err := o.argumentStore.GetByKey(key)
if err != nil {
return nil, nil, nil, err
}
if value != nil {
args, ok := value.(objectArguments)
if !ok {
return nil, nil, nil, fmt.Errorf("unexpected cache value %T", value)
}
arguments = args.arguments
output = args.output
}
}
o.argumentStore.Remove(key)
case runtime.Object:
object = t
args, data, err := o.printer.Print(t)
if err != nil {
return nil, nil, nil, fmt.Errorf("unable to write arguments: %v", err)
}
arguments = args
output = data
key, _ = cache.MetaNamespaceKeyFunc(t)
if delta.Type == cache.Deleted {
o.argumentStore.Remove(key)
} else {
saved := objectArguments{key: key, arguments: arguments}
// only cache the object data if the commands will be using it.
if len(o.objectEnvVar) > 0 {
saved.output = output
}
o.argumentStore.Put(key, saved)
}
case objectArguments:
key = t.key
arguments = t.arguments
output = t.output
default:
return nil, nil, nil, fmt.Errorf("unrecognized object %T from cache store", delta.Object)
}
if object == nil {
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return nil, nil, nil, err
}
unstructured := &runtime.Unstructured{}
unstructured.SetNamespace(namespace)
unstructured.SetName(name)
object = unstructured
}
return object, arguments, output, nil
}
Developer: php-coder, Project: origin, Lines: 82, Source: observe.go
Example 8: syncService
func (e *NetworkController) syncService(key string) {
glog.V(4).Infof("NetworkController: processing service %v", key)
obj, exists, err := e.serviceStore.Store.GetByKey(key)
if err != nil || !exists {
// Delete the corresponding loadbalancer, as the service has been deleted.
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.Errorf("NetworkController: couldn't understand the key %s: %v", key, err)
return
}
loadBalancerFullName := networkprovider.BuildLoadBalancerName(name, namespace)
deleteError := e.netProvider.LoadBalancers().DeleteLoadBalancer(loadBalancerFullName)
if deleteError != nil {
glog.Errorf("NetworkController: delete loadbalancer %s failed: %v", loadBalancerFullName, err)
}
return
}
service := obj.(*api.Service)
if service.Spec.Selector == nil {
// services without a selector receive no endpoints from this controller;
// these services will receive the endpoints that are created out-of-band via the REST API.
return
}
// check if loadbalancer already created
loadBalanerShouldExist := !(len(service.Spec.ExternalIPs) == 0)
var status *api.LoadBalancerStatus
loadBalancerFullName := networkprovider.BuildLoadBalancerName(service.Name, service.Namespace)
loadBalancer, err := e.netProvider.LoadBalancers().GetLoadBalancer(loadBalancerFullName)
if err != nil && err.Error() == networkprovider.ErrNotFound.Error() {
if loadBalanerShouldExist {
// create new loadbalancer
status, _ = e.createLoadBalancer(service)
}
} else if err != nil {
glog.Errorf("NetworkController: couldn't get loadbalancer from networkprovider: %v", err)
return
} else {
if loadBalanerShouldExist {
// update loadbalancer
status, _ = e.updateLoadBalancer(service, loadBalancer)
} else {
// delete loadbalancer
deleteError := e.netProvider.LoadBalancers().DeleteLoadBalancer(loadBalancerFullName)
if deleteError != nil {
glog.Errorf("NetworkController: delete loadbalancer %s failed: %v", loadBalancerFullName, err)
}
}
}
if status != nil {
service.Status.LoadBalancer = *status
err := e.updateService(service)
if err != nil {
e.eventRecorder.Event(service, "created loadbalancer", "created loadbalancer")
}
}
}
Developer: kuenzaa, Project: hypernetes, Lines: 63, Source: network_controller.go
Example 9: syncJob
// syncJob will sync the job with the given key if it has had its expectations fulfilled, meaning
// it did not expect to see any more of its pods created or deleted. This function is not meant to be invoked
// concurrently with the same key.
func (jm *JobController) syncJob(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Now().Sub(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
if len(ns) == 0 || len(name) == 0 {
return fmt.Errorf("invalid job key %q: either namespace or name is missing", key)
}
sharedJob, err := jm.jobLister.Jobs(ns).Get(name)
if err != nil {
if errors.IsNotFound(err) {
glog.V(4).Infof("Job has been deleted: %v", key)
jm.expectations.DeleteExpectations(key)
return nil
}
return err
}
job := *sharedJob
// Check the expectations of the job before counting active pods, otherwise a new pod can sneak in
// and update the expectations after we've retrieved active pods from the store. If a new pod enters
// the store after we've checked the expectation, the job sync is just deferred till the next relist.
jobNeedsSync := jm.expectations.SatisfiedExpectations(key)
selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector)
pods, err := jm.podStore.Pods(job.Namespace).List(selector)
if err != nil {
return err
}
activePods := controller.FilterActivePods(pods)
active := int32(len(activePods))
succeeded, failed := getStatus(pods)
conditions := len(job.Status.Conditions)
if job.Status.StartTime == nil {
now := metav1.Now()
job.Status.StartTime = &now
}
// if job was finished previously, we don't want to redo the termination
if IsJobFinished(&job) {
return nil
}
if pastActiveDeadline(&job) {
// TODO: below code should be replaced with pod termination resulting in
// pod failures, rather than killing pods. Unfortunately none such solution
// exists ATM. There's an open discussion in the topic in
// https://github.com/kubernetes/kubernetes/issues/14602 which might give
// some sort of solution to above problem.
// kill remaining active pods
wait := sync.WaitGroup{}
wait.Add(int(active))
for i := int32(0); i < active; i++ {
go func(ix int32) {
defer wait.Done()
if err := jm.podControl.DeletePod(job.Namespace, activePods[ix].Name, &job); err != nil {
defer utilruntime.HandleError(err)
}
}(i)
}
wait.Wait()
// update status values accordingly
failed += active
active = 0
job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
jm.recorder.Event(&job, v1.EventTypeNormal, "DeadlineExceeded", "Job was active longer than specified deadline")
} else {
if jobNeedsSync && job.DeletionTimestamp == nil {
active = jm.manageJob(activePods, succeeded, &job)
}
completions := succeeded
complete := false
if job.Spec.Completions == nil {
// This type of job is complete when any pod exits with success.
// Each pod is capable of
// determining whether or not the entire Job is done. Subsequent pods are
// not expected to fail, but if they do, the failure is ignored. Once any
// pod succeeds, the controller waits for remaining pods to finish, and
// then the job is complete.
if succeeded > 0 && active == 0 {
complete = true
}
} else {
// Job specifies a number of completions. This type of job signals
// success by having that number of successes. Since we do not
// start more pods than there are remaining completions, there should
// not be any remaining active pods once this count is reached.
if completions >= *job.Spec.Completions {
complete = true
if active > 0 {
jm.recorder.Event(&job, v1.EventTypeWarning, "TooManyActivePods", "Too many active pods running after completion count reached")
}
if completions > *job.Spec.Completions {
jm.recorder.Event(&job, v1.EventTypeWarning, "TooManySucceededPods", "Too many succeeded pods running after completion count reached")
//......... remainder of this function omitted .........
Developer: kubernetes, Project: kubernetes, Lines: 101, Source: jobcontroller.go
Example 10: syncService
// HACK(sttts): add annotations to the endpoint about the respective container ports
func (e *endpointController) syncService(key string) error {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing service %q endpoints. (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := e.serviceStore.Store.GetByKey(key)
if err != nil || !exists {
// Delete the corresponding endpoint, as the service has been deleted.
// TODO: Please note that this will delete an endpoint when a
// service is deleted. However, if we're down at the time when
// the service is deleted, we will miss that deletion, so this
// doesn't completely solve the problem. See #6877.
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Need to delete endpoint with key %q, but couldn't understand the key: %v", key, err))
// Don't retry, as the key isn't going to magically become understandable.
return nil
}
err = e.client.Endpoints(namespace).Delete(name, nil)
if err != nil && !errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("Error deleting endpoint %q: %v", key, err))
return err
}
return nil
}
service := obj.(*api.Service)
if service.Spec.Selector == nil {
// services without a selector receive no endpoints from this controller;
// these services will receive the endpoints that are created out-of-band via the REST API.
return nil
}
glog.V(5).Infof("About to update endpoints for service %q", key)
pods, err := e.podStore.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelector())
if err != nil {
// Since we're getting stuff from a local cache, it is
// basically impossible to get this error.
utilruntime.HandleError(fmt.Errorf("Error syncing service %q: %v", key, err))
return err
}
subsets := []api.EndpointSubset{}
containerPortAnnotations := map[string]string{} // by <HostIP>:<Port>
for i := range pods {
// TODO: Do we need to copy here?
pod := &(*pods[i])
for i := range service.Spec.Ports {
servicePort := &service.Spec.Ports[i]
portName := servicePort.Name
portProto := servicePort.Protocol
portNum, containerPort, err := findPort(pod, servicePort)
if err != nil {
glog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
continue
}
// HACK(jdef): use HostIP instead of pod.CurrentState.PodIP for generic mesos compat
if len(pod.Status.HostIP) == 0 {
glog.V(4).Infof("Failed to find a host IP for pod %s/%s", pod.Namespace, pod.Name)
continue
}
if pod.DeletionTimestamp != nil {
glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
continue
}
if !api.IsPodReady(pod) {
glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
continue
}
// HACK(jdef): use HostIP instead of pod.CurrentState.PodIP for generic mesos compat
epp := api.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
epa := api.EndpointAddress{IP: pod.Status.HostIP, TargetRef: &api.ObjectReference{
Kind: "Pod",
Namespace: pod.ObjectMeta.Namespace,
Name: pod.ObjectMeta.Name,
UID: pod.ObjectMeta.UID,
ResourceVersion: pod.ObjectMeta.ResourceVersion,
}}
subsets = append(subsets, api.EndpointSubset{Addresses: []api.EndpointAddress{epa}, Ports: []api.EndpointPort{epp}})
containerPortAnnotations[fmt.Sprintf(meta.ContainerPortKeyFormat, portProto, pod.Status.HostIP, portNum)] = strconv.Itoa(containerPort)
}
}
subsets = endpoints.RepackSubsets(subsets)
// See if there's actually an update here.
currentEndpoints, err := e.client.Endpoints(service.Namespace).Get(service.Name)
if err != nil {
if errors.IsNotFound(err) {
currentEndpoints = &api.Endpoints{
ObjectMeta: api.ObjectMeta{
Name: service.Name,
Labels: service.Labels,
},
}
} else {
//......... remainder of this function omitted .........
Developer: PeterLamar, Project: kubernetes, Lines: 101, Source: endpoints_controller.go
Note: The k8s.io/kubernetes/pkg/client/cache.SplitMetaNamespaceKey examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by their respective authors, who retain copyright; consult each project's License before redistributing or reusing the code, and do not republish without permission.