This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/api/v1.GetReference. If you have been wondering what GetReference does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.
Ten code examples of GetReference are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Golang examples.
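Before the examples, a quick orientation: GetReference takes any runtime.Object and returns a *v1.ObjectReference (Kind, APIVersion, Namespace, Name, UID, ...) that events, annotations, and fields such as PersistentVolume.Spec.ClaimRef can point at. The sketch below is not taken from any of the projects listed here; the Pod, its name and namespace, and the import paths are illustrative assumptions (they match a Kubernetes 1.6-era tree, where ObjectMeta/TypeMeta live in the apimachinery metav1 package; older trees keep them in pkg/api/v1 or pkg/api/unversioned). TypeMeta is filled in explicitly because GetReference resolves the kind and API version from the object's TypeMeta, the scheme, or its SelfLink.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// Hypothetical Pod used only for illustration. TypeMeta is set so that
	// GetReference can determine Kind and APIVersion without a SelfLink.
	pod := &v1.Pod{
		TypeMeta:   metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{Name: "example-pod", Namespace: "default"},
	}

	ref, err := v1.GetReference(pod)
	if err != nil {
		fmt.Printf("could not build reference: %v\n", err)
		return
	}

	// The resulting reference is what the examples below embed in Events,
	// "created-by" annotations, and PersistentVolume claim refs.
	fmt.Printf("Kind=%s Namespace=%s Name=%s\n", ref.Kind, ref.Namespace, ref.Name)
}
The ten examples that follow show the same call used from tests, event recorders, controllers, and the kubelet runtime manager.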
Example 1: refJSON
func refJSON(t *testing.T, o runtime.Object) string {
	ref, err := apiv1.GetReference(o)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	codec := testapi.Default.Codec()
	json := runtime.EncodeOrDie(codec, &apiv1.SerializedReference{Reference: *ref})
	return string(json)
}
Developer: kubernetes, Project: contrib, Lines: 10, Source: drain_test.go
Example 2: generateEvent
func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp metav1.Time, eventtype, reason, message string) {
	ref, err := v1.GetReference(obj)
	if err != nil {
		return
	}
	event := f.makeEvent(ref, eventtype, reason, message)
	event.Source = f.source
	if f.Events != nil {
		fmt.Println("write event")
		f.Events = append(f.Events, event)
	}
}
Developer: alex-mohr, Project: kubernetes, Lines: 12, Source: test_utils.go
Example 3: makeCreatedByRefJson
// makeCreatedByRefJson makes a json string with an object reference for use in "created-by" annotation value
func makeCreatedByRefJson(object runtime.Object) (string, error) {
	createdByRef, err := v1.GetReference(object)
	if err != nil {
		return "", fmt.Errorf("unable to get controller reference: %v", err)
	}
	// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
	// would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
	// We need to consistently handle this case of annotation versioning.
	codec := api.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"})
	createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{
		Reference: *createdByRef,
	})
	if err != nil {
		return "", fmt.Errorf("unable to serialize controller reference: %v", err)
	}
	return string(createdByRefJson), nil
}
Developer: kubernetes, Project: kubernetes, Lines: 20, Source: utils.go
Example 4: generateEvent
func (recorder *recorderImpl) generateEvent(object runtime.Object, timestamp metav1.Time, eventtype, reason, message string) {
	ref, err := v1.GetReference(object)
	if err != nil {
		glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
		return
	}
	if !validateEventType(eventtype) {
		glog.Errorf("Unsupported event type: '%v'", eventtype)
		return
	}
	event := recorder.makeEvent(ref, eventtype, reason, message)
	event.Source = recorder.source
	go func() {
		// NOTE: events should be a non-blocking operation
		defer utilruntime.HandleCrash()
		recorder.Action(watch.Added, event)
	}()
}
Developer: kubernetes, Project: kubernetes, Lines: 21, Source: event.go
Example 5: getPodsAnnotationSet
func getPodsAnnotationSet(template *v1.PodTemplateSpec, object runtime.Object) (labels.Set, error) {
	desiredAnnotations := make(labels.Set)
	for k, v := range template.Annotations {
		desiredAnnotations[k] = v
	}
	createdByRef, err := v1.GetReference(object)
	if err != nil {
		return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err)
	}
	// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
	// would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
	// We need to consistently handle this case of annotation versioning.
	codec := api.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: "v1"})
	createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{
		Reference: *createdByRef,
	})
	if err != nil {
		return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err)
	}
	desiredAnnotations[v1.CreatedByAnnotation] = string(createdByRefJson)
	return desiredAnnotations, nil
}
Developer: jonboulle, Project: kubernetes, Lines: 24, Source: controller_utils.go
Example 6: TestPersistentVolumeControllerStartup
// TestPersistentVolumeControllerStartup tests startup of the controller.
// The controller should not unbind any volumes when it starts.
func TestPersistentVolumeControllerStartup(t *testing.T) {
	_, s := framework.RunAMaster(nil)
	defer s.Close()
	ns := framework.CreateTestingNamespace("controller-startup", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)
	objCount := getObjectCount()
	const shortSyncPeriod = 2 * time.Second
	syncPeriod := getSyncPeriod(shortSyncPeriod)
	testClient, binder, watchPV, watchPVC := createClients(ns, t, s, shortSyncPeriod)
	defer watchPV.Stop()
	defer watchPVC.Stop()
	// Create *bound* volumes and PVCs
	pvs := make([]*v1.PersistentVolume, objCount)
	pvcs := make([]*v1.PersistentVolumeClaim, objCount)
	for i := 0; i < objCount; i++ {
		pvName := "pv-startup-" + strconv.Itoa(i)
		pvcName := "pvc-startup-" + strconv.Itoa(i)
		pvc := createPVC(pvcName, ns.Name, "1G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
		pvc.Annotations = map[string]string{"annBindCompleted": ""}
		pvc.Spec.VolumeName = pvName
		newPVC, err := testClient.PersistentVolumeClaims(ns.Name).Create(pvc)
		if err != nil {
			t.Fatalf("Cannot create claim %q: %v", pvc.Name, err)
		}
		// Save Bound status as a separate transaction
		newPVC.Status.Phase = v1.ClaimBound
		newPVC, err = testClient.PersistentVolumeClaims(ns.Name).UpdateStatus(newPVC)
		if err != nil {
			t.Fatalf("Cannot update claim status %q: %v", pvc.Name, err)
		}
		pvcs[i] = newPVC
		// Drain watchPVC with all events generated by the PVC until it's bound
		// We don't want to catch "PVC created with Status.Phase == Pending"
		// later in this test.
		waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
		pv := createPV(pvName, "/tmp/foo"+strconv.Itoa(i), "1G",
			[]v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
		claimRef, err := v1.GetReference(newPVC)
		if err != nil {
			glog.V(3).Infof("unexpected error getting claim reference: %v", err)
			return
		}
		pv.Spec.ClaimRef = claimRef
		newPV, err := testClient.PersistentVolumes().Create(pv)
		if err != nil {
			t.Fatalf("Cannot create volume %q: %v", pv.Name, err)
		}
		// Save Bound status as a separate transaction
		newPV.Status.Phase = v1.VolumeBound
		newPV, err = testClient.PersistentVolumes().UpdateStatus(newPV)
		if err != nil {
			t.Fatalf("Cannot update volume status %q: %v", pv.Name, err)
		}
		pvs[i] = newPV
		// Drain watchPV with all events generated by the PV until it's bound
		// We don't want to catch "PV created with Status.Phase == Pending"
		// later in this test.
		waitForAnyPersistentVolumePhase(watchPV, v1.VolumeBound)
	}
	// Start the controller when all PVs and PVCs are already saved in etcd
	stopCh := make(chan struct{})
	binder.Run(stopCh)
	defer close(stopCh)
	// wait for at least two sync periods for changes. No volume should be
	// Released and no claim should be Lost during this time.
	timer := time.NewTimer(2 * syncPeriod)
	defer timer.Stop()
	finished := false
	for !finished {
		select {
		case volumeEvent := <-watchPV.ResultChan():
			volume, ok := volumeEvent.Object.(*v1.PersistentVolume)
			if !ok {
				continue
			}
			if volume.Status.Phase != v1.VolumeBound {
				t.Errorf("volume %s unexpectedly changed state to %s", volume.Name, volume.Status.Phase)
			}
		case claimEvent := <-watchPVC.ResultChan():
			claim, ok := claimEvent.Object.(*v1.PersistentVolumeClaim)
			if !ok {
				continue
			}
			if claim.Status.Phase != v1.ClaimBound {
				t.Errorf("claim %s unexpectedly changed state to %s", claim.Name, claim.Status.Phase)
			}
		case <-timer.C:
			//......... remaining code omitted .........
Developer: nak3, Project: kubernetes, Lines: 101, Source: persistent_volumes_test.go
Example 7: TestPersistentVolumeBindRace
func TestPersistentVolumeBindRace(t *testing.T) {
	// Test a race binding many claims to a PV that is pre-bound to a specific
	// PVC. Only this specific PVC should get bound.
	glog.V(2).Infof("TestPersistentVolumeBindRace started")
	_, s := framework.RunAMaster(nil)
	defer s.Close()
	ns := framework.CreateTestingNamespace("pv-bind-race", s, t)
	defer framework.DeleteTestingNamespace(ns, s, t)
	testClient, ctrl, watchPV, watchPVC := createClients(ns, t, s, defaultSyncPeriod)
	defer watchPV.Stop()
	defer watchPVC.Stop()
	// NOTE: This test cannot run in parallel, because it is creating and deleting
	// non-namespaced objects (PersistentVolumes).
	defer testClient.Core().PersistentVolumes().DeleteCollection(nil, v1.ListOptions{})
	stopCh := make(chan struct{})
	ctrl.Run(stopCh)
	defer close(stopCh)
	pv := createPV("fake-pv-race", "/tmp/foo", "10G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, v1.PersistentVolumeReclaimRetain)
	pvc := createPVC("fake-pvc-race", ns.Name, "5G", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce})
	counter := 0
	maxClaims := 100
	claims := []*v1.PersistentVolumeClaim{}
	for counter <= maxClaims {
		counter += 1
		clone, _ := conversion.NewCloner().DeepCopy(pvc)
		newPvc, _ := clone.(*v1.PersistentVolumeClaim)
		newPvc.ObjectMeta = v1.ObjectMeta{Name: fmt.Sprintf("fake-pvc-race-%d", counter)}
		claim, err := testClient.PersistentVolumeClaims(ns.Name).Create(newPvc)
		if err != nil {
			t.Fatalf("Error creating newPvc: %v", err)
		}
		claims = append(claims, claim)
	}
	glog.V(2).Infof("TestPersistentVolumeBindRace claims created")
	// putting a bind manually on a pv should only match the claim it is bound to
	rand.Seed(time.Now().Unix())
	claim := claims[rand.Intn(maxClaims-1)]
	claimRef, err := v1.GetReference(claim)
	if err != nil {
		t.Fatalf("Unexpected error getting claimRef: %v", err)
	}
	pv.Spec.ClaimRef = claimRef
	pv.Spec.ClaimRef.UID = ""
	pv, err = testClient.PersistentVolumes().Create(pv)
	if err != nil {
		t.Fatalf("Unexpected error creating pv: %v", err)
	}
	glog.V(2).Infof("TestPersistentVolumeBindRace pv created, pre-bound to %s", claim.Name)
	waitForPersistentVolumePhase(testClient, pv.Name, watchPV, v1.VolumeBound)
	glog.V(2).Infof("TestPersistentVolumeBindRace pv bound")
	waitForAnyPersistentVolumeClaimPhase(watchPVC, v1.ClaimBound)
	glog.V(2).Infof("TestPersistentVolumeBindRace pvc bound")
	pv, err = testClient.PersistentVolumes().Get(pv.Name)
	if err != nil {
		t.Fatalf("Unexpected error getting pv: %v", err)
	}
	if pv.Spec.ClaimRef == nil {
		t.Fatalf("Unexpected nil claimRef")
	}
	if pv.Spec.ClaimRef.Namespace != claimRef.Namespace || pv.Spec.ClaimRef.Name != claimRef.Name {
		t.Fatalf("Bind mismatch! Expected %s/%s but got %s/%s", claimRef.Namespace, claimRef.Name, pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)
	}
}
Developer: nak3, Project: kubernetes, Lines: 72, Source: persistent_volumes_test.go
Example 8: TestFindingPreboundVolumes
func TestFindingPreboundVolumes(t *testing.T) {
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "claim01",
			Namespace: "myns",
			SelfLink:  testapi.Default.SelfLink("pvc", ""),
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources:   v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi")}},
		},
	}
	claimRef, err := v1.GetReference(claim)
	if err != nil {
		t.Errorf("error getting claimRef: %v", err)
	}
	pv1 := testVolume("pv1", "1Gi")
	pv5 := testVolume("pv5", "5Gi")
	pv8 := testVolume("pv8", "8Gi")
	pvBadSize := testVolume("pvBadSize", "1Mi")
	pvBadMode := testVolume("pvBadMode", "1Gi")
	pvBadMode.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}
	index := newPersistentVolumeOrderedIndex()
	index.store.Add(pv1)
	index.store.Add(pv5)
	index.store.Add(pv8)
	index.store.Add(pvBadSize)
	index.store.Add(pvBadMode)
	// expected exact match on size
	volume, _ := index.findBestMatchForClaim(claim)
	if volume.Name != pv1.Name {
		t.Errorf("Expected %s but got volume %s instead", pv1.Name, volume.Name)
	}
	// pretend the exact match is pre-bound. should get the next size up.
	pv1.Spec.ClaimRef = &v1.ObjectReference{Name: "foo", Namespace: "bar"}
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != pv5.Name {
		t.Errorf("Expected %s but got volume %s instead", pv5.Name, volume.Name)
	}
	// pretend the exact match is available but the largest volume is pre-bound to the claim.
	pv1.Spec.ClaimRef = nil
	pv8.Spec.ClaimRef = claimRef
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != pv8.Name {
		t.Errorf("Expected %s but got volume %s instead", pv8.Name, volume.Name)
	}
	// pretend the volume with too small a size is pre-bound to the claim. should get the exact match.
	pv8.Spec.ClaimRef = nil
	pvBadSize.Spec.ClaimRef = claimRef
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != pv1.Name {
		t.Errorf("Expected %s but got volume %s instead", pv1.Name, volume.Name)
	}
	// pretend the volume without the right access mode is pre-bound to the claim. should get the exact match.
	pvBadSize.Spec.ClaimRef = nil
	pvBadMode.Spec.ClaimRef = claimRef
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != pv1.Name {
		t.Errorf("Expected %s but got volume %s instead", pv1.Name, volume.Name)
	}
}
Developer: kubernetes, Project: kubernetes, Lines: 68, Source: index_test.go
Example 9: SyncPod
// SyncPod syncs the running pod into the desired pod by executing the following steps:
//
// 1. Compute sandbox and container changes.
// 2. Kill pod sandbox if necessary.
// 3. Kill any containers that should not be running.
// 4. Create sandbox if necessary.
// 5. Create init containers.
// 6. Create normal containers.
func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
	// Step 1: Compute sandbox and container changes.
	podContainerChanges := m.computePodContainerChanges(pod, podStatus)
	glog.V(3).Infof("computePodContainerChanges got %+v for pod %q", podContainerChanges, format.Pod(pod))
	if podContainerChanges.CreateSandbox {
		ref, err := v1.GetReference(pod)
		if err != nil {
			glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err)
		}
		if podContainerChanges.SandboxID != "" {
			m.recorder.Eventf(ref, v1.EventTypeNormal, "SandboxChanged", "Pod sandbox changed, it will be killed and re-created.")
		} else {
			m.recorder.Eventf(ref, v1.EventTypeNormal, "SandboxReceived", "Pod sandbox received, it will be created.")
		}
	}
	// Step 2: Kill the pod if the sandbox has changed.
	if podContainerChanges.CreateSandbox || (len(podContainerChanges.ContainersToKeep) == 0 && len(podContainerChanges.ContainersToStart) == 0) {
		if len(podContainerChanges.ContainersToKeep) == 0 && len(podContainerChanges.ContainersToStart) == 0 {
			glog.V(4).Infof("Stopping PodSandbox for %q because all other containers are dead.", format.Pod(pod))
		} else {
			glog.V(4).Infof("Stopping PodSandbox for %q, will start new one", format.Pod(pod))
		}
		killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
		result.AddPodSyncResult(killResult)
		if killResult.Error() != nil {
			glog.Errorf("killPodWithSyncResult failed: %v", killResult.Error())
			return
		}
	} else {
		// Step 3: kill any running containers in this pod which are not to keep.
		for containerID, containerInfo := range podContainerChanges.ContainersToKill {
			glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerInfo.name, containerID, format.Pod(pod))
			killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name)
			result.AddSyncResult(killContainerResult)
			if err := m.killContainer(pod, containerID, containerInfo.name, containerInfo.message, nil); err != nil {
				killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
				glog.Errorf("killContainer %q(id=%q) for pod %q failed: %v", containerInfo.name, containerID, format.Pod(pod), err)
				return
			}
		}
	}
	// Keep terminated init containers fairly aggressively controlled
	m.pruneInitContainersBeforeStart(pod, podStatus, podContainerChanges.InitContainersToKeep)
	// We pass the value of the podIP down to generatePodSandboxConfig and
	// generateContainerConfig, which in turn passes it to various other
	// functions, in order to facilitate functionality that requires this
	// value (hosts file and downward API) and avoid races determining
	// the pod IP in cases where a container requires restart but the
	// podIP isn't in the status manager yet.
	//
	// We default to the IP in the passed-in pod status, and overwrite it if the
	// sandbox needs to be (re)started.
	podIP := ""
	if podStatus != nil {
		podIP = podStatus.IP
	}
	// Step 4: Create a sandbox for the pod if necessary.
	podSandboxID := podContainerChanges.SandboxID
	if podContainerChanges.CreateSandbox && len(podContainerChanges.ContainersToStart) > 0 {
		var msg string
		var err error
		glog.V(4).Infof("Creating sandbox for pod %q", format.Pod(pod))
		createSandboxResult := kubecontainer.NewSyncResult(kubecontainer.CreatePodSandbox, format.Pod(pod))
		result.AddSyncResult(createSandboxResult)
		podSandboxID, msg, err = m.createPodSandbox(pod, podContainerChanges.Attempt)
		if err != nil {
			createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg)
			glog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err)
			return
		}
		podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
		if err != nil {
			glog.Errorf("Failed to get pod sandbox status: %v; Skipping pod %q", err, format.Pod(pod))
			result.Fail(err)
			return
		}
		// Overwrite the podIP passed in the pod status, since we just started the pod sandbox.
		podIP = m.determinePodSandboxIP(pod.Namespace, pod.Name, podSandboxStatus)
		glog.V(4).Infof("Determined the ip %q for pod %q after sandbox changed", podIP, format.Pod(pod))
	}
	// Get podSandboxConfig for containers to start.
	configPodSandboxResult := kubecontainer.NewSyncResult(kubecontainer.ConfigPodSandbox, podSandboxID)
	//......... remaining code omitted .........
Developer: kubernetes, Project: kubernetes, Lines: 101, Source: kuberuntime_manager.go
Example 10: getRef
func getRef(object runtime.Object) (*v1.ObjectReference, error) {
	return v1.GetReference(object)
}
Developer: nak3, Project: kubernetes, Lines: 3, Source: cronjob_controller.go
Note: The k8s.io/kubernetes/pkg/api/v1.GetReference examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; please consult each project's license before further distribution or use. Do not reproduce without permission.