This article collects typical usage examples of the Golang function fake.NewSimpleClientset from k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake. If you are wondering what exactly NewSimpleClientset does, how to call it, or what real-world usage looks like, the curated examples below should help.
Twenty code examples of NewSimpleClientset are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang samples.
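Before the examples, here is a minimal sketch of the pattern they all share: seed the fake clientset with API objects, drive the generated client interfaces against its in-memory tracker, and inspect the actions the fake records. The test below is illustrative only (the pod name, namespace, and test function are made up), but the import paths and the Core()/Pods()/List(v1.ListOptions{}) and Actions() calls follow the Kubernetes 1.5-era tree these examples come from.

package example

import (
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
)

// TestListSeededPods seeds the fake clientset with one pod, lists pods through
// the generated Core() client, and prints the actions the fake recorded.
func TestListSeededPods(t *testing.T) {
	// Hypothetical object used only for this sketch.
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "demo", Namespace: "ns"}}

	// NewSimpleClientset loads the given objects into an in-memory object tracker;
	// every interface of the returned Clientset reads from and writes to that tracker.
	client := fake.NewSimpleClientset(pod)

	pods, err := client.Core().Pods("ns").List(v1.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error listing pods: %v", err)
	}
	if len(pods.Items) != 1 || pods.Items[0].Name != "demo" {
		t.Errorf("unexpected pod list: %#v", pods.Items)
	}

	// The fake also records every call, which is what the verifyActions checks
	// in the status-manager examples below rely on.
	for _, action := range client.Actions() {
		t.Logf("verb=%s resource=%s", action.GetVerb(), action.GetResource().Resource)
	}
}

This is the whole idea of the fake clientset: no API server is involved, so tests can both read back the seeded objects and assert on the exact verbs that were issued.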
Example 1: TestPlugin
func TestPlugin(t *testing.T) {
var (
testPodUID = types.UID("test_pod_uid")
testVolumeName = "test_volume_name"
testNamespace = "test_configmap_namespace"
testName = "test_configmap_name"
volumeSpec = volumeSpec(testVolumeName, testName, 0644)
configMap = configMap(testNamespace, testName)
client = fake.NewSimpleClientset(&configMap)
pluginMgr = volume.VolumePluginMgr{}
tempDir, host = newTestHost(t, client)
)
defer os.RemoveAll(tempDir)
pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
if err != nil {
t.Errorf("Failed to GetVolumeName: %v", err)
}
if vName != "test_volume_name/test_configmap_name" {
t.Errorf("Got unexpect VolumeName %v", vName)
}
volumePath := mounter.GetPath()
if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) {
t.Errorf("Got unexpected path: %s", volumePath)
}
fsGroup := int64(1001)
err = mounter.SetUp(&fsGroup)
if err != nil {
t.Errorf("Failed to setup volume: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
doTestConfigMapDataInVolume(volumePath, configMap, t)
doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
Developer: alex-mohr, Project: kubernetes, Lines: 60, Source: configmap_test.go
Example 2: TestDoNotDeleteMirrorPods
func TestDoNotDeleteMirrorPods(t *testing.T) {
staticPod := getTestPod()
staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
mirrorPod := getTestPod()
mirrorPod.UID = "mirror-12345678"
mirrorPod.Annotations = map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
}
// Set the deletion timestamp.
mirrorPod.DeletionTimestamp = new(metav1.Time)
client := fake.NewSimpleClientset(mirrorPod)
m := newTestManager(client)
m.podManager.AddPod(staticPod)
m.podManager.AddPod(mirrorPod)
// Verify setup.
assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)
status := getRandomPodStatus()
now := metav1.Now()
status.StartTime = &now
m.SetPodStatus(staticPod, status)
m.testSyncBatch()
// Expect not to see a delete action.
verifyActions(t, m.kubeClient, []core.Action{
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
})
}
Developer: alex-mohr, Project: kubernetes, Lines: 32, Source: status_manager_test.go
Example 3: TestGetNodeAddresses
// TestGetNodeAddresses verifies that proper results are returned
// when requesting node addresses.
func TestGetNodeAddresses(t *testing.T) {
assert := assert.New(t)
fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2"}, apiv1.NodeResources{})).Core().Nodes()
addressProvider := nodeAddressProvider{fakeNodeClient}
// Fail case (no addresses associated with nodes)
nodes, _ := fakeNodeClient.List(apiv1.ListOptions{})
addrs, err := addressProvider.externalAddresses()
assert.Error(err, "addresses should have caused an error as there are no addresses.")
assert.Equal([]string(nil), addrs)
// Pass case with External type IP
nodes, _ = fakeNodeClient.List(apiv1.ListOptions{})
for index := range nodes.Items {
nodes.Items[index].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeExternalIP, Address: "127.0.0.1"}}
fakeNodeClient.Update(&nodes.Items[index])
}
addrs, err = addressProvider.externalAddresses()
assert.NoError(err, "addresses should not have returned an error.")
assert.Equal([]string{"127.0.0.1", "127.0.0.1"}, addrs)
// Pass case with LegacyHost type IP
nodes, _ = fakeNodeClient.List(apiv1.ListOptions{})
for index := range nodes.Items {
nodes.Items[index].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeLegacyHostIP, Address: "127.0.0.2"}}
fakeNodeClient.Update(&nodes.Items[index])
}
addrs, err = addressProvider.externalAddresses()
assert.NoError(err, "addresses failback should not have returned an error.")
assert.Equal([]string{"127.0.0.2", "127.0.0.2"}, addrs)
}
Developer: paralin, Project: kubernetes, Lines: 35, Source: master_test.go
Example 4: TestPersistentClaimReadOnlyFlag
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
ep := &v1.Endpoints{
ObjectMeta: v1.ObjectMeta{
Namespace: "nsA",
Name: "ep",
},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []v1.EndpointPort{{Name: "foo", Port: 80, Protocol: v1.ProtocolTCP}},
}},
}
client := fake.NewSimpleClientset(pv, claim, ep)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
Developer: nak3, Project: kubernetes, Lines: 60, Source: glusterfs_test.go
Example 5: TestPersistentClaimReadOnlyFlag
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("iscsi_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: "127.0.0.1:3260",
IQN: "iqn.2014-12.server:storage.target01",
FSType: "ext4",
Lun: 0,
},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(iscsiPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
Developer: alex-mohr, Project: kubernetes, Lines: 54, Source: iscsi_test.go
Example 6: TestPersistentClaimReadOnlyFlag
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("fc_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
lun := int32(0)
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
FC: &v1.FCVolumeSource{
TargetWWNs: []string{"some_wwn"},
FSType: "ext4",
Lun: &lun,
},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(fcPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
Developer: alex-mohr, Project: kubernetes, Lines: 54, Source: fc_test.go
Example 7: TestFederationQueryWithoutCache
func TestFederationQueryWithoutCache(t *testing.T) {
kd := newKubeDNS()
kd.config.Federations = map[string]string{
"myfederation": "example.com",
"secondfederation": "second.example.com",
}
kd.kubeClient = fake.NewSimpleClientset(newNodes())
testValidFederationQueries(t, kd)
testInvalidFederationQueries(t, kd)
}
Developer: fejta, Project: kubernetes, Lines: 11, Source: dns_test.go
Example 8: TestSyncBatch
func TestSyncBatch(t *testing.T) {
syncer := newTestManager(&fake.Clientset{})
testPod := getTestPod()
syncer.kubeClient = fake.NewSimpleClientset(testPod)
syncer.SetPodStatus(testPod, getRandomPodStatus())
syncer.testSyncBatch()
verifyActions(t, syncer.kubeClient, []core.Action{
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
},
)
}
Developer: alex-mohr, Project: kubernetes, Lines: 12, Source: status_manager_test.go
Example 9: TestPluginReboot
// Test the case where the plugin's ready file exists, but the volume dir is not a
// mountpoint, which is the state the system will be in after a reboot. The dir
// should be mounted and the secret data written to it.
func TestPluginReboot(t *testing.T) {
var (
testPodUID = types.UID("test_pod_uid3")
testVolumeName = "test_volume_name"
testNamespace = "test_secret_namespace"
testName = "test_secret_name"
volumeSpec = volumeSpec(testVolumeName, testName, 0644)
secret = secret(testNamespace, testName)
client = fake.NewSimpleClientset(&secret)
pluginMgr = volume.VolumePluginMgr{}
rootDir, host = newTestHost(t, client)
)
defer os.RemoveAll(rootDir)
pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
plugin, err := pluginMgr.FindPluginByName(secretPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~secret/test_volume_name", rootDir)
util.SetReady(podMetadataDir)
volumePath := mounter.GetPath()
if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid3/volumes/kubernetes.io~secret/test_volume_name")) {
t.Errorf("Got unexpected path: %s", volumePath)
}
err = mounter.SetUp(nil)
if err != nil {
t.Errorf("Failed to setup volume: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
doTestSecretDataInVolume(volumePath, secret, t)
doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
Developer: alex-mohr, Project: kubernetes, Lines: 55, Source: secret_test.go
Example 10: TestUpdateNodeStatusError
func TestUpdateNodeStatusError(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
// No matching node for the kubelet
testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
if err := kubelet.updateNodeStatus(); err == nil {
t.Errorf("unexpected non error: %v", err)
}
if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry {
t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions())
}
}
Developer: paralin, Project: kubernetes, Lines: 13, Source: kubelet_node_status_test.go
Example 11: TestServiceEvaluatorMatchesResources
func TestServiceEvaluatorMatchesResources(t *testing.T) {
kubeClient := fake.NewSimpleClientset()
evaluator := NewServiceEvaluator(kubeClient)
expected := quota.ToSet([]api.ResourceName{
api.ResourceServices,
api.ResourceServicesNodePorts,
api.ResourceServicesLoadBalancers,
})
actual := quota.ToSet(evaluator.MatchesResources())
if !expected.Equal(actual) {
t.Errorf("expected: %v, actual: %v", expected, actual)
}
}
Developer: alex-mohr, Project: kubernetes, Lines: 13, Source: services_test.go
Example 12: TestReconcilePodStatus
func TestReconcilePodStatus(t *testing.T) {
testPod := getTestPod()
client := fake.NewSimpleClientset(testPod)
syncer := newTestManager(client)
syncer.SetPodStatus(testPod, getRandomPodStatus())
// Call syncBatch directly to test reconcile
syncer.syncBatch() // The apiStatusVersions should be set now
podStatus, ok := syncer.GetPodStatus(testPod.UID)
if !ok {
t.Fatalf("Should find pod status for pod: %#v", testPod)
}
testPod.Status = podStatus
// If the pod status is the same, a reconciliation is not needed,
// syncBatch should do nothing
syncer.podManager.UpdatePod(testPod)
if syncer.needsReconcile(testPod.UID, podStatus) {
t.Errorf("Pod status is the same, a reconciliation is not needed")
}
client.ClearActions()
syncer.syncBatch()
verifyActions(t, client, []core.Action{})
// If the pod status is unchanged and only the timestamp has been truncated to RFC3339
// format (lower precision, no nanoseconds), a reconciliation is not needed and syncBatch should do nothing.
// The StartTime should have been set in SetPodStatus().
// TODO(random-liu): Remove this later when api becomes consistent for timestamp.
normalizedStartTime := testPod.Status.StartTime.Rfc3339Copy()
testPod.Status.StartTime = &normalizedStartTime
syncer.podManager.UpdatePod(testPod)
if syncer.needsReconcile(testPod.UID, podStatus) {
t.Errorf("Pod status only differs for timestamp format, a reconciliation is not needed")
}
client.ClearActions()
syncer.syncBatch()
verifyActions(t, client, []core.Action{})
// If the pod status is different, a reconciliation is needed, syncBatch should trigger an update
testPod.Status = getRandomPodStatus()
syncer.podManager.UpdatePod(testPod)
if !syncer.needsReconcile(testPod.UID, podStatus) {
t.Errorf("Pod status is different, a reconciliation is needed")
}
client.ClearActions()
syncer.syncBatch()
verifyActions(t, client, []core.Action{
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
})
}
Developer: alex-mohr, Project: kubernetes, Lines: 51, Source: status_manager_test.go
Example 13: TestSyncResourceQuotaNoChange
func TestSyncResourceQuotaNoChange(t *testing.T) {
resourceQuota := v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
}
kubeClient := fake.NewSimpleClientset(&v1.PodList{}, &resourceQuota)
resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
KubeClient: kubeClient,
ResyncPeriod: controller.NoResyncPeriodFunc,
Registry: install.NewRegistry(kubeClient, nil),
GroupKindsToReplenish: []schema.GroupKind{
api.Kind("Pod"),
api.Kind("Service"),
api.Kind("ReplicationController"),
api.Kind("PersistentVolumeClaim"),
},
ControllerFactory: NewReplenishmentControllerFactoryFromClient(kubeClient),
ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
}
quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
err := quotaController.syncResourceQuota(resourceQuota)
if err != nil {
t.Fatalf("Unexpected error %v", err)
}
expectedActionSet := sets.NewString(
strings.Join([]string{"list", "pods", ""}, "-"),
)
actionSet := sets.NewString()
for _, action := range kubeClient.Actions() {
actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
}
if !actionSet.HasAll(expectedActionSet.List()...) {
t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
}
}
Developer: alex-mohr, Project: kubernetes, Lines: 51, Source: resource_quota_controller_test.go
Example 14: TestSyncBatchChecksMismatchedUID
func TestSyncBatchChecksMismatchedUID(t *testing.T) {
syncer := newTestManager(&fake.Clientset{})
pod := getTestPod()
pod.UID = "first"
syncer.podManager.AddPod(pod)
differentPod := getTestPod()
differentPod.UID = "second"
syncer.podManager.AddPod(differentPod)
syncer.kubeClient = fake.NewSimpleClientset(pod)
syncer.SetPodStatus(differentPod, getRandomPodStatus())
syncer.testSyncBatch()
verifyActions(t, syncer.kubeClient, []core.Action{
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
})
}
Developer: alex-mohr, Project: kubernetes, Lines: 15, Source: status_manager_test.go
Example 15: TestPersistentClaimReadOnlyFlag
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
Quobyte: &v1.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "root", Group: "root"},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
tmpDir, err := utiltesting.MkTmpdir("quobyte_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(quobytePluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
Developer: alex-mohr, Project: kubernetes, Lines: 48, Source: quobyte_test.go
Example 16: TestPersistentClaimReadOnlyFlag
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &v1.PersistentVolume{
ObjectMeta: v1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
clientset := fake.NewSimpleClientset(pv, claim)
tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, clientset, nil))
plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
Developer: alex-mohr, Project: kubernetes, Lines: 48, Source: aws_ebs_test.go
Example 17: TestWatchPods
func TestWatchPods(t *testing.T) {
testJob := newJob(2, 2)
clientset := fake.NewSimpleClientset(testJob)
fakeWatch := watch.NewFake()
clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
// Put one job and one pod into the store
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(testJob)
received := make(chan struct{})
// The pod update sent through the fakeWatcher should figure out the managing job and
// send it into the syncHandler.
manager.syncHandler = func(key string) error {
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
t.Errorf("Error getting namespace/name from key %v: %v", key, err)
}
job, err := manager.jobLister.Jobs(ns).Get(name)
if err != nil {
t.Errorf("Expected to find job under key %v: %v", key, err)
}
if !api.Semantic.DeepDerivative(job, testJob) {
t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
close(received)
return nil
}
close(received)
return nil
}
// Start only the pod watcher and the workqueue, send a watch event,
// and make sure it hits the sync method for the right job.
stopCh := make(chan struct{})
defer close(stopCh)
go sharedInformerFactory.Pods().Informer().Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(1, v1.PodRunning, testJob)
testPod := pods[0]
testPod.Status.Phase = v1.PodFailed
fakeWatch.Add(&testPod)
t.Log("Waiting for pod to reach syncHandler")
<-received
}
Developer: paralin, Project: kubernetes, Lines: 46, Source: jobcontroller_test.go
Example 18: TestStaleUpdates
func TestStaleUpdates(t *testing.T) {
pod := getTestPod()
client := fake.NewSimpleClientset(pod)
m := newTestManager(client)
status := v1.PodStatus{Message: "initial status"}
m.SetPodStatus(pod, status)
status.Message = "first version bump"
m.SetPodStatus(pod, status)
status.Message = "second version bump"
m.SetPodStatus(pod, status)
verifyUpdates(t, m, 3)
t.Logf("First sync pushes latest status.")
m.testSyncBatch()
verifyActions(t, m.kubeClient, []core.Action{
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
})
client.ClearActions()
for i := 0; i < 2; i++ {
t.Logf("Next 2 syncs should be ignored (%d).", i)
m.testSyncBatch()
verifyActions(t, m.kubeClient, []core.Action{})
}
t.Log("Unchanged status should not send an update.")
m.SetPodStatus(pod, status)
verifyUpdates(t, m, 0)
t.Log("... unless it's stale.")
m.apiStatusVersions[pod.UID] = m.apiStatusVersions[pod.UID] - 1
m.SetPodStatus(pod, status)
m.testSyncBatch()
verifyActions(t, m.kubeClient, []core.Action{
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
})
// Nothing stuck in the pipe.
verifyUpdates(t, m, 0)
}
Developer: alex-mohr, Project: kubernetes, Lines: 44, Source: status_manager_test.go
Example 19: run
func (f *fixture) run(deploymentName string) {
f.client = fake.NewSimpleClientset(f.objects...)
informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
c.eventRecorder = &record.FakeRecorder{}
c.dListerSynced = alwaysReady
c.rsListerSynced = alwaysReady
c.podListerSynced = alwaysReady
for _, d := range f.dLister {
c.dLister.Indexer.Add(d)
}
for _, rs := range f.rsLister {
c.rsLister.Indexer.Add(rs)
}
for _, pod := range f.podLister {
c.podLister.Indexer.Add(pod)
}
stopCh := make(chan struct{})
defer close(stopCh)
informers.Start(stopCh)
err := c.syncDeployment(deploymentName)
if err != nil {
f.t.Errorf("error syncing deployment: %v", err)
}
actions := filterInformerActions(f.client.Actions())
for i, action := range actions {
if len(f.actions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
break
}
expectedAction := f.actions[i]
if !expectedAction.Matches(action.GetVerb(), action.GetResource().Resource) {
f.t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expectedAction, action)
continue
}
}
if len(f.actions) > len(actions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
}
}
Developer: alex-mohr, Project: kubernetes, Lines: 44, Source: deployment_controller_test.go
Example 20: TestGetMountedVolumesForPodAndGetVolumesInUse
func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())
node, pod, pv, claim := createObjects()
kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)
manager, err := newTestVolumeManager(tmpDir, podManager, kubeClient)
if err != nil {
t.Fatalf("Failed to initialize volume manager: %v", err)
}
stopCh := runVolumeManager(manager)
defer close(stopCh)
podManager.SetPods([]*v1.Pod{pod})
// Fake node status update
go simulateVolumeInUseUpdate(
v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
stopCh,
manager)
err = manager.WaitForAttachAndMount(pod)
if err != nil {
t.Errorf("Expected success: %v", err)
}
expectedMounted := pod.Spec.Volumes[0].Name
actualMounted := manager.GetMountedVolumesForPod(types.UniquePodName(pod.ObjectMeta.UID))
if _, ok := actualMounted[expectedMounted]; !ok || (len(actualMounted) != 1) {
t.Errorf("Expected %v to be mounted to pod but got %v", expectedMounted, actualMounted)
}
expectedInUse := []v1.UniqueVolumeName{v1.UniqueVolumeName(node.Status.VolumesAttached[0].Name)}
actualInUse := manager.GetVolumesInUse()
if !reflect.DeepEqual(expectedInUse, actualInUse) {
t.Errorf("Expected %v to be in use but got %v", expectedInUse, actualInUse)
}
}
Developer: alex-mohr, Project: kubernetes, Lines: 44, Source: volume_manager_test.go
Note: the k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake.NewSimpleClientset examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or reuse is subject to each project's license. Do not reproduce without permission.