This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/client/clientset_generated/clientset.NewForConfigOrDie. If you have been wondering what NewForConfigOrDie does, how to call it, or what it looks like in real code, the curated examples below should help.
Twenty NewForConfigOrDie code examples are shown below, sorted by popularity by default. You can upvote the ones you like or find useful; your feedback helps the system recommend better Golang examples.
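Before diving into the examples, here is a minimal sketch of the pattern they all share: build a restclient.Config and hand it to NewForConfigOrDie, which panics instead of returning an error (use clientset.NewForConfig if you prefer to handle the error yourself). The API server address and the pod-listing call are illustrative assumptions, and the import paths follow the Kubernetes 1.5-era layout used by the examples below.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
	// The API server address is a placeholder assumption.
	config := &restclient.Config{Host: "http://127.0.0.1:8080"}

	// NewForConfigOrDie panics on an invalid config, so there is no error to
	// check; this is why it is common in tests and at program startup.
	client := clientset.NewForConfigOrDie(config)

	// Use the typed clientset as in the examples below, e.g. list pods in the
	// default namespace.
	pods, err := client.Core().Pods(v1.NamespaceDefault).List(v1.ListOptions{})
	if err != nil {
		fmt.Printf("failed to list pods: %v\n", err)
		return
	}
	fmt.Printf("found %d pods\n", len(pods.Items))
}

Most of the examples below apply this same pattern, often wrapping the config with restclient.AddUserAgent so that each controller or informer gets its own identifiable client.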
Example 1: rmSetup
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("Error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
resyncPeriodFunc := func() time.Duration {
return resyncPeriod
}
podInformer := informers.NewPodInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
rm := replication.NewReplicationManager(
podInformer,
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replication-controller")),
resyncPeriodFunc,
replication.BurstReplicas,
4096,
enableGarbageCollector,
)
if err != nil {
t.Fatalf("Failed to create replication manager")
}
return s, rm, podInformer, clientSet
}
Author: jonboulle, Project: kubernetes, Lines: 28, Source: replicationcontroller_test.go
Example 2: createAdClients
func createAdClients(ns *v1.Namespace, t *testing.T, server *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, attachdetach.AttachDetachController, cache.SharedIndexInformer, cache.SharedIndexInformer) {
config := restclient.Config{
Host: server.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion},
QPS: 1000000,
Burst: 1000000,
}
resyncPeriod := 12 * time.Hour
testClient := clientset.NewForConfigOrDie(&config)
host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
plugin := &volumetest.FakeVolumePlugin{
PluginName: provisionerPluginName,
Host: host,
Config: volume.VolumeConfig{},
LastProvisionerOptions: volume.VolumeOptions{},
NewAttacherCallCount: 0,
NewDetacherCallCount: 0,
Mounters: nil,
Unmounters: nil,
Attachers: nil,
Detachers: nil,
}
plugins := []volume.VolumePlugin{plugin}
cloud := &fakecloud.FakeCloud{}
podInformer := informers.NewPodInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
nodeInformer := informers.NewNodeInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "node-informer")), resyncPeriod)
pvcInformer := informers.NewPVCInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pvc-informer")), resyncPeriod)
pvInformer := informers.NewPVInformer(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pv-informer")), resyncPeriod)
ctrl, err := attachdetach.NewAttachDetachController(testClient, podInformer, nodeInformer, pvcInformer, pvInformer, cloud, plugins)
if err != nil {
t.Fatalf("Error creating AttachDetach : %v", err)
}
return testClient, ctrl, podInformer, nodeInformer
}
Author: jonboulle, Project: kubernetes, Lines: 35, Source: attach_detach_test.go
Example 3: rmSetup
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replicaset.ReplicaSetController, cache.SharedIndexInformer, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("Error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "rs-informers")), nil, resyncPeriod)
rm := replicaset.NewReplicaSetController(
informers.ReplicaSets(),
informers.Pods(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replicaset-controller")),
replicaset.BurstReplicas,
4096,
enableGarbageCollector,
)
if err != nil {
t.Fatalf("Failed to create replicaset controller")
}
return s, rm, informers.ReplicaSets().Informer(), informers.Pods().Informer(), clientSet
}
Author: kubernetes, Project: kubernetes, Lines: 26, Source: replicaset_test.go
Example 4: createClients
func createClients(ns *v1.Namespace, t *testing.T, s *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
// Use a higher QPS and Burst: one of the tests exercises race conditions by
// creating many objects, and the default values were too low for it.
binderClient := clientset.NewForConfigOrDie(&restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion},
QPS: 1000000,
Burst: 1000000,
})
testClient := clientset.NewForConfigOrDie(&restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion},
QPS: 1000000,
Burst: 1000000,
})
host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
plugin := &volumetest.FakeVolumePlugin{
PluginName: provisionerPluginName,
Host: host,
Config: volume.VolumeConfig{},
LastProvisionerOptions: volume.VolumeOptions{},
NewAttacherCallCount: 0,
NewDetacherCallCount: 0,
Mounters: nil,
Unmounters: nil,
Attachers: nil,
Detachers: nil,
}
plugins := []volume.VolumePlugin{plugin}
cloud := &fakecloud.FakeCloud{}
ctrl := persistentvolumecontroller.NewController(
persistentvolumecontroller.ControllerParameters{
KubeClient: binderClient,
SyncPeriod: getSyncPeriod(syncPeriod),
VolumePlugins: plugins,
Cloud: cloud,
EnableDynamicProvisioning: true,
})
watchPV, err := testClient.PersistentVolumes().Watch(v1.ListOptions{})
if err != nil {
t.Fatalf("Failed to watch PersistentVolumes: %v", err)
}
watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(v1.ListOptions{})
if err != nil {
t.Fatalf("Failed to watch PersistentVolumeClaims: %v", err)
}
return testClient, ctrl, watchPV, watchPVC
}
Author: kubernetes, Project: kubernetes, Lines: 51, Source: persistent_volumes_test.go
Example 5: TestSyncJobUpdateRequeue
func TestSyncJobUpdateRequeue(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
updateError := fmt.Errorf("Update error")
manager.updateHandler = func(job *batch.Job) error {
manager.queue.AddRateLimited(getKey(job, t))
return updateError
}
job := newJob(2, 2)
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
err := manager.syncJob(getKey(job, t))
if err == nil || err != updateError {
t.Errorf("Expected error %v when syncing jobs, got %v", updateError, err)
}
t.Log("Waiting for a job in the queue")
key, _ := manager.queue.Get()
expectedKey := getKey(job, t)
if key != expectedKey {
t.Errorf("Expected requeue of job with key %s got %s", expectedKey, key)
}
}
Author: jonboulle, Project: kubernetes, Lines: 25, Source: jobcontroller_test.go
Example 6: TestSyncEndpointsItemsPreexistingIdentical
func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
ns := v1.NamespaceDefault
testServer, endpointsHandler := makeTestServer(t, v1.NamespaceDefault,
serverResponse{http.StatusOK, &v1.Endpoints{
ObjectMeta: v1.ObjectMeta{
ResourceVersion: "1",
Name: "foo",
Namespace: ns,
},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
Ports: []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}},
}})
defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
addPods(endpoints.podStore.Indexer, v1.NamespaceDefault, 1, 1, 0)
endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: v1.NamespaceDefault},
Spec: v1.ServiceSpec{
Selector: map[string]string{"foo": "bar"},
Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
},
})
endpoints.syncService(ns + "/foo")
endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", v1.NamespaceDefault, "foo"), "GET", nil)
}
Author: nak3, Project: kubernetes, Lines: 29, Source: endpoints_controller_test.go
Example 7: TestSyncEndpointsItemsPreserveNoSelector
func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
ns := v1.NamespaceDefault
testServer, endpointsHandler := makeTestServer(t, ns,
serverResponse{http.StatusOK, &v1.Endpoints{
ObjectMeta: v1.ObjectMeta{
Name: "foo",
Namespace: ns,
ResourceVersion: "1",
},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
Ports: []v1.EndpointPort{{Port: 1000}},
}},
}})
defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 80}}},
})
endpoints.syncService(ns + "/foo")
endpointsHandler.ValidateRequestCount(t, 0)
}
Author: nak3, Project: kubernetes, Lines: 25, Source: endpoints_controller_test.go
Example 8: TestCheckLeftoverEndpoints
func TestCheckLeftoverEndpoints(t *testing.T) {
ns := v1.NamespaceDefault
// Note that this requests *all* endpoints, therefore the NamespaceAll
// below.
testServer, _ := makeTestServer(t, v1.NamespaceAll,
serverResponse{http.StatusOK, &v1.EndpointsList{
ListMeta: metav1.ListMeta{
ResourceVersion: "1",
},
Items: []v1.Endpoints{{
ObjectMeta: v1.ObjectMeta{
Name: "foo",
Namespace: ns,
ResourceVersion: "1",
},
Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
Ports: []v1.EndpointPort{{Port: 1000}},
}},
}},
}})
defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
endpoints.checkLeftoverEndpoints()
if e, a := 1, endpoints.queue.Len(); e != a {
t.Fatalf("Expected %v, got %v", e, a)
}
got, _ := endpoints.queue.Get()
if e, a := ns+"/foo", got; e != a {
t.Errorf("Expected %v, got %v", e, a)
}
}
Author: nak3, Project: kubernetes, Lines: 35, Source: endpoints_controller_test.go
Example 9: TestBind
func TestBind(t *testing.T) {
table := []struct {
binding *v1.Binding
}{
{binding: &v1.Binding{
ObjectMeta: metav1.ObjectMeta{
Namespace: v1.NamespaceDefault,
Name: "foo",
},
Target: v1.ObjectReference{
Name: "foohost.kubernetes.mydomain.com",
},
}},
}
for _, item := range table {
handler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: "",
T: t,
}
server := httptest.NewServer(&handler)
defer server.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
b := binder{client}
if err := b.Bind(item.binding); err != nil {
t.Errorf("Unexpected error: %v", err)
continue
}
expectedBody := runtime.EncodeOrDie(testapi.Default.Codec(), item.binding)
handler.ValidateRequest(t, testapi.Default.ResourcePath("bindings", v1.NamespaceDefault, ""), "POST", &expectedBody)
}
}
Author: kubernetes, Project: kubernetes, Lines: 34, Source: factory_test.go
Example 10: TestSyncJobExpectations
// TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestSyncJobExpectations(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
manager.updateHandler = func(job *batch.Job) error { return nil }
job := newJob(2, 2)
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
pods := newPodList(2, v1.PodPending, job)
podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer()
podIndexer.Add(&pods[0])
manager.expectations = FakeJobExpectations{
controller.NewControllerExpectations(), true, func() {
// If we check active pods before checking expectations, the job
// will create a new replica because it doesn't see this pod, but
// has fulfilled its expectations.
podIndexer.Add(&pods[1])
},
}
manager.syncJob(getKey(job, t))
if len(fakePodControl.Templates) != 0 {
t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
}
if len(fakePodControl.DeletePodName) != 0 {
t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
}
}
Author: jonboulle, Project: kubernetes, Lines: 33, Source: jobcontroller_test.go
Example 11: TestSyncPastDeadlineJobFinished
func TestSyncPastDeadlineJobFinished(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
var actual *batch.Job
manager.updateHandler = func(job *batch.Job) error {
actual = job
return nil
}
job := newJob(1, 1)
activeDeadlineSeconds := int64(10)
job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
start := metav1.Unix(metav1.Now().Time.Unix()-15, 0)
job.Status.StartTime = &start
job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
err := manager.syncJob(getKey(job, t))
if err != nil {
t.Errorf("Unexpected error when syncing jobs %v", err)
}
if len(fakePodControl.Templates) != 0 {
t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
}
if len(fakePodControl.DeletePodName) != 0 {
t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
}
if actual != nil {
t.Error("Unexpected job modification")
}
}
Author: jonboulle, Project: kubernetes, Lines: 34, Source: jobcontroller_test.go
Example 12: TestPodReadOnlyFilesystem
func TestPodReadOnlyFilesystem(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
isReadOnly := true
ns := framework.CreateTestingNamespace("pod-readonly-root", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "xxx",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "fake-name",
Image: "fakeimage",
SecurityContext: &v1.SecurityContext{
ReadOnlyRootFilesystem: &isReadOnly,
},
},
},
},
}
if _, err := client.Core().Pods(ns.Name).Create(pod); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
integration.DeletePodOrErrorf(t, client, ns.Name, pod.Name)
}
Author: kubernetes, Project: kubernetes, Lines: 33, Source: pods_test.go
Example 13: newClusterClientset
func newClusterClientset(c *v1beta1.Cluster) (*kubeclientset.Clientset, error) {
clusterConfig, err := util.BuildClusterConfig(c)
if clusterConfig != nil {
clientset := kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, UserAgentName))
return clientset, nil
}
return nil, err
}
Author: jonboulle, Project: kubernetes, Lines: 8, Source: cluster_helper.go
Example 14: TestThirdPartyMultiple
func TestThirdPartyMultiple(t *testing.T) {
_, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
defer s.Close()
clientConfig := &restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{NegotiatedSerializer: api.Codecs}}
client := clientset.NewForConfigOrDie(clientConfig)
DoTestInstallMultipleAPIs(t, client, clientConfig)
}
Author: jonboulle, Project: kubernetes, Lines: 9, Source: thirdparty_test.go
Example 15: rmSetup
func rmSetup(t *testing.T) (*httptest.Server, *disruption.DisruptionController, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("Error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pdb-informers")), nil, resyncPeriod)
rm := disruption.NewDisruptionController(
informers.Pods().Informer(),
clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "disruption-controller")),
)
return s, rm, informers.Pods().Informer(), clientSet
}
Author: nak3, Project: kubernetes, Lines: 18, Source: evictions_test.go
Example 16: TestResponsibleForPod
// TestResponsibleForPod tests whether a pod whose spec.SchedulerName points at the
// default scheduler is in fact picked up by the default scheduler.
// Two schedulers are created in the test: the default scheduler and one named
// "foo-scheduler". A pod must be picked up by at most one of the two schedulers.
func TestResponsibleForPod(t *testing.T) {
handler := utiltesting.FakeHandler{
StatusCode: 500,
ResponseBody: "",
T: t,
}
server := httptest.NewServer(&handler)
defer server.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
// factory of "default-scheduler"
factoryDefaultScheduler := NewConfigFactory(client, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
// factory of "foo-scheduler"
factoryFooScheduler := NewConfigFactory(client, "foo-scheduler", v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
// scheduler annotations to be tested
schedulerFitsDefault := "default-scheduler"
schedulerFitsFoo := "foo-scheduler"
schedulerFitsNone := "bar-scheduler"
tests := []struct {
pod *v1.Pod
pickedByDefault bool
pickedByFoo bool
}{
{
// pod with "spec.Schedulername=default-scheduler" should be picked
// by the scheduler of name "default-scheduler", NOT by the one of name "foo-scheduler"
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, Spec: v1.PodSpec{SchedulerName: schedulerFitsDefault}},
pickedByDefault: true,
pickedByFoo: false,
},
{
// pod with "spec.SchedulerName=foo-scheduler" should be NOT
// be picked by the scheduler of name "default-scheduler", but by the one of name "foo-scheduler"
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, Spec: v1.PodSpec{SchedulerName: schedulerFitsFoo}},
pickedByDefault: false,
pickedByFoo: true,
},
{
// pod with "spec.SchedulerName=foo-scheduler" should be NOT
// be picked by niether the scheduler of name "default-scheduler" nor the one of name "foo-scheduler"
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}, Spec: v1.PodSpec{SchedulerName: schedulerFitsNone}},
pickedByDefault: false,
pickedByFoo: false,
},
}
for _, test := range tests {
podOfDefault := factoryDefaultScheduler.ResponsibleForPod(test.pod)
podOfFoo := factoryFooScheduler.ResponsibleForPod(test.pod)
results := []bool{podOfDefault, podOfFoo}
expected := []bool{test.pickedByDefault, test.pickedByFoo}
if !reflect.DeepEqual(results, expected) {
t.Errorf("expected: {%v, %v}, got {%v, %v}", test.pickedByDefault, test.pickedByFoo, podOfDefault, podOfFoo)
}
}
}
Author: kubernetes, Project: kubernetes, Lines: 61, Source: factory_test.go
Example 17: TestSelfLinkOnNamespace
func TestSelfLinkOnNamespace(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
ns := framework.CreateTestingNamespace("selflink", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
c := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
runSelfLinkTestOnNamespace(t, c, ns.Name)
}
Author: kubernetes, Project: kubernetes, Lines: 11, Source: client_test.go
Example 18: TestUpdatePetWithoutRetry
func TestUpdatePetWithoutRetry(t *testing.T) {
pcb1, pcb2 := makeTwoDifferntPCB()
// invalid pet with empty pod
invalidPcb := *pcb1
invalidPcb.pod = nil
testCases := []struct {
realPet *pcb
expectedPet *pcb
expectErr bool
requests int
}{
// case 0: error occurs, no need to update
{
realPet: pcb1,
expectedPet: &invalidPcb,
expectErr: true,
requests: 0,
},
// case 1: identical pet, no need to update
{
realPet: pcb1,
expectedPet: pcb1,
expectErr: false,
requests: 0,
},
// case 2: need to call update once
{
realPet: pcb1,
expectedPet: pcb2,
expectErr: false,
requests: 1,
},
}
for k, tc := range testCases {
body := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "empty_pod"}})
fakeHandler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: string(body),
}
testServer := httptest.NewServer(&fakeHandler)
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
petClient := newPetClient(client)
err := petClient.Update(tc.realPet, tc.expectedPet)
if tc.expectErr != (err != nil) {
t.Errorf("case %d: expect error(%v), got err: %v", k, tc.expectErr, err)
}
fakeHandler.ValidateRequestCount(t, tc.requests)
testServer.Close()
}
}
Author: nak3, Project: kubernetes, Lines: 54, Source: pet_test.go
Example 19: TestSecrets
// TestSecrets tests apiserver-side behavior of creation of secret objects and their use by pods.
func TestSecrets(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
ns := framework.CreateTestingNamespace("secret", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
DoTestSecrets(t, client, ns)
}
Author: kubernetes, Project: kubernetes, Lines: 12, Source: secrets_test.go
Example 20: TestCreate
func TestCreate(t *testing.T) {
handler := utiltesting.FakeHandler{
StatusCode: 500,
ResponseBody: "",
T: t,
}
server := httptest.NewServer(&handler)
defer server.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion}})
factory := NewConfigFactory(client, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
factory.Create()
}
Author: kubernetes, Project: kubernetes, Lines: 12, Source: factory_test.go
Note: The k8s.io/kubernetes/pkg/client/clientset_generated/clientset.NewForConfigOrDie examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and any distribution or use must follow the corresponding project's license. Do not republish without permission.