This article collects typical code examples showing how to use the Golang function DefaultWatchReactor from k8s.io/kubernetes/pkg/client/testing/core. If you are wondering what DefaultWatchReactor does, how to call it, or what real-world usage looks like, the curated examples here should help.
The examples below demonstrate DefaultWatchReactor and are ordered by popularity.
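Before diving into the excerpts, here is a minimal, self-contained sketch of the pattern they all share: register a watch.FakeWatcher via DefaultWatchReactor on a fake clientset, then push events through the watcher so the component under test sees them as if they came from the API server. The import paths below follow the same-era k8s.io/kubernetes/pkg/... layout used in the examples and are an assumption; newer releases expose the equivalent API under k8s.io/client-go/testing.

package example

import (
    "testing"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" // assumed path; use the fake clientset package that matches your tree
    "k8s.io/kubernetes/pkg/client/testing/core"
    "k8s.io/kubernetes/pkg/runtime"
    "k8s.io/kubernetes/pkg/watch"
)

// newFakeClientWithWatch wires one FakeWatcher into a fake clientset so that
// every Watch() call, on any resource, is served by that watcher and the test
// can inject events at will.
func newFakeClientWithWatch(objects ...runtime.Object) (*fake.Clientset, *watch.FakeWatcher) {
    client := fake.NewSimpleClientset(objects...)
    fakeWatch := watch.NewFake()
    // DefaultWatchReactor answers every matching watch request with fakeWatch
    // and the supplied error (nil here); "*" matches all resources.
    client.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
    return client, fakeWatch
}

func TestInjectWatchEvents(t *testing.T) {
    client, fakeWatch := newFakeClientWithWatch()
    _ = client // hand the clientset to the controller or component under test

    // Whatever is watching pods through the fake client now receives these
    // events, in order, exactly as if the API server had sent them.
    pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "mypod", Namespace: "default"}}
    fakeWatch.Add(pod)
    fakeWatch.Modify(pod)
}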
Example 1: controllerSetup
func controllerSetup(startingObjects []runtime.Object, stopChannel chan struct{}, t *testing.T) ( /*caName*/ string, *fake.Clientset, *watch.FakeWatcher, *ServiceServingCertController) {
    certDir, err := ioutil.TempDir("", "serving-cert-unit-")
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    caInfo := admin.DefaultServiceSignerCAInfo(certDir)
    caOptions := admin.CreateSignerCertOptions{
        CertFile: caInfo.CertFile,
        KeyFile:  caInfo.KeyFile,
        Name:     admin.DefaultServiceServingCertSignerName(),
        Output:   ioutil.Discard,
    }
    ca, err := caOptions.CreateSignerCert()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    kubeclient := fake.NewSimpleClientset(startingObjects...)
    fakeWatch := watch.NewFake()
    kubeclient.PrependReactor("create", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        return true, action.(core.CreateAction).GetObject(), nil
    })
    kubeclient.PrependReactor("update", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        return true, action.(core.UpdateAction).GetObject(), nil
    })
    kubeclient.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
    controller := NewServiceServingCertController(kubeclient.Core(), kubeclient.Core(), ca, "cluster.local", 10*time.Minute)
    return caOptions.Name, kubeclient, fakeWatch, controller
}
Developer: LalatenduMohanty, Project: origin, Lines: 32, Source: secret_creating_controller_test.go
Example 2: CreateTestClient
func CreateTestClient() *fake.Clientset {
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &v1.PodList{}
podNamePrefix := "mypod"
namespace := "mynamespace"
for i := 0; i < 5; i++ {
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := v1.Pod{
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: namespace,
Labels: map[string]string{
"name": podName,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "containerName",
Image: "containerImage",
VolumeMounts: []v1.VolumeMount{
{
Name: "volumeMountName",
ReadOnly: false,
MountPath: "/mnt",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "volumeName",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "pdName",
FSType: "ext4",
ReadOnly: false,
},
},
},
},
},
}
obj.Items = append(obj.Items, pod)
}
return true, obj, nil
})
fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
return fakeClient
}
Developer: nak3, Project: kubernetes, Lines: 58, Source: testvolumespec.go
Example 3: TestHookExecutor_executeExecNewPodFailed
func TestHookExecutor_executeExecNewPodFailed(t *testing.T) {
hook := &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "container1",
},
}
config := deploytest.OkDeploymentConfig(1)
deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
client := newTestClient(config)
podCreated := make(chan struct{})
var createdPod *kapi.Pod
client.AddReactor("create", "pods", func(a core.Action) (handled bool, ret runtime.Object, err error) {
defer close(podCreated)
action := a.(core.CreateAction)
object := action.GetObject()
createdPod = object.(*kapi.Pod)
return true, createdPod, nil
})
podsWatch := watch.NewFake()
client.AddWatchReactor("pods", core.DefaultWatchReactor(podsWatch, nil))
go func() {
<-podCreated
podsWatch.Add(createdPod)
podCopy, _ := kapi.Scheme.Copy(createdPod)
updatedPod := podCopy.(*kapi.Pod)
updatedPod.Status.Phase = kapi.PodFailed
podsWatch.Modify(updatedPod)
}()
executor := &HookExecutor{
pods: client.Core(),
out: ioutil.Discard,
decoder: kapi.Codecs.UniversalDecoder(),
getPodLogs: func(*kapi.Pod) (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("test")), nil
},
}
err := executor.executeExecNewPod(hook, deployment, "hook", "test")
if err == nil {
t.Fatalf("expected an error, got none")
}
t.Logf("got expected error: %T", err)
}
Developer: LalatenduMohanty, Project: origin, Lines: 49, Source: lifecycle_test.go
Example 4: TestWatchPods
func TestWatchPods(t *testing.T) {
testJob := newJob(2, 2)
clientset := fake.NewSimpleClientset(testJob)
fakeWatch := watch.NewFake()
clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
// Put one job and one pod into the store
manager.jobStore.Store.Add(testJob)
received := make(chan struct{})
// The pod update sent through the fakeWatcher should figure out the managing job and
// send it into the syncHandler.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.jobStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find job under key %v", key)
close(received)
return nil
}
job, ok := obj.(*batch.Job)
if !ok {
t.Errorf("unexpected type: %v %#v", reflect.TypeOf(obj), obj)
close(received)
return nil
}
if !api.Semantic.DeepDerivative(job, testJob) {
t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
close(received)
return nil
}
close(received)
return nil
}
// Start only the pod watcher and the workqueue, send a watch event,
// and make sure it hits the sync method for the right job.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.internalPodInformer.Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(1, api.PodRunning, testJob)
testPod := pods[0]
testPod.Status.Phase = api.PodFailed
fakeWatch.Add(&testPod)
t.Log("Waiting for pod to reach syncHandler")
<-received
}
Developer: RyanBinfeng, Project: kubernetes, Lines: 49, Source: jobcontroller_test.go
Example 5: NewSimpleClientset
// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
    o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder())
    for _, obj := range objects {
        if err := o.Add(obj); err != nil {
            panic(err)
        }
    }
    fakePtr := core.Fake{}
    fakePtr.AddReactor("*", "*", core.ObjectReaction(o, api.Registry.RESTMapper()))
    fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil))
    return &Clientset{fakePtr}
}
Developer: kubernetes, Project: kubernetes, Lines: 19, Source: clientset_generated.go
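Worth noting about NewSimpleClientset above: the catch-all watch reactor it installs wraps a FakeWatcher that is created inline and never returned, so callers have no handle for pushing events through it. That is why the test examples in this article build their own watcher and register it with PrependWatchReactor, which places it ahead of the default reactor in the watch reaction chain. A small sketch, under the same assumed imports as the sketch at the top of this article:

// newClientWithPodWatch keeps a handle on the watcher it registers, unlike the
// unreachable one created inside NewSimpleClientset.
func newClientWithPodWatch() (*fake.Clientset, *watch.FakeWatcher) {
    clientset := fake.NewSimpleClientset()
    podWatch := watch.NewFake()
    // Prepended reactors are consulted first, so watches on "pods" are served
    // by podWatch; watches on any other resource still fall through to the
    // default "*" reactor installed by NewSimpleClientset.
    clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(podWatch, nil))
    return clientset, podWatch
}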
Example 6: NewSimpleFake
// NewSimpleFake returns a client that will respond with the provided objects
func NewSimpleFake(objects ...runtime.Object) *Fake {
    o := core.NewObjectTracker(kapi.Scheme, kapi.Codecs.UniversalDecoder())
    for _, obj := range objects {
        if err := o.Add(obj); err != nil {
            panic(err)
        }
    }
    fakeClient := &Fake{}
    fakeClient.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper()))
    fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil))
    return fakeClient
}
Developer: xgwang-zte, Project: origin, Lines: 16, Source: fake.go
Example 7: TestWatchPods
func TestWatchPods(t *testing.T) {
testJob := newJob(2, 2)
clientset := fake.NewSimpleClientset(testJob)
fakeWatch := watch.NewFake()
clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
// Put one job and one pod into the store
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(testJob)
received := make(chan struct{})
// The pod update sent through the fakeWatcher should figure out the managing job and
// send it into the syncHandler.
manager.syncHandler = func(key string) error {
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
t.Errorf("Error getting namespace/name from key %v: %v", key, err)
}
job, err := manager.jobLister.Jobs(ns).Get(name)
if err != nil {
t.Errorf("Expected to find job under key %v: %v", key, err)
}
if !api.Semantic.DeepDerivative(job, testJob) {
t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
close(received)
return nil
}
close(received)
return nil
}
// Start only the pod watcher and the workqueue, send a watch event,
// and make sure it hits the sync method for the right job.
stopCh := make(chan struct{})
defer close(stopCh)
go sharedInformerFactory.Pods().Informer().Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(1, api.PodRunning, testJob)
testPod := pods[0]
testPod.Status.Phase = api.PodFailed
fakeWatch.Add(&testPod)
t.Log("Waiting for pod to reach syncHandler")
<-received
}
Developer: eljefedelrodeodeljefe, Project: kubernetes, Lines: 46, Source: jobcontroller_test.go
Example 8: TestWatchPods
func TestWatchPods(t *testing.T) {
fakeWatch := watch.NewFake()
client := &fake.Clientset{}
client.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady
// Put one ReplicaSet and one pod into the controller's stores
labelMap := map[string]string{"foo": "bar"}
testRSSpec := newReplicaSet(1, labelMap)
manager.rsStore.Store.Add(testRSSpec)
received := make(chan string)
// The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and
// send it into the syncHandler.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.rsStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find replica set under key %v", key)
}
rsSpec := obj.(*extensions.ReplicaSet)
if !api.Semantic.DeepDerivative(rsSpec, testRSSpec) {
t.Errorf("\nExpected %#v,\nbut got %#v", testRSSpec, rsSpec)
}
close(received)
return nil
}
// Start only the pod watcher and the workqueue, send a watch event,
// and make sure it hits the sync method for the right ReplicaSet.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.podController.Run(stopCh)
go manager.internalPodInformer.Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(nil, 1, api.PodRunning, labelMap, testRSSpec, "pod")
testPod := pods.Items[0]
testPod.Status.Phase = api.PodFailed
fakeWatch.Add(&testPod)
select {
case <-received:
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("unexpected timeout from result channel")
}
}
Developer: simonswine, Project: kubernetes, Lines: 46, Source: replica_set_test.go
Example 9: TestWatchPods
func TestWatchPods(t *testing.T) {
fakeWatch := watch.NewFake()
c := &fake.Clientset{}
c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
manager.podStoreSynced = alwaysReady
// Put one rc and one pod into the controller's stores
testControllerSpec := newReplicationController(1)
manager.rcStore.Store.Add(testControllerSpec)
received := make(chan string)
// The pod update sent through the fakeWatcher should figure out the managing rc and
// send it into the syncHandler.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.rcStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key)
}
controllerSpec := obj.(*api.ReplicationController)
if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
t.Errorf("\nExpected %#v,\nbut got %#v", testControllerSpec, controllerSpec)
}
close(received)
return nil
}
// Start only the pod watcher and the workqueue, send a watch event,
// and make sure it hits the sync method for the right rc.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.podController.Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(nil, 1, api.PodRunning, testControllerSpec)
testPod := pods.Items[0]
testPod.Status.Phase = api.PodFailed
fakeWatch.Add(&testPod)
select {
case <-received:
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Expected 1 call but got 0")
}
}
Developer: jetsanix, Project: kubernetes, Lines: 44, Source: replication_controller_test.go
Example 10: TestWatchJobs
func TestWatchJobs(t *testing.T) {
clientset := fake.NewSimpleClientset()
fakeWatch := watch.NewFake()
clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil))
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
var testJob batch.Job
received := make(chan struct{})
// The update sent through the fakeWatcher should make its way into the workqueue,
// and eventually into the syncHandler.
manager.syncHandler = func(key string) error {
defer close(received)
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
t.Errorf("Error getting namespace/name from key %v: %v", key, err)
}
job, err := manager.jobLister.Jobs(ns).Get(name)
if err != nil || job == nil {
t.Errorf("Expected to find job under key %v: %v", key, err)
return nil
}
if !api.Semantic.DeepDerivative(*job, testJob) {
t.Errorf("Expected %#v, but got %#v", testJob, *job)
}
return nil
}
// Start only the job watcher and the workqueue, send a watch event,
// and make sure it hits the sync method.
stopCh := make(chan struct{})
defer close(stopCh)
sharedInformerFactory.Start(stopCh)
go manager.Run(1, stopCh)
// We're sending new job to see if it reaches syncHandler.
testJob.Namespace = "bar"
testJob.Name = "foo"
fakeWatch.Add(&testJob)
t.Log("Waiting for job to reach syncHandler")
<-received
}
Developer: eljefedelrodeodeljefe, Project: kubernetes, Lines: 43, Source: jobcontroller_test.go
Example 11: TestWatchControllers
func TestWatchControllers(t *testing.T) {
fakeWatch := watch.NewFake()
client := &fake.Clientset{}
client.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady
var testRSSpec extensions.ReplicaSet
received := make(chan string)
// The update sent through the fakeWatcher should make its way into the workqueue,
// and eventually into the syncHandler. The handler validates the received controller
// and closes the received channel to indicate that the test can finish.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.rsStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find replica set under key %v", key)
}
rsSpec := *obj.(*extensions.ReplicaSet)
if !api.Semantic.DeepDerivative(rsSpec, testRSSpec) {
t.Errorf("Expected %#v, but got %#v", testRSSpec, rsSpec)
}
close(received)
return nil
}
// Start only the ReplicaSet watcher and the workqueue, send a watch event,
// and make sure it hits the sync method.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.rsController.Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
testRSSpec.Name = "foo"
fakeWatch.Add(&testRSSpec)
select {
case <-received:
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("unexpected timeout from result channel")
}
}
Developer: simonswine, Project: kubernetes, Lines: 42, Source: replica_set_test.go
Example 12: controllerSetup
func controllerSetup(startingObjects []runtime.Object, t *testing.T) (*fake.Clientset, *watch.FakeWatcher, *DockerRegistryServiceController) {
    kubeclient := fake.NewSimpleClientset(startingObjects...)
    fakeWatch := watch.NewFake()
    kubeclient.PrependReactor("create", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        return true, action.(core.CreateAction).GetObject(), nil
    })
    kubeclient.PrependReactor("update", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        return true, action.(core.UpdateAction).GetObject(), nil
    })
    kubeclient.PrependWatchReactor("services", core.DefaultWatchReactor(fakeWatch, nil))
    controller := NewDockerRegistryServiceController(kubeclient, DockerRegistryServiceControllerOptions{
        Resync:               10 * time.Minute,
        RegistryNamespace:    registryNamespace,
        RegistryServiceName:  registryName,
        DockercfgController:  &DockercfgController{},
        DockerURLsIntialized: make(chan struct{}),
    })
    return kubeclient, fakeWatch, controller
}
Developer: LalatenduMohanty, Project: origin, Lines: 21, Source: docker_registry_service_test.go
Example 13: TestWatchJobs
func TestWatchJobs(t *testing.T) {
clientset := fake.NewSimpleClientset()
fakeWatch := watch.NewFake()
clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil))
manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
var testJob batch.Job
received := make(chan struct{})
// The update sent through the fakeWatcher should make its way into the workqueue,
// and eventually into the syncHandler.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.jobStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find job under key %v", key)
}
job, ok := obj.(*batch.Job)
if !ok {
t.Fatalf("unexpected type: %v %#v", reflect.TypeOf(obj), obj)
}
if !api.Semantic.DeepDerivative(*job, testJob) {
t.Errorf("Expected %#v, but got %#v", testJob, *job)
}
close(received)
return nil
}
// Start only the job watcher and the workqueue, send a watch event,
// and make sure it hits the sync method.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.Run(1, stopCh)
// We're sending new job to see if it reaches syncHandler.
testJob.Name = "foo"
fakeWatch.Add(&testJob)
t.Log("Waiting for job to reach syncHandler")
<-received
}
Developer: XbinZh, Project: kubernetes, Lines: 40, Source: controller_test.go
Example 14: controllerSetup
func controllerSetup(t *testing.T, startingObjects []runtime.Object) (*fake.Clientset, *watch.FakeWatcher, *IngressIPController) {
    client := fake.NewSimpleClientset(startingObjects...)
    fakeWatch := watch.NewFake()
    client.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
    client.PrependReactor("create", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        obj := action.(core.CreateAction).GetObject()
        fakeWatch.Add(obj)
        return true, obj, nil
    })
    // Ensure that updates the controller makes are passed through to the watcher.
    client.PrependReactor("update", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        obj := action.(core.CreateAction).GetObject()
        fakeWatch.Modify(obj)
        return true, obj, nil
    })
    controller := newController(t, client)
    return client, fakeWatch, controller
}
Developer: LalatenduMohanty, Project: origin, Lines: 23, Source: controller_test.go
Example 15: TestReplicationControllerStop
//......... portions of the code omitted here .........
Namespace: ns,
},
Spec: api.ReplicationControllerSpec{
Replicas: 0,
Selector: map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}},
},
{
ObjectMeta: api.ObjectMeta{
Name: "zaz",
Namespace: ns,
},
Spec: api.ReplicationControllerSpec{
Replicas: 0,
Selector: map[string]string{"k1": "v1"}},
},
{
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: api.ReplicationControllerSpec{
Replicas: 0,
Selector: map[string]string{"k1": "v1", "k2": "v2"}},
},
},
},
},
StopError: fmt.Errorf("Detected overlapping controllers for rc foo: baz,zaz, please manage deletion individually with --cascade=false."),
ExpectedActions: []string{"get", "list"},
},
{
Name: "TwoExactMatchRCs",
Objs: []runtime.Object{
&api.ReplicationControllerList{ // LIST
Items: []api.ReplicationController{
{
ObjectMeta: api.ObjectMeta{
Name: "zaz",
Namespace: ns,
},
Spec: api.ReplicationControllerSpec{
Replicas: 0,
Selector: map[string]string{"k1": "v1"}},
},
{
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: api.ReplicationControllerSpec{
Replicas: 0,
Selector: map[string]string{"k1": "v1"}},
},
},
},
},
StopError: nil,
ExpectedActions: []string{"get", "list", "delete"},
},
}
for _, test := range tests {
copiedForWatch, err := api.Scheme.Copy(test.Objs[0])
if err != nil {
t.Fatalf("%s unexpected error: %v", test.Name, err)
}
fake := fake.NewSimpleClientset(test.Objs...)
fakeWatch := watch.NewFake()
fake.PrependWatchReactor("replicationcontrollers", testcore.DefaultWatchReactor(fakeWatch, nil))
go func() {
fakeWatch.Add(copiedForWatch)
}()
reaper := ReplicationControllerReaper{fake.Core(), time.Millisecond, time.Millisecond}
err = reaper.Stop(ns, name, 0, nil)
if !reflect.DeepEqual(err, test.StopError) {
t.Errorf("%s unexpected error: %v", test.Name, err)
continue
}
actions := fake.Actions()
if len(actions) != len(test.ExpectedActions) {
t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions))
continue
}
for i, verb := range test.ExpectedActions {
if actions[i].GetResource().GroupResource() != api.Resource("replicationcontrollers") {
t.Errorf("%s unexpected action: %+v, expected %s-replicationController", test.Name, actions[i], verb)
}
if actions[i].GetVerb() != verb {
t.Errorf("%s unexpected action: %+v, expected %s-replicationController", test.Name, actions[i], verb)
}
}
}
}
Developer: paralin, Project: kubernetes, Lines: 101, Source: stop_test.go
Example 16: TestReplicaSetController
func TestReplicaSetController(t *testing.T) {
flag.Set("logtostderr", "true")
flag.Set("v", "5")
flag.Parse()
replicaSetReviewDelay = 10 * time.Millisecond
clusterAvailableDelay = 20 * time.Millisecond
clusterUnavailableDelay = 60 * time.Millisecond
allReplicaSetReviewDealy = 120 * time.Millisecond
fedclientset := fedclientfake.NewSimpleClientset()
fedrswatch := watch.NewFake()
fedclientset.PrependWatchReactor("replicasets", core.DefaultWatchReactor(fedrswatch, nil))
fedclientset.Federation().Clusters().Create(testutil.NewCluster("k8s-1", apiv1.ConditionTrue))
fedclientset.Federation().Clusters().Create(testutil.NewCluster("k8s-2", apiv1.ConditionTrue))
kube1clientset := kubeclientfake.NewSimpleClientset()
kube1rswatch := watch.NewFake()
kube1clientset.PrependWatchReactor("replicasets", core.DefaultWatchReactor(kube1rswatch, nil))
kube1Podwatch := watch.NewFake()
kube1clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(kube1Podwatch, nil))
kube2clientset := kubeclientfake.NewSimpleClientset()
kube2rswatch := watch.NewFake()
kube2clientset.PrependWatchReactor("replicasets", core.DefaultWatchReactor(kube2rswatch, nil))
kube2Podwatch := watch.NewFake()
kube2clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(kube2Podwatch, nil))
fedInformerClientFactory := func(cluster *fedv1.Cluster) (kube_release_1_4.Interface, error) {
switch cluster.Name {
case "k8s-1":
return kube1clientset, nil
case "k8s-2":
return kube2clientset, nil
default:
return nil, fmt.Errorf("Unknown cluster: %v", cluster.Name)
}
}
replicaSetController := NewReplicaSetController(fedclientset)
rsFedinformer := testutil.ToFederatedInformerForTestOnly(replicaSetController.fedReplicaSetInformer)
rsFedinformer.SetClientFactory(fedInformerClientFactory)
podFedinformer := testutil.ToFederatedInformerForTestOnly(replicaSetController.fedPodInformer)
podFedinformer.SetClientFactory(fedInformerClientFactory)
stopChan := make(chan struct{})
defer close(stopChan)
go replicaSetController.Run(1, stopChan)
rs := newReplicaSetWithReplicas("rs", 9)
rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Create(rs)
fedrswatch.Add(rs)
time.Sleep(1 * time.Second)
rs1, _ := kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
kube1rswatch.Add(rs1)
rs1.Status.Replicas = *rs1.Spec.Replicas
rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs1)
kube1rswatch.Modify(rs1)
rs2, _ := kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
kube2rswatch.Add(rs2)
rs2.Status.Replicas = *rs2.Spec.Replicas
rs2, _ = kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs2)
kube2rswatch.Modify(rs2)
time.Sleep(1 * time.Second)
rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
assert.Equal(t, *rs.Spec.Replicas, *rs1.Spec.Replicas+*rs2.Spec.Replicas)
assert.Equal(t, rs.Status.Replicas, rs1.Status.Replicas+rs2.Status.Replicas)
var replicas int32 = 20
rs.Spec.Replicas = &replicas
rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Update(rs)
fedrswatch.Modify(rs)
time.Sleep(1 * time.Second)
rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
rs1.Status.Replicas = *rs1.Spec.Replicas
rs1, _ = kube1clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs1)
kube1rswatch.Modify(rs1)
rs2, _ = kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
rs2.Status.Replicas = *rs2.Spec.Replicas
rs2, _ = kube2clientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).UpdateStatus(rs2)
kube2rswatch.Modify(rs2)
time.Sleep(1 * time.Second)
rs, _ = fedclientset.Extensions().ReplicaSets(apiv1.NamespaceDefault).Get(rs.Name)
assert.Equal(t, *rs.Spec.Replicas, *rs1.Spec.Replicas+*rs2.Spec.Replicas)
assert.Equal(t, rs.Status.Replicas, rs1.Status.Replicas+rs2.Status.Replicas)
}
Developer: Aleishus, Project: kubernetes, Lines: 91, Source: replicasetcontroller_test.go
Example 17: TestGetFirstPod
//......... portions of the code omitted here .........
podList: newPodList(2, -1, -1, labelSet),
sortBy: func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
expected: &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "pod-1",
Namespace: api.NamespaceDefault,
CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
expectedNum: 2,
},
{
name: "kubectl attach - wait for ready pod",
podList: newPodList(1, 1, -1, labelSet),
watching: []watch.Event{
{
Type: watch.Modified,
Object: &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "pod-1",
Namespace: api.NamespaceDefault,
CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
},
},
sortBy: func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
expected: &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "pod-1",
Namespace: api.NamespaceDefault,
CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
expectedNum: 1,
},
}
for i := range tests {
test := tests[i]
fake := fake.NewSimpleClientset(test.podList)
if len(test.watching) > 0 {
watcher := watch.NewFake()
for _, event := range test.watching {
switch event.Type {
case watch.Added:
go watcher.Add(event.Object)
case watch.Modified:
go watcher.Modify(event.Object)
}
}
fake.PrependWatchReactor("pods", testcore.DefaultWatchReactor(watcher, nil))
}
selector := labels.Set(labelSet).AsSelector()
pod, numPods, err := GetFirstPod(fake.Core(), api.NamespaceDefault, selector, 1*time.Minute, test.sortBy)
if !test.expectedErr && err != nil {
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
}
if test.expectedErr && err == nil {
t.Errorf("%s: expected an error", test.name)
continue
}
if test.expectedNum != numPods {
t.Errorf("%s: expected %d pods, got %d", test.name, test.expectedNum, numPods)
continue
}
if !reflect.DeepEqual(test.expected, pod) {
t.Errorf("%s:\nexpected pod:\n%#v\ngot:\n%#v\n\n", test.name, test.expected, pod)
}
}
}
Developer: ravihansa3000, Project: kubernetes, Lines: 101, Source: factory_test.go
Example 18: TestHookExecutor_executeExecNewPodSucceeded
func TestHookExecutor_executeExecNewPodSucceeded(t *testing.T) {
hook := &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "container1",
},
}
config := deploytest.OkDeploymentConfig(1)
deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
deployment.Spec.Template.Spec.NodeSelector = map[string]string{"labelKey1": "labelValue1", "labelKey2": "labelValue2"}
client := newTestClient(config)
podCreated := make(chan struct{})
var createdPod *kapi.Pod
client.AddReactor("create", "pods", func(a core.Action) (handled bool, ret runtime.Object, err error) {
defer close(podCreated)
action := a.(core.CreateAction)
object := action.GetObject()
createdPod = object.(*kapi.Pod)
return true, createdPod, nil
})
podsWatch := watch.NewFake()
client.AddWatchReactor("pods", core.DefaultWatchReactor(podsWatch, nil))
podLogs := &bytes.Buffer{}
// Simulate creation of the lifecycle pod
go func() {
<-podCreated
podsWatch.Add(createdPod)
podCopy, _ := kapi.Scheme.Copy(createdPod)
updatedPod := podCopy.(*kapi.Pod)
updatedPod.Status.Phase = kapi.PodSucceeded
podsWatch.Modify(updatedPod)
}()
executor := &HookExecutor{
pods: client.Core(),
out: podLogs,
decoder: kapi.Codecs.UniversalDecoder(),
getPodLogs: func(*kapi.Pod) (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("test")), nil
},
}
err := executor.executeExecNewPod(hook, deployment, "hook", "test")
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if e, a := "--> test: Running hook pod ...\ntest--> test: Success\n", podLogs.String(); e != a {
t.Fatalf("expected pod logs to be %q, got %q", e, a)
}
if e, a := deployment.Spec.Template.Spec.NodeSelector, createdPod.Spec.NodeSelector; !reflect.DeepEqual(e, a) {
t.Fatalf("expected pod NodeSelector %v, got %v", e, a)
}
if createdPod.Spec.ActiveDeadlineSeconds == nil {
t.Fatalf("expected ActiveDeadlineSeconds to be set on the deployment hook executor pod")
}
if *createdPod.Spec.ActiveDeadlineSeconds >= deployapi.MaxDeploymentDurationSeconds {
t.Fatalf("expected ActiveDeadlineSeconds %+v to be lower than %+v", *createdPod.Spec.ActiveDeadlineSeconds, deployapi.MaxDeploymentDurationSeconds)
}
}
Developer: LalatenduMohanty, Project: origin, Lines: 68, Source: lifecycle_test.go
Example 19: prepareTestClient
//......... portions of the code omitted here .........
podPresent[num] = true
}
timestamp := time.Now()
metrics := heapster.MetricResultList{}
for i, level := range tc.reportedLevels {
if !podPresent[i] {
continue
}
metric := heapster.MetricResult{
Metrics: []heapster.MetricPoint{{Timestamp: timestamp, Value: level, FloatValue: nil}},
LatestTimestamp: timestamp,
}
metrics.Items = append(metrics.Items, metric)
}
heapsterRawMemResponse, _ = json.Marshal(&metrics)
}
return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
})
fakeClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
tc.Lock()
defer tc.Unlock()
obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
tc.scaleUpdated = true
return true, obj, nil
})
fakeClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
tc.Lock()
defer tc.Unlock()
obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
tc.scaleUpdated = true
return true, obj, nil
})
fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
tc.Lock()
defer tc.Unlock()
obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
tc.scaleUpdated = true
return true, obj, nil
})
fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
tc.Lock()
defer tc.Unlock()
obj := action.(core.UpdateAction).GetObject().(*autoscaling.HorizontalPodAutoscaler)
assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected")
assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected")
assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
if tc.verifyCPUCurrent {
assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil")
assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected")
}
tc.statusUpdated = true
// Every time we reconcile HPA object we are updating status.
tc.processed <- obj.Name
return true, obj, nil
})
fakeClient.AddReactor("*", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
tc.Lock()
defer tc.Unlock()
obj := action.(core.CreateAction).GetObject().(*v1.Event)
if tc.verifyEvents {
switch obj.Reason {
case "SuccessfulRescale":
assert.Equal(t, fmt.Sprintf("New size: %d; reason: CPU utilization above target", tc.desiredReplicas), obj.Message)
case "DesiredReplicasComputed":
assert.Equal(t, fmt.Sprintf(
"Computed the desired num of replicas: %d (avgCPUutil: %d, current replicas: %d)",
tc.desiredReplicas,
(int64(tc.reportedLevels[0])*100)/tc.reportedCPURequests[0].MilliValue(), tc.initialReplicas), obj.Message)
default:
assert.False(t, true, fmt.Sprintf("Unexpected event: %s / %s", obj.Reason, obj.Message))
}
}
tc.eventCreated = true
return true, obj, nil
})
fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
return fakeClient
}
Developer: kubernetes, Project: kubernetes, Lines: 101, Source: horizontal_test.go