This article collects typical usage examples of the NewFakePodStore function from the Golang package github.com/square/p2/pkg/kp/kptest. If you are wondering what NewFakePodStore does, how to call it, or what real-world usage looks like, the curated examples here should help.
Below are 10 code examples of the NewFakePodStore function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Golang examples.
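Before the full examples, here is a minimal sketch of the two call patterns that appear below: an empty fake store (both arguments nil), and a store pre-seeded with a pod map and a watch-result map. This sketch is not taken from the examples; the import paths for the kp and manifest packages are assumed from the p2 repository layout and may differ.

package example

import (
    "github.com/square/p2/pkg/kp"              // assumed import path
    "github.com/square/p2/pkg/kp/kptest"       // package named in this article
    "github.com/square/p2/pkg/manifest"        // assumed import path
)

func newFakeStores() {
    // Empty fake store, as used by the lock/session tests (Examples 2 and 3).
    emptyStore := kptest.NewFakePodStore(nil, nil)
    _ = emptyStore

    // Fake store pre-seeded with (initially empty) pod and watch-result maps,
    // as used by the daemon set farm tests (Examples 5 through 10).
    podMap := make(map[kptest.FakePodStoreKey]manifest.Manifest)
    watchResults := make(map[string]kp.WatchResult)
    seededStore := kptest.NewFakePodStore(podMap, watchResults)
    _ = seededStore
}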
Example 1: newRollStore
func newRollStore(t *testing.T, entries []fields.Update) consulStore {
    storeFields := make(map[string]*api.KVPair)
    for _, u := range entries {
        path, err := RollPath(fields.ID(u.NewRC))
        if err != nil {
            t.Fatalf("Unable to create roll store for test: %s", err)
        }
        json, err := json.Marshal(u)
        if err != nil {
            t.Fatalf("Unable to marshal test field as JSON: %s", err)
        }
        storeFields[path] = &api.KVPair{
            Key:   path,
            Value: json,
        }
    }
    return consulStore{
        kv: &consulutil.FakeKV{
            Entries: storeFields,
        },
        store:   kptest.NewFakePodStore(nil, nil),
        rcstore: rcstore.NewFake(),
        labeler: labels.NewFakeApplicator(),
    }
}
Developer: petertseng, Project: p2, Lines: 25, Source: consul_store_test.go
Example 2: TestLockForUpdateCreation
func TestLockForUpdateCreation(t *testing.T) {
    kpStore := kptest.NewFakePodStore(nil, nil)
    session, _, err := kpStore.NewSession("test rc update creation session", nil)
    if err != nil {
        t.Fatalf("Unable to create fake session in fake store: %s", err)
    }

    rcstore := consulStore{}
    unlocker, err := rcstore.LockForUpdateCreation(testRCId, session)
    if err != nil {
        t.Fatalf("Unable to lock rc for update creation: %s", err)
    }

    expectedKey := fmt.Sprintf("%s/%s/%s/%s", consulutil.LOCK_TREE, rcTree, testRCId, updateCreationSuffix)
    if unlocker.Key() != expectedKey {
        t.Errorf("Key did not match expected: wanted '%s' but got '%s'", expectedKey, unlocker.Key())
    }
}
Developer: petertseng, Project: p2, Lines: 19, Source: store_test.go
Example 3: TestLockRCs
func TestLockRCs(t *testing.T) {
    fakeStore := kptest.NewFakePodStore(nil, nil)
    session, _, err := fakeStore.NewSession("fake rc lock session", nil)
    Assert(t).IsNil(err, "Should not have erred getting fake session")

    update := NewUpdate(fields.Update{
        NewRC: rc_fields.ID("new_rc"),
        OldRC: rc_fields.ID("old_rc"),
    },
        nil,
        rcstore.NewFake(),
        nil,
        nil,
        nil,
        logging.DefaultLogger,
        session,
        nil,
    ).(*update)

    err = update.lockRCs(make(<-chan struct{}))
    Assert(t).IsNil(err, "should not have erred locking RCs")
    Assert(t).IsNotNil(update.newRCUnlocker, "should have consulutil.Unlocker for unlocking new rc")
    Assert(t).IsNotNil(update.oldRCUnlocker, "should have consulutil.Unlocker for unlocking old rc")
}
Developer: petertseng, Project: p2, Lines: 23, Source: update_test.go
Example 4: updateWithHealth
func updateWithHealth(t *testing.T,
    desiredOld, desiredNew int,
    oldNodes, newNodes map[types.NodeName]bool,
    checks map[types.NodeName]health.Result,
) (update, manifest.Manifest, manifest.Manifest) {
    podID := "mypod"
    oldManifest := podWithIDAndPort(podID, 9001)
    newManifest := podWithIDAndPort(podID, 9002)

    podMap := map[kptest.FakePodStoreKey]manifest.Manifest{}
    assignManifestsToNodes(types.PodID(podID), oldNodes, podMap, oldManifest, newManifest)
    assignManifestsToNodes(types.PodID(podID), newNodes, podMap, newManifest, oldManifest)
    kps := kptest.NewFakePodStore(podMap, nil)

    rcs := rcstore.NewFake()
    applicator := labels.NewFakeApplicator()

    oldRC, err := createRC(rcs, applicator, oldManifest, desiredOld, oldNodes)
    Assert(t).IsNil(err, "expected no error setting up old RC")

    newRC, err := createRC(rcs, applicator, newManifest, desiredNew, newNodes)
    Assert(t).IsNil(err, "expected no error setting up new RC")

    return update{
        kps:     kps,
        rcs:     rcs,
        hcheck:  checkertest.NewSingleService(podID, checks),
        labeler: applicator,
        logger:  logging.TestLogger(),
        Update: fields.Update{
            OldRC: oldRC.ID,
            NewRC: newRC.ID,
        },
    }, oldManifest, newManifest
}
Developer: petertseng, Project: p2, Lines: 36, Source: update_test.go
Example 5: TestMultipleFarms
func TestMultipleFarms(t *testing.T) {
    retryInterval = testFarmRetryInterval
    dsStore := dsstoretest.NewFake()
    kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
    applicator := labels.NewFakeApplicator()
    preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
    preparer.Enable()
    defer preparer.Disable()

    session := kptest.NewSession()
    firstLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
        "farm": "firstMultiple",
    })

    var allNodes []types.NodeName
    allNodes = append(allNodes, "node1", "node2", "node3")
    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("good_node%v", i)
        allNodes = append(allNodes, types.NodeName(nodeName))
    }
    happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

    //
    // Instantiate first farm
    //
    firstFarm := &Farm{
        dsStore:       dsStore,
        kpStore:       kpStore,
        scheduler:     scheduler.NewApplicatorScheduler(applicator),
        applicator:    applicator,
        children:      make(map[ds_fields.ID]*childDS),
        session:       session,
        logger:        firstLogger,
        alerter:       alerting.NewNop(),
        healthChecker: &happyHealthChecker,
    }
    firstQuitCh := make(chan struct{})
    defer close(firstQuitCh)
    go func() {
        go firstFarm.cleanupDaemonSetPods(firstQuitCh)
        firstFarm.mainLoop(firstQuitCh)
    }()

    //
    // Instantiate second farm
    //
    secondLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
        "farm": "secondMultiple",
    })
    secondFarm := &Farm{
        dsStore:       dsStore,
        kpStore:       kpStore,
        scheduler:     scheduler.NewApplicatorScheduler(applicator),
        applicator:    applicator,
        children:      make(map[ds_fields.ID]*childDS),
        session:       session,
        logger:        secondLogger,
        alerter:       alerting.NewNop(),
        healthChecker: &happyHealthChecker,
    }
    secondQuitCh := make(chan struct{})
    defer close(secondQuitCh)
    go func() {
        go secondFarm.cleanupDaemonSetPods(secondQuitCh)
        secondFarm.mainLoop(secondQuitCh)
    }()
    // Make two daemon sets with different node selectors
    // First daemon set
    podID := types.PodID("testPod")
    minHealth := 0
    clusterName := ds_fields.ClusterName("some_name")

    manifestBuilder := manifest.NewBuilder()
    manifestBuilder.SetID(podID)
    podManifest := manifestBuilder.GetManifest()

    nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
    dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")

    // Second daemon set
    anotherNodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
    anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, anotherNodeSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")

    // Make a node and verify that it was scheduled by the first daemon set
    applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

    labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
    Assert(t).IsNil(err, "Expected pod to have a dsID label")
    dsID := labeled.Labels.Get(DSIDLabel)
    Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

    // Make a second node and verify that it was scheduled by the second daemon set
    applicator.SetLabel(labels.NODE, "node2", pc_fields.AvailabilityZoneLabel, "az2")

    labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
    //......... remainder of this example omitted .........
Developer: drcapulet, Project: p2, Lines: 101, Source: farm_test.go
Example 6: TestCleanupPods
func TestCleanupPods(t *testing.T) {
    retryInterval = testFarmRetryInterval
    dsStore := dsstoretest.NewFake()
    kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
    applicator := labels.NewFakeApplicator()
    preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
    preparer.Enable()
    defer preparer.Disable()
    // Make some dangling pod labels, then instantiate a farm and expect it to clean them up
    podID := types.PodID("testPod")
    manifestBuilder := manifest.NewBuilder()
    manifestBuilder.SetID(podID)
    podManifest := manifestBuilder.GetManifest()

    var allNodes []types.NodeName
    allNodes = append(allNodes)
    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("node%v", i)
        allNodes = append(allNodes, types.NodeName(nodeName))
    }
    happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("node%v", i)
        id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
        err := applicator.SetLabel(labels.POD, id, DSIDLabel, "impossible_id")
        Assert(t).IsNil(err, "Expected no error labeling node")

        _, err = kpStore.SetPod(kp.INTENT_TREE, types.NodeName(nodeName), podManifest)
        Assert(t).IsNil(err, "Expected no error added pod to intent tree")
    }

    // Assert that precondition is true
    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("node%v", i)
        id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
        labeled, err := applicator.GetLabels(labels.POD, id)
        Assert(t).IsNil(err, "Expected no error getting labels")
        Assert(t).IsTrue(labeled.Labels.Has(DSIDLabel), "Precondition failed: Pod must have a dsID label")

        _, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
        Assert(t).IsNil(err, "Expected no error getting pod from intent store")
        Assert(t).AreNotEqual(err, pods.NoCurrentManifest, "Precondition failed: Pod was not in intent store")
    }

    // Instantiate farm
    logger := logging.DefaultLogger.SubLogger(logrus.Fields{
        "farm": "cleanupPods",
    })
    dsf := &Farm{
        dsStore:       dsStore,
        kpStore:       kpStore,
        scheduler:     scheduler.NewApplicatorScheduler(applicator),
        applicator:    applicator,
        children:      make(map[ds_fields.ID]*childDS),
        session:       kptest.NewSession(),
        logger:        logger,
        alerter:       alerting.NewNop(),
        healthChecker: &happyHealthChecker,
    }
    quitCh := make(chan struct{})
    defer close(quitCh)
    go func() {
        go dsf.cleanupDaemonSetPods(quitCh)
        dsf.mainLoop(quitCh)
    }()
    // Make sure there are no nodes left
    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("node%v", i)
        id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
        _, err := waitForPodLabel(applicator, false, id)
        Assert(t).IsNil(err, "Expected pod not to have a dsID label")

        condition := func() error {
            _, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
            if err != pods.NoCurrentManifest {
                return util.Errorf("Expected pod to be deleted in intent store")
            }
            return nil
        }
        err = waitForCondition(condition)
        Assert(t).IsNil(err, "Error cleaning up pods")
    }
}
Developer: drcapulet, Project: p2, Lines: 88, Source: farm_test.go
Example 7: TestContendNodes
// Tests dsContends for changes to both daemon sets and nodes
func TestContendNodes(t *testing.T) {
    retryInterval = testFarmRetryInterval

    //
    // Instantiate farm
    //
    dsStore := dsstoretest.NewFake()
    kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
    applicator := labels.NewFakeApplicator()
    logger := logging.DefaultLogger.SubLogger(logrus.Fields{
        "farm": "contendNodes",
    })
    preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
    preparer.Enable()
    defer preparer.Disable()

    var allNodes []types.NodeName
    allNodes = append(allNodes, "node1")
    happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

    dsf := &Farm{
        dsStore:       dsStore,
        kpStore:       kpStore,
        scheduler:     scheduler.NewApplicatorScheduler(applicator),
        applicator:    applicator,
        children:      make(map[ds_fields.ID]*childDS),
        session:       kptest.NewSession(),
        logger:        logger,
        alerter:       alerting.NewNop(),
        healthChecker: &happyHealthChecker,
    }
    quitCh := make(chan struct{})
    defer close(quitCh)
    go func() {
        go dsf.cleanupDaemonSetPods(quitCh)
        dsf.mainLoop(quitCh)
    }()

    //
    // Check for contention between two daemon sets among their nodes
    //

    // Make a daemon set
    podID := types.PodID("testPod")
    minHealth := 0
    clusterName := ds_fields.ClusterName("some_name")
    manifestBuilder := manifest.NewBuilder()
    manifestBuilder.SetID(podID)
    podManifest := manifestBuilder.GetManifest()

    nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
    dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")
    err = waitForCreate(dsf, dsData.ID)
    Assert(t).IsNil(err, "Expected daemon set to be created")

    // Make a node and verify that it was scheduled
    applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

    labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
    Assert(t).IsNil(err, "Expected pod to have a dsID label")
    dsID := labeled.Labels.Get(DSIDLabel)
    Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

    // Make another daemon set with a contending AvailabilityZoneLabel and verify
    // that it gets disabled and that the node label does not change
    anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
    Assert(t).AreNotEqual(dsData.ID.String(), anotherDSData.ID.String(), "Precondition failed")
    Assert(t).IsNil(err, "Expected no error creating request")
    err = waitForCreate(dsf, anotherDSData.ID)
    Assert(t).IsNil(err, "Expected daemon set to be created")

    labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
    Assert(t).IsNil(err, "Expected pod to have a dsID label")
    anotherDSID := labeled.Labels.Get(DSIDLabel)
    Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

    // Expect the new daemon set to be disabled both in the farm and in the dsStore
    err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
    Assert(t).IsNil(err, "Error disabling daemon set!")

    //
    // Make a third daemon set and update its node selector to force a contend,
    // then verify that it has been disabled and the node hasn't been overwritten
    //
    anotherSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"undefined"})
    badDS, err := dsStore.Create(podManifest, minHealth, clusterName, anotherSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")
    err = waitForCreate(dsf, badDS.ID)
    Assert(t).IsNil(err, "Expected daemon set to be created")

    mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
        dsToUpdate.NodeSelector = nodeSelector
        return dsToUpdate, nil
    }
    badDS, err = dsStore.MutateDS(badDS.ID, mutator)
    Assert(t).IsNil(err, "Expected no error mutating daemon set")
    err = waitForMutateSelector(dsf, badDS)
    //......... remainder of this example omitted .........
Developer: drcapulet, Project: p2, Lines: 101, Source: farm_test.go
Example 8: TestContendSelectors
// Tests dsContends for NodeSelectors
func TestContendSelectors(t *testing.T) {
    retryInterval = testFarmRetryInterval

    //
    // Instantiate farm
    //
    dsStore := dsstoretest.NewFake()
    kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
    applicator := labels.NewFakeApplicator()
    logger := logging.DefaultLogger.SubLogger(logrus.Fields{
        "farm": "contendSelectors",
    })
    preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
    preparer.Enable()
    defer preparer.Disable()

    var allNodes []types.NodeName
    happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

    dsf := &Farm{
        dsStore:       dsStore,
        kpStore:       kpStore,
        scheduler:     scheduler.NewApplicatorScheduler(applicator),
        applicator:    applicator,
        children:      make(map[ds_fields.ID]*childDS),
        session:       kptest.NewSession(),
        logger:        logger,
        alerter:       alerting.NewNop(),
        healthChecker: &happyHealthChecker,
    }
    quitCh := make(chan struct{})
    defer close(quitCh)
    go func() {
        go dsf.cleanupDaemonSetPods(quitCh)
        dsf.mainLoop(quitCh)
    }()

    //
    // Make two daemon sets with an everything selector and verify that they trivially
    // contend and that only the second daemon set gets disabled
    //

    // Make a daemon set
    podID := types.PodID("testPod")
    minHealth := 0
    clusterName := ds_fields.ClusterName("some_name")
    manifestBuilder := manifest.NewBuilder()
    manifestBuilder.SetID(podID)
    podManifest := manifestBuilder.GetManifest()

    everythingSelector := klabels.Everything()
    firstDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")
    err = waitForCreate(dsf, firstDSData.ID)
    Assert(t).IsNil(err, "Expected daemon set to be created")

    secondDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")
    err = waitForCreate(dsf, secondDSData.ID)
    Assert(t).IsNil(err, "Expected daemon set to be created")

    // Verify that only the second daemon set is disabled
    err = waitForDisabled(dsf, dsStore, firstDSData.ID, false)
    Assert(t).IsNil(err, "First daemon set should not be disabled")

    err = waitForDisabled(dsf, dsStore, secondDSData.ID, true)
    Assert(t).IsNil(err, "Error disabling second daemon set")

    // Add another daemon set with different selector and verify it gets disabled
    someSelector := klabels.Everything().
        Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"nowhere"})
    thirdDSData, err := dsStore.Create(podManifest, minHealth, clusterName, someSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")
    err = waitForCreate(dsf, thirdDSData.ID)
    Assert(t).IsNil(err, "Expected daemon set to be created")
    err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
    Assert(t).IsNil(err, "Error disabling third daemon set")

    //
    // Disable first daemon set, then enable second and third daemon sets in that order
    // and then there should be a contend on the third daemon set
    //
    disableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
        dsToUpdate.Disabled = true
        return dsToUpdate, nil
    }
    enableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
        dsToUpdate.Disabled = false
        return dsToUpdate, nil
    }

    // Disable first ds and verify it is disabled
    _, err = dsStore.MutateDS(firstDSData.ID, disableMutator)
    Assert(t).IsNil(err, "Expected no error getting daemon set")
    err = waitForDisabled(dsf, dsStore, firstDSData.ID, true)
    Assert(t).IsNil(err, "Error disabling first daemon set")
    //......... remainder of this example omitted .........
Developer: drcapulet, Project: p2, Lines: 101, Source: farm_test.go
Example 9: TestPublishToReplication
func TestPublishToReplication(t *testing.T) {
    retryInterval = testFarmRetryInterval

    //
    // Setup fixture and schedule a pod
    //
    dsStore := dsstoretest.NewFake()

    podID := types.PodID("testPod")
    minHealth := 1
    clusterName := ds_fields.ClusterName("some_name")

    manifestBuilder := manifest.NewBuilder()
    manifestBuilder.SetID(podID)
    podManifest := manifestBuilder.GetManifest()

    nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
    timeout := replication.NoTimeout

    dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
    Assert(t).IsNil(err, "expected no error creating request")

    kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
    applicator := labels.NewFakeApplicator()
    preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
    preparer.Enable()
    defer preparer.Disable()

    var allNodes []types.NodeName
    allNodes = append(allNodes, "node1", "node2")
    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("good_node%v", i)
        allNodes = append(allNodes, types.NodeName(nodeName))
    }
    happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

    ds := New(
        dsData,
        dsStore,
        kpStore,
        applicator,
        applicator,
        logging.DefaultLogger,
        &happyHealthChecker,
        0,
        false,
    ).(*daemonSet)

    scheduled := scheduledPods(t, ds)
    Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")
    err = waitForPodsInIntent(kpStore, 0)
    Assert(t).IsNil(err, "Unexpected number of pods scheduled")

    err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "good")
    Assert(t).IsNil(err, "expected no error labeling node1")
    err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
Assert(t).IsNil(err, "expected no error labeling node1")
    //
    // Adds a watch that will automatically send a signal when a change was made
    // to the daemon set
    //
    quitCh := make(chan struct{})
    updatedCh := make(chan *ds_fields.DaemonSet)
    deletedCh := make(chan *ds_fields.DaemonSet)
    defer close(quitCh)
    defer close(updatedCh)
    defer close(deletedCh)
    desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
    dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)

    //
    // Verify that 2 pods have been scheduled
    //
    numNodes := waitForNodes(t, ds, 2, desiresErrCh, dsChangesErrCh)
    Assert(t).AreEqual(numNodes, 2, "took too long to schedule")

    scheduled = scheduledPods(t, ds)
    Assert(t).AreEqual(len(scheduled), 2, "expected a node to have been labeled")

    // Mutate the daemon set so that the node is unscheduled; this should not produce an error
    mutator := func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
        dsToChange.NodeSelector = klabels.Everything().
            Add("nodeQuality", klabels.EqualsOperator, []string{"bad"})
        return dsToChange, nil
    }
    _, err = dsStore.MutateDS(ds.ID(), mutator)
Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")
    select {
    case <-time.After(1 * time.Second):
    case err := <-desiresErrCh:
        t.Fatalf("Unexpected error unscheduling pod: %v", err)
    }

    numNodes = waitForNodes(t, ds, 0, desiresErrCh, dsChangesErrCh)
    Assert(t).AreEqual(numNodes, 0, "took too long to unschedule")
}
Developer: petertseng, Project: p2, Lines: 98, Source: daemon_set_test.go
Example 10: TestSchedule
// TestSchedule checks consecutive scheduling and unscheduling for:
// - creation of a daemon set
// - different node selectors
// - changes to nodes allocations
// - mutations to a daemon set
// - deleting a daemon set
func TestSchedule(t *testing.T) {
    retryInterval = testFarmRetryInterval

    //
    // Setup fixture and schedule a pod
    //
    dsStore := dsstoretest.NewFake()

    podID := types.PodID("testPod")
    minHealth := 0
    clusterName := ds_fields.ClusterName("some_name")

    manifestBuilder := manifest.NewBuilder()
    manifestBuilder.SetID(podID)
    podManifest := manifestBuilder.GetManifest()

    nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
    timeout := replication.NoTimeout

    dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
    Assert(t).IsNil(err, "expected no error creating request")

    kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
    applicator := labels.NewFakeApplicator()
    preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
    preparer.Enable()
    defer preparer.Disable()

    var allNodes []types.NodeName
    allNodes = append(allNodes, "node1", "node2", "nodeOk")
    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("good_node%v", i)
        allNodes = append(allNodes, types.NodeName(nodeName))
    }
    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("bad_node%v", i)
        allNodes = append(allNodes, types.NodeName(nodeName))
    }
    happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

    ds := New(
        dsData,
        dsStore,
        kpStore,
        applicator,
        applicator,
        logging.DefaultLogger,
        &happyHealthChecker,
        0,
        false,
    ).(*daemonSet)

    scheduled := scheduledPods(t, ds)
    Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")
    err = waitForPodsInIntent(kpStore, 0)
    Assert(t).IsNil(err, "Unexpected number of pods scheduled")

    err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "bad")
    Assert(t).IsNil(err, "expected no error labeling node1")
    err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
    Assert(t).IsNil(err, "expected no error labeling node2")

    //
    // Adds a watch that will automatically send a signal when a change was made
    // to the daemon set
    //
    quitCh := make(chan struct{})
    updatedCh := make(chan *ds_fields.DaemonSet)
    deletedCh := make(chan *ds_fields.DaemonSet)
    defer close(quitCh)
    defer close(updatedCh)
    defer close(deletedCh)
    desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
    dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)

    //
    // Verify that the pod has been scheduled
    //
    numNodes := waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh)
    Assert(t).AreEqual(numNodes, 1, "took too long to schedule")

    scheduled = scheduledPods(t, ds)
    Assert(t).AreEqual(len(scheduled), 1, "expected a node to have been labeled")
    Assert(t).AreEqual(scheduled[0].ID, "node2/testPod", "expected node labeled with the daemon set's id")

    // Verify that the scheduled pod is correct
    err = waitForSpecificPod(kpStore, "node2", types.PodID("testPod"))
    Assert(t).IsNil(err, "Unexpected pod scheduled")

    //
    // Add 10 good nodes and 10 bad nodes then verify
    //
    //......... remainder of this example omitted .........
Developer: petertseng, Project: p2, Lines: 101, Source: daemon_set_test.go
Note: The NewFakePodStore examples from github.com/square/p2/pkg/kp/kptest in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult each project's License before distributing or using the code; do not reproduce without permission.