This article collects typical usage examples of the Golang function github.com/square/p2/pkg/labels.NewFakeApplicator. If you are wondering how NewFakeApplicator is used in practice, or are simply looking for concrete examples of it, the hand-picked code samples below should help.
The sections below show 14 code examples of the NewFakeApplicator function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
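Before diving into the collected examples, here is a minimal sketch of the pattern they all share: construct an in-memory fake applicator, set a label, and read it back, with no Consul server involved. This is an illustration only — the test name and package are invented here, and the SetLabel/GetLabels calls and the labels.NODE type are assumed from the examples below, so check them against the p2 version you use.

package example

import (
	"testing"

	"github.com/square/p2/pkg/labels"
)

// TestFakeApplicatorSketch is a hypothetical test showing the basic
// set-then-read pattern used throughout the examples in this article.
func TestFakeApplicatorSketch(t *testing.T) {
	applicator := labels.NewFakeApplicator()

	// The fake applicator keeps labels in memory, so no Consul server is needed.
	if err := applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "good"); err != nil {
		t.Fatalf("unexpected error setting label: %s", err)
	}

	// Read the labels back and verify the value we just wrote.
	labeled, err := applicator.GetLabels(labels.NODE, "node1")
	if err != nil {
		t.Fatalf("unexpected error getting labels: %s", err)
	}
	if labeled.Labels.Get("nodeQuality") != "good" {
		t.Fatalf("expected node1 to be labeled nodeQuality=good")
	}
}

In the real tests that follow, the same fake applicator is passed wherever a labels.Applicator is expected — to replication controllers, roll stores, and daemon set farms — so the tests can run entirely against in-memory state.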
Example 1: consulStoreWithFakeKV
func consulStoreWithFakeKV() *consulStore {
	return &consulStore{
		kv:         consulutil.NewFakeClient().KV(),
		applicator: labels.NewFakeApplicator(),
		logger:     logging.DefaultLogger,
	}
}
Developer: drcapulet, Project: p2, Lines: 7, Source: consul_store_test.go
Example 2: TestWouldWorkOn
func TestWouldWorkOn(t *testing.T) {
	fakeLabels := labels.NewFakeApplicator()
	fakeLabels.SetLabel(labels.RC, "abc-123", "color", "red")
	fakeLabels.SetLabel(labels.RC, "def-456", "color", "blue")

	f := &Farm{
		labeler:    fakeLabels,
		rcSelector: klabels.Everything().Add("color", klabels.EqualsOperator, []string{"red"}),
	}

	workOn, err := f.shouldWorkOn(rc_fields.ID("abc-123"))
	Assert(t).IsNil(err, "should not have erred on abc-123")
	Assert(t).IsTrue(workOn, "should have worked on abc-123, but didn't")

	dontWorkOn, err := f.shouldWorkOn(rc_fields.ID("def-456"))
	Assert(t).IsNil(err, "should not have erred on def-456")
	Assert(t).IsFalse(dontWorkOn, "should not have worked on def-456, but did")

	dontWorkOn, err = f.shouldWorkOn(rc_fields.ID("987-cba"))
	Assert(t).IsNil(err, "should not have erred on 987-cba")
	Assert(t).IsFalse(dontWorkOn, "should not have worked on 987-cba, but did")

	f.rcSelector = klabels.Everything()
	workOn, err = f.shouldWorkOn(rc_fields.ID("def-456"))
	Assert(t).IsNil(err, "should not have erred on def-456")
	Assert(t).IsTrue(workOn, "should have worked on def-456, but didn't")
}
Developer: petertseng, Project: p2, Lines: 28, Source: update_test.go
Example 3: setup
func setup(t *testing.T) (
	rcStore rcstore.Store,
	kpStore fakeKpStore,
	applicator labels.Applicator,
	rc ReplicationController) {

	rcStore = rcstore.NewFake()

	manifestBuilder := pods.NewManifestBuilder()
	manifestBuilder.SetID("testPod")
	manifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	podLabels := map[string]string{"podTest": "successful"}

	rcData, err := rcStore.Create(manifest, nodeSelector, podLabels)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore = fakeKpStore{manifests: make(map[string]pods.Manifest)}
	applicator = labels.NewFakeApplicator()

	rc = New(
		rcData,
		&kpStore,
		rcStore,
		NewApplicatorScheduler(applicator),
		applicator,
		logging.DefaultLogger,
	)

	return
}
Developer: tomzhang, Project: p2, Lines: 32, Source: replication_controller_test.go
Example 4: newRollStore
func newRollStore(t *testing.T, entries []fields.Update) consulStore {
	storeFields := make(map[string]*api.KVPair)
	for _, u := range entries {
		path, err := RollPath(fields.ID(u.NewRC))
		if err != nil {
			t.Fatalf("Unable to create roll store for test: %s", err)
		}
		json, err := json.Marshal(u)
		if err != nil {
			t.Fatalf("Unable to marshal test field as JSON: %s", err)
		}
		storeFields[path] = &api.KVPair{
			Key:   path,
			Value: json,
		}
	}
	return consulStore{
		kv: &consulutil.FakeKV{
			Entries: storeFields,
		},
		store:   kptest.NewFakePodStore(nil, nil),
		rcstore: rcstore.NewFake(),
		labeler: labels.NewFakeApplicator(),
	}
}
Developer: petertseng, Project: p2, Lines: 25, Source: consul_store_test.go
Example 5: TestNewConsul
func TestNewConsul(t *testing.T) {
	store := NewConsul(kp.NewConsulClient(kp.Options{}), labels.NewFakeApplicator(), nil)
	rollstore := store.(consulStore)
	if rollstore.kv == nil {
		t.Fatal("kv should not be nil for constructed rollstore")
	}
	if rollstore.rcstore == nil {
		t.Fatal("rcstore should not be nil for constructed rollstore")
	}
	if rollstore.labeler == nil {
		t.Fatal("labeler should not be nil for constructed rollstore")
	}
	if rollstore.store == nil {
		t.Fatal("store should not be nil for constructed rollstore")
	}
}
Developer: petertseng, Project: p2, Lines: 19, Source: consul_store_test.go
Example 6: updateWithHealth
func updateWithHealth(t *testing.T,
	desiredOld, desiredNew int,
	oldNodes, newNodes map[types.NodeName]bool,
	checks map[types.NodeName]health.Result,
) (update, manifest.Manifest, manifest.Manifest) {
	podID := "mypod"
	oldManifest := podWithIDAndPort(podID, 9001)
	newManifest := podWithIDAndPort(podID, 9002)

	podMap := map[kptest.FakePodStoreKey]manifest.Manifest{}
	assignManifestsToNodes(types.PodID(podID), oldNodes, podMap, oldManifest, newManifest)
	assignManifestsToNodes(types.PodID(podID), newNodes, podMap, newManifest, oldManifest)
	kps := kptest.NewFakePodStore(podMap, nil)

	rcs := rcstore.NewFake()
	applicator := labels.NewFakeApplicator()

	oldRC, err := createRC(rcs, applicator, oldManifest, desiredOld, oldNodes)
	Assert(t).IsNil(err, "expected no error setting up old RC")

	newRC, err := createRC(rcs, applicator, newManifest, desiredNew, newNodes)
	Assert(t).IsNil(err, "expected no error setting up new RC")

	return update{
		kps:     kps,
		rcs:     rcs,
		hcheck:  checkertest.NewSingleService(podID, checks),
		labeler: applicator,
		logger:  logging.TestLogger(),
		Update: fields.Update{
			OldRC: oldRC.ID,
			NewRC: newRC.ID,
		},
	}, oldManifest, newManifest
}
Developer: petertseng, Project: p2, Lines: 36, Source: update_test.go
Example 7: TestPublishLatestRCsWithLockInfoWithLocks
func TestPublishLatestRCsWithLockInfoWithLocks(t *testing.T) {
	client := consulutil.NewFakeClient()
	fakeKV := client.KV()
	rcstore := NewConsul(client, labels.NewFakeApplicator(), 1)

	inCh := make(chan []fields.RC)
	defer close(inCh)
	quitCh := make(chan struct{})
	defer close(quitCh)

	lockResultCh, errCh := rcstore.publishLatestRCsWithLockInfo(inCh, quitCh)
	go func() {
		for err := range errCh {
			t.Fatalf("Unexpected error on errCh: %s", err)
		}
	}()

	// Create a test case with 2 RCs whose locks are acquired below
	lockedCase := LockInfoTestCase{
		InputRCs: []fields.RC{{ID: "abc"}, {ID: "123"}},
		ExpectedOutput: []RCLockResult{
			{
				RC:                fields.RC{ID: "abc"},
				LockedForMutation: true,
			},
			{
				RC:                 fields.RC{ID: "123"},
				LockedForOwnership: true,
			},
		},
	}

	ownershipLockPath, err := rcstore.ownershipLockPath("123")
	if err != nil {
		t.Fatalf("Unable to compute ownership lock path: %s", err)
	}
	_, _, err = fakeKV.Acquire(&api.KVPair{
		Key: ownershipLockPath,
	}, nil)
	if err != nil {
		t.Fatalf("Unable to lock for ownership: %s", err)
	}

	mutationLockPath, err := rcstore.mutationLockPath("abc")
	if err != nil {
		t.Fatalf("Unable to compute mutation lock path: %s", err)
	}
	_, _, err = fakeKV.Acquire(&api.KVPair{
		Key: mutationLockPath,
	}, nil)
	if err != nil {
		t.Fatalf("Unable to lock for mutation: %s", err)
	}

	verifyLockInfoTestCase(t, lockedCase, inCh, lockResultCh)

	// Add an update creation lock to the second one
	lockedCase2 := LockInfoTestCase{
		InputRCs: []fields.RC{{ID: "abc"}, {ID: "123"}},
		ExpectedOutput: []RCLockResult{
			{
				RC:                fields.RC{ID: "abc"},
				LockedForMutation: true,
			},
			{
				RC:                      fields.RC{ID: "123"},
				LockedForOwnership:      true,
				LockedForUpdateCreation: true,
			},
		},
	}

	updateCreationLockPath, err := rcstore.updateCreationLockPath("123")
	if err != nil {
		t.Fatalf("Unable to compute update creation lock path: %s", err)
	}
	_, _, err = fakeKV.Acquire(&api.KVPair{
		Key: updateCreationLockPath,
	}, nil)
	if err != nil {
		t.Fatalf("Unable to lock for updateCreation: %s", err)
	}

	verifyLockInfoTestCase(t, lockedCase2, inCh, lockResultCh)
}
Developer: petertseng, Project: p2, Lines: 85, Source: consul_store_test.go
Example 8: TestPublishLatestRCsWithLockInfoNoLocks
func TestPublishLatestRCsWithLockInfoNoLocks(t *testing.T) {
	client := consulutil.NewFakeClient()
	rcstore := NewConsul(client, labels.NewFakeApplicator(), 1)

	inCh := make(chan []fields.RC)
	defer close(inCh)
	quitCh := make(chan struct{})
	defer close(quitCh)

	lockResultCh, errCh := rcstore.publishLatestRCsWithLockInfo(inCh, quitCh)
	go func() {
		for err := range errCh {
			t.Fatalf("Unexpected error on errCh: %s", err)
		}
	}()

	// Create a test case with 2 RCs with no locks
	unlockedCase := LockInfoTestCase{
		InputRCs: []fields.RC{{ID: "abc"}, {ID: "123"}},
		ExpectedOutput: []RCLockResult{
			{
				RC: fields.RC{ID: "abc"},
			},
			{
				RC: fields.RC{ID: "123"},
			},
		},
	}

	verifyLockInfoTestCase(t, unlockedCase, inCh, lockResultCh)

	// Write the same input once more without reading the channel and make
	// sure that doesn't cause timeouts
	select {
	case inCh <- unlockedCase.InputRCs:
	case <-time.After(1 * time.Second):
		t.Fatalf("Timed out writing to input channel")
	}

	// create a new case
	unlockedCase2 := LockInfoTestCase{
		InputRCs: []fields.RC{
			{ID: "abc"},
			{ID: "123"},
			{ID: "456"},
		},
		ExpectedOutput: []RCLockResult{
			{
				RC: fields.RC{ID: "abc"},
			},
			{
				RC: fields.RC{ID: "123"},
			},
			{
				RC: fields.RC{ID: "456"},
			},
		},
	}

	verifyLockInfoTestCase(t, unlockedCase2, inCh, lockResultCh)
}
Developer: petertseng, Project: p2, Lines: 61, Source: consul_store_test.go
Example 9: TestMultipleFarms
func TestMultipleFarms(t *testing.T) {
	retryInterval = testFarmRetryInterval
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	session := kptest.NewSession()
	firstLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "firstMultiple",
	})

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "node3")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	//
	// Instantiate first farm
	//
	firstFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        firstLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	firstQuitCh := make(chan struct{})
	defer close(firstQuitCh)
	go func() {
		go firstFarm.cleanupDaemonSetPods(firstQuitCh)
		firstFarm.mainLoop(firstQuitCh)
	}()

	//
	// Instantiate second farm
	//
	secondLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "secondMultiple",
	})
	secondFarm := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       session,
		logger:        secondLogger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	secondQuitCh := make(chan struct{})
	defer close(secondQuitCh)
	go func() {
		go secondFarm.cleanupDaemonSetPods(secondQuitCh)
		secondFarm.mainLoop(secondQuitCh)
	}()

	// Make two daemon sets with different node selectors
	// First daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Second daemon set
	anotherNodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, anotherNodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Make a node and verify that it was scheduled by the first daemon set
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a second node and verify that it was scheduled by the second daemon set
	applicator.SetLabel(labels.NODE, "node2", pc_fields.AvailabilityZoneLabel, "az2")

	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	//......... remainder of code omitted .........
Developer: drcapulet, Project: p2, Lines: 101, Source: farm_test.go
Example 10: TestCleanupPods
func TestCleanupPods(t *testing.T) {
	retryInterval = testFarmRetryInterval
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	// Make some dangling pod labels and instantiate a farm, expecting it to clean them up
	podID := types.PodID("testPod")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	var allNodes []types.NodeName
	allNodes = append(allNodes)
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		err := applicator.SetLabel(labels.POD, id, DSIDLabel, "impossible_id")
		Assert(t).IsNil(err, "Expected no error labeling node")

		_, err = kpStore.SetPod(kp.INTENT_TREE, types.NodeName(nodeName), podManifest)
		Assert(t).IsNil(err, "Expected no error adding pod to intent tree")
	}

	// Assert that precondition is true
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		labeled, err := applicator.GetLabels(labels.POD, id)
		Assert(t).IsNil(err, "Expected no error getting labels")
		Assert(t).IsTrue(labeled.Labels.Has(DSIDLabel), "Precondition failed: Pod must have a dsID label")

		_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
		Assert(t).IsNil(err, "Expected no error getting pod from intent store")
		Assert(t).AreNotEqual(err, pods.NoCurrentManifest, "Precondition failed: Pod was not in intent store")
	}

	// Instantiate farm
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "cleanupPods",
	})
	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	// Make sure there are no labeled pods left
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		_, err := waitForPodLabel(applicator, false, id)
		Assert(t).IsNil(err, "Expected pod not to have a dsID label")

		condition := func() error {
			_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
			if err != pods.NoCurrentManifest {
				return util.Errorf("Expected pod to be deleted in intent store")
			}
			return nil
		}
		err = waitForCondition(condition)
		Assert(t).IsNil(err, "Error cleaning up pods")
	}
}
Developer: drcapulet, Project: p2, Lines: 88, Source: farm_test.go
Example 11: TestContendNodes
// Tests dsContends for changes to both daemon sets and nodes
func TestContendNodes(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendNodes",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1")
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Check for contention between two daemon sets among their nodes
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, dsData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Make a node and verify that it was scheduled
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")

	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make another daemon set with a contending AvailabilityZoneLabel and verify
	// that it gets disabled and that the node label does not change
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).AreNotEqual(dsData.ID.String(), anotherDSData.ID.String(), "Precondition failed")
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, anotherDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

	// Expect the new daemon set to be disabled both in the farm and in the dsStore
	err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	//
	// Make a third daemon set and update its node selector to force a contend,
	// then verify that it has been disabled and the node hasn't been overwritten
	//
	anotherSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"undefined"})
	badDS, err := dsStore.Create(podManifest, minHealth, clusterName, anotherSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, badDS.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.NodeSelector = nodeSelector
		return dsToUpdate, nil
	}
	badDS, err = dsStore.MutateDS(badDS.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelector(dsf, badDS)
	//......... remainder of code omitted .........
Developer: drcapulet, Project: p2, Lines: 101, Source: farm_test.go
Example 12: TestContendSelectors
// Tests dsContends for NodeSelectors
func TestContendSelectors(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendSelectors",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore:       dsStore,
		kpStore:       kpStore,
		scheduler:     scheduler.NewApplicatorScheduler(applicator),
		applicator:    applicator,
		children:      make(map[ds_fields.ID]*childDS),
		session:       kptest.NewSession(),
		logger:        logger,
		alerter:       alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Make two daemon sets with an everything selector and verify that they trivially
	// contend and that only the second daemon set gets disabled
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	everythingSelector := klabels.Everything()
	firstDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, firstDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	secondDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, secondDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that only the second daemon set is disabled
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, false)
	Assert(t).IsNil(err, "First daemon set should not be disabled")
	err = waitForDisabled(dsf, dsStore, secondDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling second daemon set")

	// Add another daemon set with a different selector and verify it gets disabled
	someSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"nowhere"})
	thirdDSData, err := dsStore.Create(podManifest, minHealth, clusterName, someSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, thirdDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")
	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Disable first daemon set, then enable second and third daemon sets in that order
	// and then there should be a contend on the third daemon set
	//
	disableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true
		return dsToUpdate, nil
	}
	enableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}

	// Disable first ds and verify it is disabled
	_, err = dsStore.MutateDS(firstDSData.ID, disableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling first daemon set")
	//......... remainder of code omitted .........
Developer: drcapulet, Project: p2, Lines: 101, Source: farm_test.go
Example 13: TestPublishToReplication
func TestPublishToReplication(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Setup fixture and schedule a pod
	//
	dsStore := dsstoretest.NewFake()

	podID := types.PodID("testPod")
	minHealth := 1
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	timeout := replication.NoTimeout

	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	ds := New(
		dsData,
		dsStore,
		kpStore,
		applicator,
		applicator,
		logging.DefaultLogger,
		&happyHealthChecker,
		0,
		false,
	).(*daemonSet)

	scheduled := scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")
	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")

	err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node1")
	err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node2")

	//
	// Adds a watch that will automatically send a signal when a change was made
	// to the daemon set
	//
	quitCh := make(chan struct{})
	updatedCh := make(chan *ds_fields.DaemonSet)
	deletedCh := make(chan *ds_fields.DaemonSet)
	defer close(quitCh)
	defer close(updatedCh)
	defer close(deletedCh)
	desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
	dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)

	//
	// Verify that 2 pods have been scheduled
	//
	numNodes := waitForNodes(t, ds, 2, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 2, "took too long to schedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 2, "expected a node to have been labeled")

	// Mutate the daemon set so that the node is unscheduled, this should not produce an error
	mutator := func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.NodeSelector = klabels.Everything().
			Add("nodeQuality", klabels.EqualsOperator, []string{"bad"})
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
	Assert(t).IsNil(err, "Unexpected error trying to mutate daemon set")

	select {
	case <-time.After(1 * time.Second):
	case err := <-desiresErrCh:
		t.Fatalf("Unexpected error unscheduling pod: %v", err)
	}

	numNodes = waitForNodes(t, ds, 0, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 0, "took too long to unschedule")
}
Developer: petertseng, Project: p2, Lines: 98, Source: daemon_set_test.go
Example 14: TestSchedule
// TestSchedule checks consecutive scheduling and unscheduling for:
// - creation of a daemon set
// - different node selectors
// - changes to nodes allocations
// - mutations to a daemon set
// - deleting a daemon set
func TestSchedule(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Setup fixture and schedule a pod
	//
	dsStore := dsstoretest.NewFake()

	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	timeout := replication.NoTimeout

	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()

	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "nodeOk")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("bad_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	ds := New(
		dsData,
		dsStore,
		kpStore,
		applicator,
		applicator,
		logging.DefaultLogger,
		&happyHealthChecker,
		0,
		false,
	).(*daemonSet)

	scheduled := scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")
	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")

	err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "bad")
	Assert(t).IsNil(err, "expected no error labeling node1")
	err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node2")

	//
	// Adds a watch that will automatically send a signal when a change was made
	// to the daemon set
	//
	quitCh := make(chan struct{})
	updatedCh := make(chan *ds_fields.DaemonSet)
	deletedCh := make(chan *ds_fields.DaemonSet)
	defer close(quitCh)
	defer close(updatedCh)
	defer close(deletedCh)
	desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
	dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)

	//
	// Verify that the pod has been scheduled
	//
	numNodes := waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 1, "took too long to schedule")

	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 1, "expected a node to have been labeled")
	Assert(t).AreEqual(scheduled[0].ID, "node2/testPod", "expected node labeled with the daemon set's id")

	// Verify that the scheduled pod is correct
	err = waitForSpecificPod(kpStore, "node2", types.PodID("testPod"))
	Assert(t).IsNil(err, "Unexpected pod scheduled")

	//
	// Add 10 good nodes and 10 bad nodes then verify
	//
	//......... remainder of code omitted .........
Developer: petertseng, Project: p2, Lines: 101, Source: daemon_set_test.go
Note: The github.com/square/p2/pkg/labels.NewFakeApplicator examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosts; the snippets are drawn from open-source projects contributed by their respective authors. Copyright of the source code remains with the original authors — consult each project's License before distributing or reusing it, and do not republish this compilation without permission.