This article collects typical usage examples of ClusterName from the Golang package github.com/square/p2/pkg/ds/fields. If you are wondering what ClusterName does, how to call it, or what it looks like in real code, the curated examples below should help.
A total of 12 code examples of ClusterName are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Golang examples.
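Before the examples, here is a minimal, self-contained sketch of the call pattern they all share. It assumes only what the examples themselves demonstrate: ClusterName converts a plain string into the daemon set cluster-name type, and the resulting value is passed to a daemon set store's Create call and stored in the Name field of the returned daemon set. The store setup is omitted, so treat this as an illustration rather than a complete program from the p2 repository.

package main

import (
	"fmt"

	ds_fields "github.com/square/p2/pkg/ds/fields"
)

func main() {
	// Convert a plain string into the strongly typed cluster name used by
	// the daemon set store APIs (see store.Create(...) in the examples below).
	clusterName := ds_fields.ClusterName("some_name")

	// The value behaves like a string and, in the examples, later shows up
	// as the Name field of the DaemonSet returned by Create.
	fmt.Printf("cluster name: %s\n", clusterName)
}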
Example 1: TestList
func TestList(t *testing.T) {
	store := consulStoreWithFakeKV()
	// Create first DaemonSet
	firstPodID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(firstPodID)
	firstManifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	firstDS, err := store.Create(firstManifest, minHealth, clusterName, selector, firstPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	// Create second DaemonSet
	secondPodID := types.PodID("different_pod_id")
	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(secondPodID)
	secondManifest := manifestBuilder.GetManifest()
	secondDS, err := store.Create(secondManifest, minHealth, clusterName, selector, secondPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	daemonSetList, err := store.List()
	if err != nil {
		t.Fatalf("Error getting list of daemon sets: %s", err)
	}
	Assert(t).AreEqual(len(daemonSetList), 2, "Unexpected number of daemon sets listed")
	for _, daemonSet := range daemonSetList {
		if daemonSet.ID == firstDS.ID {
			Assert(t).AreEqual(daemonSet.PodID, firstPodID, "Listed daemon set pod ids were not equal")
		} else if daemonSet.PodID == secondDS.PodID {
			Assert(t).AreEqual(daemonSet.PodID, secondPodID, "Listed daemon set pod ids were not equal")
		} else {
			t.Errorf("Unexpected daemon set listed: %v", daemonSet)
		}
	}
}
Author: rudle, Project: p2, Lines: 55, Source: consul_store_test.go
Example 2: createDaemonSet
func createDaemonSet(store *consulStore, t *testing.T) ds_fields.DaemonSet {
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	manifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	ds, err := store.Create(manifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	if ds.ID == "" {
		t.Error("daemon set should have an id")
	}
	Assert(t).AreNotEqual(ds.ID, "", "Daemon set should have an id")
	Assert(t).AreNotEqual(ds.PodID, "", "Daemon set should have a pod id")
	Assert(t).AreEqual(ds.PodID, podID, "Daemon set pod id was not set correctly")
	Assert(t).AreEqual(ds.MinHealth, minHealth, "Daemon set minimum health was not set correctly")
	Assert(t).AreEqual(ds.Name, clusterName, "Daemon set cluster name was not set correctly")
	Assert(t).IsFalse(ds.Disabled, "Daemon set disabled field was not set correctly")
	testLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: azLabel.String(),
	}
	if matches := ds.NodeSelector.Matches(testLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}
	originalSHA, err := manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	getSHA, err := ds.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(originalSHA, getSHA, "Daemon set manifest not set correctly")
	return ds
}
Author: rudle, Project: p2, Lines: 52, Source: consul_store_test.go
Example 3: TestCreate
func TestCreate(t *testing.T) {
	store := consulStoreWithFakeKV()
	createDaemonSet(store, t)
	// Create a bad DaemonSet
	podID := types.PodID("")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID("")
	podManifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	if _, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout); err == nil {
		t.Error("Expected create to fail on bad pod id")
	}
	podID = types.PodID("pod_id")
	if _, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout); err == nil {
		t.Error("Expected create to fail on bad manifest pod id")
	}
	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID("different_pod_id")
	podManifest = manifestBuilder.GetManifest()
	if _, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout); err == nil {
		t.Error("Expected create to fail on pod id and manifest pod id mismatch")
	}
}
Author: rudle, Project: p2, Lines: 37, Source: consul_store_test.go
Example 4: main
func main() {
	cmd, consulOpts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	dsstore := dsstore.NewConsul(client, 3, &logger)
	applicator := labels.NewConsulApplicator(client, 3)
	switch cmd {
	case CmdCreate:
		minHealth, err := strconv.Atoi(*createMinHealth)
		if err != nil {
			log.Fatalf("Invalid value for minimum health, expected integer: %v", err)
		}
		name := ds_fields.ClusterName(*createName)
		manifest, err := manifest.FromPath(*createManifest)
		if err != nil {
			log.Fatalf("%s", err)
		}
		podID := manifest.ID()
		if *createTimeout <= time.Duration(0) {
			log.Fatalf("Timeout must be a positive non-zero value, got '%v'", *createTimeout)
		}
		selectorString := *createSelector
		if *createEverywhere {
			selectorString = klabels.Everything().String()
		} else if selectorString == "" {
			selectorString = klabels.Nothing().String()
log.Fatal("Explicit everything selector not allowed, please use the --everwhere flag")
		}
		selector, err := parseNodeSelectorWithPrompt(klabels.Nothing(), selectorString, applicator)
		if err != nil {
			log.Fatalf("Error occurred: %v", err)
		}
		if err = confirmMinheathForSelector(minHealth, selector, applicator); err != nil {
			log.Fatalf("Error occurred: %v", err)
		}
		ds, err := dsstore.Create(manifest, minHealth, name, selector, podID, *createTimeout)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("%v has been created in consul", ds.ID)
		fmt.Println()
	case CmdGet:
		id := ds_fields.ID(*getID)
		ds, _, err := dsstore.Get(id)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		bytes, err := json.Marshal(ds)
		if err != nil {
			logger.WithError(err).Fatalln("Unable to marshal daemon set as JSON")
		}
		fmt.Printf("%s", bytes)
	case CmdList:
		dsList, err := dsstore.List()
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		podID := types.PodID(*listPod)
		for _, ds := range dsList {
			if *listPod == "" || podID == ds.PodID {
				fmt.Printf("%s/%s:%s\n", ds.PodID, ds.Name, ds.ID)
			}
		}
	case CmdEnable:
		id := ds_fields.ID(*enableID)
		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if !ds.Disabled {
				return ds, util.Errorf("Daemon set has already been enabled")
			}
			ds.Disabled = false
			return ds, nil
		}
		_, err := dsstore.MutateDS(id, mutator)
		if err != nil {
			log.Fatalf("err: %v", err)
		}
		fmt.Printf("The daemon set '%s' has been successfully enabled in consul", id.String())
		fmt.Println()
	case CmdDisable:
		id := ds_fields.ID(*disableID)
		mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
			if ds.Disabled {
				return ds, util.Errorf("Daemon set has already been disabled")
			}
			ds.Disabled = true
			return ds, nil
		//......... (rest of this example omitted) .........
Author: rudle, Project: p2, Lines: 101, Source: main.go
Example 5: TestMultipleFarms
func TestMultipleFarms(t *testing.T) {
	retryInterval = testFarmRetryInterval
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()
	session := kptest.NewSession()
	firstLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "firstMultiple",
	})
	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "node3")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)
	//
	// Instantiate first farm
	//
	firstFarm := &Farm{
		dsStore: dsStore,
		kpStore: kpStore,
		scheduler: scheduler.NewApplicatorScheduler(applicator),
		applicator: applicator,
		children: make(map[ds_fields.ID]*childDS),
		session: session,
		logger: firstLogger,
		alerter: alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	firstQuitCh := make(chan struct{})
	defer close(firstQuitCh)
	go func() {
		go firstFarm.cleanupDaemonSetPods(firstQuitCh)
		firstFarm.mainLoop(firstQuitCh)
	}()
	//
	// Instantiate second farm
	//
	secondLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "secondMultiple",
	})
	secondFarm := &Farm{
		dsStore: dsStore,
		kpStore: kpStore,
		scheduler: scheduler.NewApplicatorScheduler(applicator),
		applicator: applicator,
		children: make(map[ds_fields.ID]*childDS),
		session: session,
		logger: secondLogger,
		alerter: alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	secondQuitCh := make(chan struct{})
	defer close(secondQuitCh)
	go func() {
		go secondFarm.cleanupDaemonSetPods(secondQuitCh)
		secondFarm.mainLoop(secondQuitCh)
	}()
	// Make two daemon sets with different node selectors
	// First daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()
	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	// Second daemon set
	anotherNodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, anotherNodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	// Make a node and verify that it was scheduled by the first daemon set
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")
	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")
	// Make a second node and verify that it was scheduled by the second daemon set
	applicator.SetLabel(labels.NODE, "node2", pc_fields.AvailabilityZoneLabel, "az2")
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	//......... (rest of this example omitted) .........
Author: drcapulet, Project: p2, Lines: 101, Source: farm_test.go
Example 6: TestContendNodes
// Tests dsContends for changes to both daemon sets and nodes
func TestContendNodes(t *testing.T) {
	retryInterval = testFarmRetryInterval
	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendNodes",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()
	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1")
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)
	dsf := &Farm{
		dsStore: dsStore,
		kpStore: kpStore,
		scheduler: scheduler.NewApplicatorScheduler(applicator),
		applicator: applicator,
		children: make(map[ds_fields.ID]*childDS),
		session: kptest.NewSession(),
		logger: logger,
		alerter: alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()
	//
	// Check for contention between two daemon sets among their nodes
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()
	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, dsData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")
	// Make a node and verify that it was scheduled
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")
	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")
	// Make another daemon set with a contending AvailabilityZoneLabel and verify
	// that it gets disabled and that the node label does not change
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).AreNotEqual(dsData.ID.String(), anotherDSData.ID.String(), "Precondition failed")
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, anotherDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")
	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")
	// Expect the new daemon set to be disabled both in the farm and in the dsStore
	err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")
	//
	// Make a third daemon set and update its node selector to force a contend,
	// then verify that it has been disabled and the node hasn't been overwritten
	//
	anotherSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"undefined"})
	badDS, err := dsStore.Create(podManifest, minHealth, clusterName, anotherSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, badDS.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")
	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.NodeSelector = nodeSelector
		return dsToUpdate, nil
	}
	badDS, err = dsStore.MutateDS(badDS.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelector(dsf, badDS)
	//......... (rest of this example omitted) .........
Author: drcapulet, Project: p2, Lines: 101, Source: farm_test.go
Example 7: TestContendSelectors
// Tests dsContends for NodeSelectors
func TestContendSelectors(t *testing.T) {
	retryInterval = testFarmRetryInterval
	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendSelectors",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()
	var allNodes []types.NodeName
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)
	dsf := &Farm{
		dsStore: dsStore,
		kpStore: kpStore,
		scheduler: scheduler.NewApplicatorScheduler(applicator),
		applicator: applicator,
		children: make(map[ds_fields.ID]*childDS),
		session: kptest.NewSession(),
		logger: logger,
		alerter: alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()
	//
	// Make two daemon sets with an everything selector and verify that they trivially
	// contend and that only the second daemon set gets disabled
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()
	everythingSelector := klabels.Everything()
	firstDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, firstDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")
	secondDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, secondDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")
	// Verify that only the second daemon set is disabled
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, false)
	Assert(t).IsNil(err, "First daemon set should not be disabled")
	err = waitForDisabled(dsf, dsStore, secondDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling second daemon set")
	// Add another daemon set with different selector and verify it gets disabled
	someSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"nowhere"})
	thirdDSData, err := dsStore.Create(podManifest, minHealth, clusterName, someSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, thirdDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")
	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")
	//
	// Disable first daemon set, then enable second and third daemon sets in that order
	// and then there should be a contend on the third daemon set
	//
	disableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true
		return dsToUpdate, nil
	}
	enableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}
	// Disable first ds and verify it is disabled
	_, err = dsStore.MutateDS(firstDSData.ID, disableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling first daemon set")
	//......... (rest of this example omitted) .........
Author: drcapulet, Project: p2, Lines: 101, Source: farm_test.go
Example 8: TestWatchAll
func TestWatchAll(t *testing.T) {
	store := consulStoreWithFakeKV()
	//
	// Create a new daemon set
	//
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	ds, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	//
	// Create another new daemon set
	//
	someOtherPodID := types.PodID("some_other_pod_id")
	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(someOtherPodID)
	someOtherManifest := manifestBuilder.GetManifest()
	someOtherDS, err := store.Create(someOtherManifest, minHealth, clusterName, selector, someOtherPodID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	//
	// Watch for create and verify
	//
	quitCh := make(chan struct{})
	inCh := store.WatchAll(quitCh, 0)
	defer close(quitCh)
	var watched WatchedDaemonSetList
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}
	Assert(t).AreEqual(len(watched.DaemonSets), 2, "Unexpected number of daemon sets watched")
	for _, watchedDS := range watched.DaemonSets {
		if watchedDS.ID == ds.ID {
			Assert(t).AreEqual(watchedDS.PodID, ds.PodID, "Daemon sets should have equal pod ids")
		} else if watchedDS.ID == someOtherDS.ID {
			Assert(t).AreEqual(watchedDS.PodID, someOtherDS.PodID, "Daemon sets should have equal pod ids")
		} else {
			t.Errorf("Expected to find id '%s' among watch results, but was not present", watchedDS.ID)
		}
	}
	//
	// Watch for delete and verify
	//
	err = store.Delete(someOtherDS.ID)
	if err != nil {
		t.Error("Unable to delete daemon set")
	}
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		t.Fatal("Expected something on channel but found nothing")
	}
	if watched.Err != nil {
		t.Errorf("Unexpected error on watched daemon sets: %s", watched.Err)
	}
	Assert(t).AreEqual(len(watched.DaemonSets), 1, "Unexpected number of daemon sets watched")
	Assert(t).AreEqual(ds.ID, watched.DaemonSets[0].ID, "Daemon sets should have equal ids")
	//
	// Watch for update and verify
	//
	mutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.Disabled = !dsToMutate.Disabled
		return dsToMutate, nil
	}
	ds, err = store.MutateDS(ds.ID, mutator)
	if err != nil {
		t.Fatalf("Unable to mutate daemon set: %s", err)
	}
	select {
	case watched = <-inCh:
	case <-time.After(5 * time.Second):
		//......... (rest of this example omitted) .........
Author: rudle, Project: p2, Lines: 101, Source: consul_store_test.go
Example 9: TestMutate
func TestMutate(t *testing.T) {
	store := consulStoreWithFakeKV()
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	ds, err := store.Create(podManifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	//
	// Invalid mutates
	//
	errorMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		return dsToMutate, util.Errorf("This is an error")
	}
	_, err = store.MutateDS(ds.ID, errorMutator)
	if err == nil {
		t.Error("Expected error when mutator produces an error")
	}
	badIDMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.ID = ""
		return dsToMutate, nil
	}
	_, err = store.MutateDS(ds.ID, badIDMutator)
	if err == nil {
		t.Error("Expected error when mutating daemon set ID")
	}
	badPodIDMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.PodID = ""
		return dsToMutate, nil
	}
	_, err = store.MutateDS(ds.ID, badPodIDMutator)
	if err == nil {
		t.Error("Expected error when mutating daemon set PodID to mismatch manifest")
	}
	//
	// A valid mutate followed by validation
	//
	someOtherDisabled := !ds.Disabled
	someOtherMinHealth := 42
	someOtherName := ds_fields.ClusterName("some_other_name")
	someOtherPodID := types.PodID("some_other_pod_id")
	manifestBuilder = manifest.NewBuilder()
	manifestBuilder.SetID(someOtherPodID)
	someOtherManifest := manifestBuilder.GetManifest()
	someOtherAZLabel := pc_fields.AvailabilityZone("some_other_zone")
	someOtherSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{someOtherAZLabel.String()})
	goodMutator := func(dsToMutate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToMutate.Disabled = someOtherDisabled
		dsToMutate.Manifest = someOtherManifest
		dsToMutate.MinHealth = someOtherMinHealth
		dsToMutate.Name = someOtherName
		dsToMutate.NodeSelector = someOtherSelector
		dsToMutate.PodID = someOtherPodID
		return dsToMutate, nil
	}
	someOtherDS, err := store.MutateDS(ds.ID, goodMutator)
	if err != nil {
		t.Fatalf("Unable to mutate daemon set: %s", err)
	}
	Assert(t).AreEqual(someOtherDS.ID, ds.ID, "Daemon sets should be equal ids")
	Assert(t).AreEqual(someOtherDS.PodID, someOtherPodID, "Daemon sets should have equal pod ids")
	Assert(t).AreEqual(someOtherDS.MinHealth, someOtherMinHealth, "Daemon sets should have equal minimum healths")
	Assert(t).AreEqual(someOtherDS.Name, someOtherName, "Daemon sets should have equal names")
	Assert(t).AreEqual(someOtherDS.Disabled, someOtherDisabled, "Daemon sets should have same disabled fields")
	someOtherLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: someOtherAZLabel.String(),
	}
	if matches := someOtherDS.NodeSelector.Matches(someOtherLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}
	someOtherSHA, err := someOtherManifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	dsSHA, err := someOtherDS.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	//......... (rest of this example omitted) .........
Author: rudle, Project: p2, Lines: 101, Source: consul_store_test.go
Example 10: TestGet
func TestGet(t *testing.T) {
	store := consulStoreWithFakeKV()
	//
	// Create DaemonSet
	//
	podID := types.PodID("some_pod_id")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	azLabel := pc_fields.AvailabilityZone("some_zone")
	selector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{azLabel.String()})
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	manifest := manifestBuilder.GetManifest()
	timeout := replication.NoTimeout
	ds, err := store.Create(manifest, minHealth, clusterName, selector, podID, timeout)
	if err != nil {
		t.Fatalf("Unable to create daemon set: %s", err)
	}
	Assert(t).AreNotEqual(ds.ID, "", "Daemon set should have an id")
	//
	// Get DaemonSet and verify it is the same
	//
	getDS, _, err := store.Get(ds.ID)
	if err != nil {
		t.Fatalf("Error retrieving created daemon set: %s", err)
	}
	Assert(t).AreNotEqual(getDS.ID, "", "Daemon set should have an id")
	Assert(t).AreNotEqual(getDS.PodID, "", "Daemon set should have a pod id")
	Assert(t).AreEqual(ds.ID, getDS.ID, "Daemon set should be equal ids")
	Assert(t).AreEqual(ds.PodID, getDS.PodID, "Daemon set should have equal pod ids")
	Assert(t).AreEqual(ds.MinHealth, getDS.MinHealth, "Daemon set should have equal minimum healths")
	Assert(t).AreEqual(ds.Name, getDS.Name, "Daemon set should have equal names")
	Assert(t).AreEqual(ds.Disabled, getDS.Disabled, "Daemon set should have same disabled fields")
	testLabels := klabels.Set{
		pc_fields.AvailabilityZoneLabel: azLabel.String(),
	}
	if matches := getDS.NodeSelector.Matches(testLabels); !matches {
		t.Error("The daemon set has a bad node selector")
	}
	originalSHA, err := manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest")
	}
	getSHA, err := getDS.Manifest.SHA()
	if err != nil {
		t.Fatal("Unable to retrieve SHA from manifest retrieved from daemon set")
	}
	Assert(t).AreEqual(originalSHA, getSHA, "Daemon set shas were not equal")
	// Invalid get operation
	_, _, err = store.Get("bad_id")
	if err == nil {
		t.Error("Expected get operation to fail when getting a daemon set which does not exist")
	}
}
Author: rudle, Project: p2, Lines: 67, Source: consul_store_test.go
Example 11: TestPublishToReplication
func TestPublishToReplication(t *testing.T) {
	retryInterval = testFarmRetryInterval
	//
	// Setup fixture and schedule a pod
	//
	dsStore := dsstoretest.NewFake()
	podID := types.PodID("testPod")
	minHealth := 1
	clusterName := ds_fields.ClusterName("some_name")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()
	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	timeout := replication.NoTimeout
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
	Assert(t).IsNil(err, "expected no error creating request")
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()
	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)
	ds := New(
		dsData,
		dsStore,
		kpStore,
		applicator,
		applicator,
		logging.DefaultLogger,
		&happyHealthChecker,
		0,
		false,
	).(*daemonSet)
	scheduled := scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")
	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")
	err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node1")
	err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
Assert(t).IsNil(err, "expected no error labeling node1")
	//
	// Adds a watch that will automatically send a signal when a change was made
	// to the daemon set
	//
	quitCh := make(chan struct{})
	updatedCh := make(chan *ds_fields.DaemonSet)
	deletedCh := make(chan *ds_fields.DaemonSet)
	defer close(quitCh)
	defer close(updatedCh)
	defer close(deletedCh)
	desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
	dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)
	//
	// Verify that 2 pods have been scheduled
	//
	numNodes := waitForNodes(t, ds, 2, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 2, "took too long to schedule")
	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 2, "expected a node to have been labeled")
	// Mutate the daemon set so that the node is unscheduled, this should not produce an error
	mutator := func(dsToChange ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToChange.NodeSelector = klabels.Everything().
			Add("nodeQuality", klabels.EqualsOperator, []string{"bad"})
		return dsToChange, nil
	}
	_, err = dsStore.MutateDS(ds.ID(), mutator)
Assert(t).IsNil(err, "Unxpected error trying to mutate daemon set")
	select {
	case <-time.After(1 * time.Second):
	case err := <-desiresErrCh:
		t.Fatalf("Unexpected error unscheduling pod: %v", err)
	}
	numNodes = waitForNodes(t, ds, 0, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 0, "took too long to unschedule")
}
Author: petertseng, Project: p2, Lines: 98, Source: daemon_set_test.go
Example 12: TestSchedule
// TestSchedule checks consecutive scheduling and unscheduling for:
// - creation of a daemon set
// - different node selectors
// - changes to nodes allocations
// - mutations to a daemon set
// - deleting a daemon set
func TestSchedule(t *testing.T) {
	retryInterval = testFarmRetryInterval
	//
	// Setup fixture and schedule a pod
	//
	dsStore := dsstoretest.NewFake()
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()
	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	timeout := replication.NoTimeout
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, timeout)
	Assert(t).IsNil(err, "expected no error creating request")
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()
	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "nodeOk")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("bad_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)
	ds := New(
		dsData,
		dsStore,
		kpStore,
		applicator,
		applicator,
		logging.DefaultLogger,
		&happyHealthChecker,
		0,
		false,
	).(*daemonSet)
	scheduled := scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 0, "expected no pods to have been labeled")
	err = waitForPodsInIntent(kpStore, 0)
	Assert(t).IsNil(err, "Unexpected number of pods scheduled")
	err = applicator.SetLabel(labels.NODE, "node1", "nodeQuality", "bad")
	Assert(t).IsNil(err, "expected no error labeling node1")
	err = applicator.SetLabel(labels.NODE, "node2", "nodeQuality", "good")
	Assert(t).IsNil(err, "expected no error labeling node2")
	//
	// Adds a watch that will automatically send a signal when a change was made
	// to the daemon set
	//
	quitCh := make(chan struct{})
	updatedCh := make(chan *ds_fields.DaemonSet)
	deletedCh := make(chan *ds_fields.DaemonSet)
	defer close(quitCh)
	defer close(updatedCh)
	defer close(deletedCh)
	desiresErrCh := ds.WatchDesires(quitCh, updatedCh, deletedCh)
	dsChangesErrCh := watchDSChanges(ds, dsStore, quitCh, updatedCh, deletedCh)
	//
	// Verify that the pod has been scheduled
	//
	numNodes := waitForNodes(t, ds, 1, desiresErrCh, dsChangesErrCh)
	Assert(t).AreEqual(numNodes, 1, "took too long to schedule")
	scheduled = scheduledPods(t, ds)
	Assert(t).AreEqual(len(scheduled), 1, "expected a node to have been labeled")
	Assert(t).AreEqual(scheduled[0].ID, "node2/testPod", "expected node labeled with the daemon set's id")
	// Verify that the scheduled pod is correct
	err = waitForSpecificPod(kpStore, "node2", types.PodID("testPod"))
	Assert(t).IsNil(err, "Unexpected pod scheduled")
	//
	// Add 10 good nodes and 10 bad nodes then verify
	//
	//......... (rest of this example omitted) .........
Author: petertseng, Project: p2, Lines: 101, Source: daemon_set_test.go
Note: the github.com/square/p2/pkg/ds/fields.ClusterName examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are drawn from open-source projects contributed by their authors; copyright in the code remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.