This article collects typical usage examples of the Go function github.com/square/p2/pkg/scheduler.NewApplicatorScheduler. If you are unsure what NewApplicatorScheduler does or how to call it, the curated examples below should help.
Ten code examples of the NewApplicatorScheduler function are shown, drawn from open-source projects and ordered roughly by popularity.
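Before the examples, here is a minimal sketch of the pattern they all share: NewApplicatorScheduler wraps a label applicator and returns a scheduler whose node-selection decisions are driven by label selectors. The import paths and the use of labels.NewFakeApplicator as a stand-in label store are assumptions inferred from the examples below, not a verbatim excerpt from p2.

package main

import (
	"fmt"

	// Import paths assumed from the package names used in the examples below.
	"github.com/square/p2/pkg/labels"
	"github.com/square/p2/pkg/scheduler"
)

func main() {
	// A fake, in-memory applicator stands in for a real Consul- or HTTP-backed label store.
	applicator := labels.NewFakeApplicator()

	// NewApplicatorScheduler turns any label applicator into a scheduler.Scheduler
	// that matches node selectors against node labels.
	var sched scheduler.Scheduler = scheduler.NewApplicatorScheduler(applicator)
	fmt.Printf("scheduler ready: %T\n", sched)
}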
Example 1: New
func New(
	fields fields.DaemonSet,
	dsStore dsstore.Store,
	kpStore kp.Store,
	applicator Labeler,
	watcher LabelWatcher,
	logger logging.Logger,
	healthChecker *checker.ConsulHealthChecker,
	rateLimitInterval time.Duration,
	cachedPodMatch bool,
) DaemonSet {
	return &daemonSet{
		DaemonSet: fields,
		dsStore: dsStore,
		kpStore: kpStore,
		logger: logger,
		applicator: applicator,
		watcher: watcher,
		scheduler: scheduler.NewApplicatorScheduler(applicator),
		healthChecker: healthChecker,
		currentReplication: nil,
		rateLimitInterval: rateLimitInterval,
		cachedPodMatch: cachedPodMatch,
	}
}
Developer: rudle | Project: p2 | Lines: 26 | Source file: daemon_set.go
Example 2: NewFarm
func NewFarm(
	kpStore kp.Store,
	dsStore dsstore.Store,
	applicator labels.Applicator,
	sessions <-chan string,
	logger logging.Logger,
	alerter alerting.Alerter,
	healthChecker *checker.ConsulHealthChecker,
	rateLimitInterval time.Duration,
) *Farm {
	if alerter == nil {
		alerter = alerting.NewNop()
	}
	return &Farm{
		kpStore: kpStore,
		dsStore: dsStore,
		scheduler: scheduler.NewApplicatorScheduler(applicator),
		applicator: applicator,
		sessions: sessions,
		children: make(map[fields.ID]*childDS),
		logger: logger,
		alerter: alerter,
		healthChecker: healthChecker,
		rateLimitInterval: rateLimitInterval,
	}
}
Developer: drcapulet | Project: p2 | Lines: 27 | Source file: farm.go
Example 3: NewFarm
func NewFarm(
	store store,
	dsStore dsstore.Store,
	labeler Labeler,
	watcher LabelWatcher,
	sessions <-chan string,
	logger logging.Logger,
	alerter alerting.Alerter,
	healthChecker *checker.ConsulHealthChecker,
	rateLimitInterval time.Duration,
	cachedPodMatch bool,
) *Farm {
	if alerter == nil {
		alerter = alerting.NewNop()
	}
	return &Farm{
		store: store,
		dsStore: dsStore,
		scheduler: scheduler.NewApplicatorScheduler(labeler),
		labeler: labeler,
		watcher: watcher,
		sessions: sessions,
		children: make(map[fields.ID]*childDS),
		logger: logger,
		alerter: alerter,
		healthChecker: healthChecker,
		rateLimitInterval: rateLimitInterval,
		cachedPodMatch: cachedPodMatch,
	}
}
Developer: petertseng | Project: p2 | Lines: 31 | Source file: farm.go
Example 4: main
func main() {
	kingpin.Version(version.VERSION)
	cmd, opts, labeler := flags.ParseWithConsulOptions()

	logger := logging.NewLogger(logrus.Fields{})
	if *logJSON {
		logger.Logger.Formatter = &logrus.JSONFormatter{}
	} else {
		logger.Logger.Formatter = &logrus.TextFormatter{}
	}
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	sched := scheduler.NewApplicatorScheduler(labeler)
	rctl := rctlParams{
		httpClient: httpClient,
		baseClient: client,
		rcs: rcstore.NewConsul(client, labeler, 3),
		rls: rollstore.NewConsul(client, labeler, nil),
		kps: kp.NewConsulStore(client),
		labeler: labeler,
		sched: sched,
		hcheck: checker.NewConsulHealthChecker(client),
		logger: logger,
	}

	switch cmd {
	case cmdCreateText:
		rctl.Create(*createManifest, *createNodeSel, *createPodLabels, *createRCLabels)
	case cmdDeleteText:
		rctl.Delete(*deleteID, *deleteForce)
	case cmdReplicasText:
		rctl.SetReplicas(*replicasID, *replicasNum)
	case cmdListText:
		rctl.List(*listJSON)
	case cmdGetText:
		rctl.Get(*getID, *getManifest)
	case cmdEnableText:
		rctl.Enable(*enableID)
	case cmdDisableText:
		rctl.Disable(*disableID)
	case cmdRollText:
		rctl.RollingUpdate(*rollOldID, *rollNewID, *rollWant, *rollNeed, *rollPagerdutyServiceKey)
	case cmdSchedupText:
		rctl.ScheduleUpdate(*schedupOldID, *schedupNewID, *schedupWant, *schedupNeed)
	}
}
Developer: petertseng | Project: p2 | Lines: 55 | Source file: main.go
Example 5: setup
func setup(t *testing.T) (
	rcStore rcstore.Store,
	kpStore fakeKpStore,
	applicator labels.Applicator,
	rc *replicationController,
	alerter *alertingtest.AlertRecorder,
) {
	rcStore = rcstore.NewFake()

	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID("testPod")
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add("nodeQuality", klabels.EqualsOperator, []string{"good"})
	podLabels := map[string]string{"podTest": "successful"}

	rcData, err := rcStore.Create(podManifest, nodeSelector, podLabels)
	Assert(t).IsNil(err, "expected no error creating request")

	kpStore = fakeKpStore{manifests: make(map[string]manifest.Manifest)}
	applicator = labels.NewFakeApplicator()
	alerter = alertingtest.NewRecorder()

	rc = New(
		rcData,
		&kpStore,
		rcStore,
		scheduler.NewApplicatorScheduler(applicator),
		applicator,
		logging.DefaultLogger,
		alerter,
	).(*replicationController)

	return
}
Developer: drcapulet | Project: p2 | Lines: 36 | Source file: replication_controller_test.go
Example 6: TestMultipleFarms
func TestMultipleFarms(t *testing.T) {
	retryInterval = testFarmRetryInterval
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	session := kptest.NewSession()
	firstLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "firstMultiple",
	})

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1", "node2", "node3")
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("good_node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	//
	// Instantiate first farm
	//
	firstFarm := &Farm{
		dsStore: dsStore,
		kpStore: kpStore,
		scheduler: scheduler.NewApplicatorScheduler(applicator),
		applicator: applicator,
		children: make(map[ds_fields.ID]*childDS),
		session: session,
		logger: firstLogger,
		alerter: alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	firstQuitCh := make(chan struct{})
	defer close(firstQuitCh)
	go func() {
		go firstFarm.cleanupDaemonSetPods(firstQuitCh)
		firstFarm.mainLoop(firstQuitCh)
	}()

	//
	// Instantiate second farm
	//
	secondLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "secondMultiple",
	})
	secondFarm := &Farm{
		dsStore: dsStore,
		kpStore: kpStore,
		scheduler: scheduler.NewApplicatorScheduler(applicator),
		applicator: applicator,
		children: make(map[ds_fields.ID]*childDS),
		session: session,
		logger: secondLogger,
		alerter: alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	secondQuitCh := make(chan struct{})
	defer close(secondQuitCh)
	go func() {
		go secondFarm.cleanupDaemonSetPods(secondQuitCh)
		secondFarm.mainLoop(secondQuitCh)
	}()

	// Make two daemon sets with different node selectors
	// First daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Second daemon set
	anotherNodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, anotherNodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")

	// Make a node and verify that it was scheduled by the first daemon set
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")
	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make a second node and verify that it was scheduled by the second daemon set
	applicator.SetLabel(labels.NODE, "node2", pc_fields.AvailabilityZoneLabel, "az2")
	labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
	// ... remainder of this example omitted ...
Developer: drcapulet | Project: p2 | Lines: 101 | Source file: farm_test.go
Example 7: TestCleanupPods
func TestCleanupPods(t *testing.T) {
	retryInterval = testFarmRetryInterval
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	// Make some dangling pod labels, instantiate a farm, and expect it to clean them up
	podID := types.PodID("testPod")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	var allNodes []types.NodeName
	allNodes = append(allNodes)
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		allNodes = append(allNodes, types.NodeName(nodeName))
	}
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		err := applicator.SetLabel(labels.POD, id, DSIDLabel, "impossible_id")
		Assert(t).IsNil(err, "Expected no error labeling node")

		_, err = kpStore.SetPod(kp.INTENT_TREE, types.NodeName(nodeName), podManifest)
		Assert(t).IsNil(err, "Expected no error added pod to intent tree")
	}

	// Assert that precondition is true
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		labeled, err := applicator.GetLabels(labels.POD, id)
		Assert(t).IsNil(err, "Expected no error getting labels")
		Assert(t).IsTrue(labeled.Labels.Has(DSIDLabel), "Precondition failed: Pod must have a dsID label")

		_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
		Assert(t).IsNil(err, "Expected no error getting pod from intent store")
		Assert(t).AreNotEqual(err, pods.NoCurrentManifest, "Precondition failed: Pod was not in intent store")
	}

	// Instantiate farm
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "cleanupPods",
	})
	dsf := &Farm{
		dsStore: dsStore,
		kpStore: kpStore,
		scheduler: scheduler.NewApplicatorScheduler(applicator),
		applicator: applicator,
		children: make(map[ds_fields.ID]*childDS),
		session: kptest.NewSession(),
		logger: logger,
		alerter: alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	// Make sure there are no nodes left
	for i := 0; i < 10; i++ {
		nodeName := fmt.Sprintf("node%v", i)
		id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
		_, err := waitForPodLabel(applicator, false, id)
		Assert(t).IsNil(err, "Expected pod not to have a dsID label")

		condition := func() error {
			_, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
			if err != pods.NoCurrentManifest {
				return util.Errorf("Expected pod to be deleted in intent store")
			}
			return nil
		}
		err = waitForCondition(condition)
		Assert(t).IsNil(err, "Error cleaning up pods")
	}
}
Developer: drcapulet | Project: p2 | Lines: 88 | Source file: farm_test.go
Example 8: TestContendNodes
// Tests dsContends for changes to both daemon sets and nodes
func TestContendNodes(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendNodes",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	allNodes = append(allNodes, "node1")
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore: dsStore,
		kpStore: kpStore,
		scheduler: scheduler.NewApplicatorScheduler(applicator),
		applicator: applicator,
		children: make(map[ds_fields.ID]*childDS),
		session: kptest.NewSession(),
		logger: logger,
		alerter: alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Check for contention between two daemon sets among their nodes
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
	dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, dsData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Make a node and verify that it was scheduled
	applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")
	labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	dsID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")

	// Make another daemon set with a contending AvailabilityZoneLabel and verify
	// that it gets disabled and that the node label does not change
	anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
	Assert(t).AreNotEqual(dsData.ID.String(), anotherDSData.ID.String(), "Precondition failed")
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, anotherDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
	Assert(t).IsNil(err, "Expected pod to have a dsID label")
	anotherDSID := labeled.Labels.Get(DSIDLabel)
	Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")

	// Expect the new daemon set to be disabled both in the farm and in the dsStore
	err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling daemon set!")

	//
	// Make a third daemon set and update its node selector to force a contend,
	// then verify that it has been disabled and the node hasn't been overwritten
	//
	anotherSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"undefined"})
	badDS, err := dsStore.Create(podManifest, minHealth, clusterName, anotherSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, badDS.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.NodeSelector = nodeSelector
		return dsToUpdate, nil
	}
	badDS, err = dsStore.MutateDS(badDS.ID, mutator)
	Assert(t).IsNil(err, "Expected no error mutating daemon set")
	err = waitForMutateSelector(dsf, badDS)
	// ... remainder of this example omitted ...
Developer: drcapulet | Project: p2 | Lines: 101 | Source file: farm_test.go
Example 9: TestContendSelectors
// Tests dsContends for NodeSelectors
func TestContendSelectors(t *testing.T) {
	retryInterval = testFarmRetryInterval

	//
	// Instantiate farm
	//
	dsStore := dsstoretest.NewFake()
	kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
	applicator := labels.NewFakeApplicator()
	logger := logging.DefaultLogger.SubLogger(logrus.Fields{
		"farm": "contendSelectors",
	})
	preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
	preparer.Enable()
	defer preparer.Disable()

	var allNodes []types.NodeName
	happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)

	dsf := &Farm{
		dsStore: dsStore,
		kpStore: kpStore,
		scheduler: scheduler.NewApplicatorScheduler(applicator),
		applicator: applicator,
		children: make(map[ds_fields.ID]*childDS),
		session: kptest.NewSession(),
		logger: logger,
		alerter: alerting.NewNop(),
		healthChecker: &happyHealthChecker,
	}
	quitCh := make(chan struct{})
	defer close(quitCh)
	go func() {
		go dsf.cleanupDaemonSetPods(quitCh)
		dsf.mainLoop(quitCh)
	}()

	//
	// Make two daemon sets with an Everything selector and verify that they trivially
	// contend and that only the second daemon set gets disabled
	//
	// Make a daemon set
	podID := types.PodID("testPod")
	minHealth := 0
	clusterName := ds_fields.ClusterName("some_name")
	manifestBuilder := manifest.NewBuilder()
	manifestBuilder.SetID(podID)
	podManifest := manifestBuilder.GetManifest()

	everythingSelector := klabels.Everything()
	firstDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, firstDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	secondDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, secondDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")

	// Verify that only the second daemon set is disabled
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, false)
	Assert(t).IsNil(err, "First daemon set should not be disabled")
	err = waitForDisabled(dsf, dsStore, secondDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling second daemon set")

	// Add another daemon set with different selector and verify it gets disabled
	someSelector := klabels.Everything().
		Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"nowhere"})
	thirdDSData, err := dsStore.Create(podManifest, minHealth, clusterName, someSelector, podID, replicationTimeout)
	Assert(t).IsNil(err, "Expected no error creating request")
	err = waitForCreate(dsf, thirdDSData.ID)
	Assert(t).IsNil(err, "Expected daemon set to be created")
	err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling third daemon set")

	//
	// Disable first daemon set, then enable second and third daemon sets in that order
	// and then there should be a contend on the third daemon set
	//
	disableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = true
		return dsToUpdate, nil
	}
	enableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
		dsToUpdate.Disabled = false
		return dsToUpdate, nil
	}

	// Disable first ds and verify it is disabled
	_, err = dsStore.MutateDS(firstDSData.ID, disableMutator)
	Assert(t).IsNil(err, "Expected no error getting daemon set")
	err = waitForDisabled(dsf, dsStore, firstDSData.ID, true)
	Assert(t).IsNil(err, "Error disabling first daemon set")
	// ... remainder of this example omitted ...
Developer: drcapulet | Project: p2 | Lines: 101 | Source file: farm_test.go
Example 10: main
func main() {
	// Parse custom flags + standard Consul routing options
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()

	// Set up the logger
	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = new(logrus.TextFormatter)
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).
				Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}

	// Initialize the myriad of different storage components
	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	kpStore := kp.NewConsulStore(client)
	rcStore := rcstore.NewConsul(client, RetryCount)
	rollStore := rollstore.NewConsul(client, nil)
	healthChecker := checker.NewConsulHealthChecker(client)
	labeler := labels.NewConsulApplicator(client, RetryCount)

	var sched scheduler.Scheduler
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = scheduler.NewApplicatorScheduler(httpLabeler)
	} else {
		sched = scheduler.NewApplicatorScheduler(labeler)
	}

	// Start acquiring sessions
	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name: SessionName(),
		LockDelay: 5 * time.Second,
		Behavior: api.SessionBehaviorDelete,
		TTL: "15s",
	}, client, sessions, nil, logger)
	pub := stream.NewStringValuePublisher(sessions, "")

	alerter := alerting.NewNop()
	if *pagerdutyServiceKey != "" {
		var err error
		alerter, err = alerting.NewPagerduty(*pagerdutyServiceKey, httpClient)
		if err != nil {
			logger.WithError(err).Fatalln(
				"Unable to initialize pagerduty alerter",
			)
		}
	}

	// Run the farms!
	go rc.NewFarm(
		kpStore,
		rcStore,
		sched,
		labeler,
		pub.Subscribe().Chan(),
		logger,
		klabels.Everything(),
		alerter,
	).Start(nil)
	roll.NewFarm(
		roll.UpdateFactory{
			KPStore: kpStore,
			RCStore: rcStore,
			HealthChecker: healthChecker,
			Labeler: labeler,
			Scheduler: sched,
		},
		kpStore,
		rollStore,
		rcStore,
		pub.Subscribe().Chan(),
		logger,
		labeler,
		klabels.Everything(),
		alerter,
	).Start(nil)
}
Developer: drcapulet | Project: p2 | Lines: 92 | Source file: main.go
Note: The github.com/square/p2/pkg/scheduler.NewApplicatorScheduler examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub. The snippets come from community open-source projects; copyright remains with the original authors, and redistribution or reuse should follow the corresponding project's License. Please do not republish without permission.