This article collects typical usage examples of the Golang function NewNop from github.com/square/p2/pkg/alerting. If you have been wondering what NewNop does, how it is used, and what real-world calls to it look like, the curated examples below should help.
The following shows 12 code examples of the NewNop function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
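All of the examples below follow the same idiom: a constructor accepts an alerting.Alerter, and when the caller passes nil it substitutes the no-op implementation returned by alerting.NewNop(), so later code can send alerts unconditionally. The following is a minimal, self-contained sketch of that pattern; the Incident type, the Alerter method set, and the Farm/NewFarm consumer are assumptions made for illustration only and are not the actual p2 API.

package main

import "fmt"

// Incident is a hypothetical alert payload used only for this sketch.
type Incident struct {
    Description string
}

// Alerter is an assumed stand-in for the alerting interface; the real
// p2 alerting package may define a different method set.
type Alerter interface {
    Alert(incident Incident) error
}

// nopAlerter satisfies Alerter but deliberately does nothing, which is the
// role alerting.NewNop() plays in the examples below.
type nopAlerter struct{}

func (nopAlerter) Alert(Incident) error { return nil }

// NewNop returns the no-op implementation.
func NewNop() Alerter { return nopAlerter{} }

// Farm is a hypothetical consumer showing the nil-check idiom: callers that
// do not care about alerting pass nil, and the constructor substitutes the
// no-op alerter so the rest of the code can call Alert unconditionally.
type Farm struct {
    alerter Alerter
}

func NewFarm(alerter Alerter) *Farm {
    if alerter == nil {
        alerter = NewNop()
    }
    return &Farm{alerter: alerter}
}

func main() {
    farm := NewFarm(nil) // no real alerter wired up; alerts become no-ops
    if err := farm.alerter.Alert(Incident{Description: "replica count mismatch"}); err != nil {
        fmt.Println("alert failed:", err)
    }
}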
Example 1: NewFarm
func NewFarm(
    kpStore kp.Store,
    rcs rcstore.Store,
    scheduler scheduler.Scheduler,
    labeler labels.Applicator,
    sessions <-chan string,
    logger logging.Logger,
    rcSelector klabels.Selector,
    alerter alerting.Alerter,
) *Farm {
    if alerter == nil {
        alerter = alerting.NewNop()
    }
    return &Farm{
        kpStore:    kpStore,
        rcStore:    rcs,
        scheduler:  scheduler,
        labeler:    labeler,
        sessions:   sessions,
        logger:     logger,
        children:   make(map[fields.ID]childRC),
        alerter:    alerter,
        rcSelector: rcSelector,
        metrics:    rcmetrics.NewMetrics(logger),
    }
}
Author: drcapulet, Project: p2, Lines: 27, Source file: farm.go
Example 2: New
func (f UpdateFactory) New(u roll_fields.Update, l logging.Logger, session kp.Session, alerter alerting.Alerter) Update {
    if alerter == nil {
        alerter = alerting.NewNop()
    }
    return NewUpdate(u, f.KPStore, f.RCStore, f.HealthChecker, f.Labeler, f.Scheduler, l, session, alerter)
}
Author: rudle, Project: p2, Lines: 7, Source file: farm.go
Example 3: NewFarm
func NewFarm(
    factory Factory,
    kps kp.Store,
    rls rollstore.Store,
    rcs RCGetter,
    sessions <-chan string,
    logger logging.Logger,
    labeler rc.Labeler,
    rcSelector klabels.Selector,
    alerter alerting.Alerter,
) *Farm {
    if alerter == nil {
        alerter = alerting.NewNop()
    }
    return &Farm{
        factory:    factory,
        kps:        kps,
        rls:        rls,
        rcs:        rcs,
        sessions:   sessions,
        logger:     logger,
        children:   make(map[roll_fields.ID]childRU),
        labeler:    labeler,
        rcSelector: rcSelector,
        alerter:    alerter,
    }
}
Author: rudle, Project: p2, Lines: 27, Source file: farm.go
Example 4: NewFarm
func NewFarm(
    store store,
    rcs rcstore.Store,
    scheduler scheduler.Scheduler,
    labeler Labeler,
    sessions <-chan string,
    logger logging.Logger,
    rcSelector klabels.Selector,
    alerter alerting.Alerter,
    rcWatchPauseTime time.Duration,
) *Farm {
    if alerter == nil {
        alerter = alerting.NewNop()
    }
    return &Farm{
        store:            store,
        rcStore:          rcs,
        scheduler:        scheduler,
        labeler:          labeler,
        sessions:         sessions,
        logger:           logger,
        children:         make(map[fields.ID]childRC),
        alerter:          alerter,
        rcSelector:       rcSelector,
        rcWatchPauseTime: rcWatchPauseTime,
    }
}
Author: petertseng, Project: p2, Lines: 28, Source file: farm.go
Example 5: NewUpdate
// Create a new Update. The kp.Store, rcstore.Store, labels.Applicator and
// scheduler.Scheduler arguments should be the same as those of the RCs themselves. The
// session must be valid for the lifetime of the Update; maintaining this is the
// responsibility of the caller.
func NewUpdate(
    f fields.Update,
    kps kp.Store,
    rcs rcstore.Store,
    hcheck checker.ConsulHealthChecker,
    labeler rc.Labeler,
    sched scheduler.Scheduler,
    logger logging.Logger,
    session kp.Session,
    alerter alerting.Alerter,
) Update {
    if alerter == nil {
        alerter = alerting.NewNop()
    }
    logger = logger.SubLogger(logrus.Fields{
        "desired_replicas": f.DesiredReplicas,
        "minimum_replicas": f.MinimumReplicas,
    })
    return &update{
        Update:  f,
        kps:     kps,
        rcs:     rcs,
        hcheck:  hcheck,
        labeler: labeler,
        sched:   sched,
        logger:  logger,
        session: session,
        alerter: alerter,
    }
}
Author: rudle, Project: p2, Lines: 35, Source file: run_update.go
Example 6: New
func New(
    fields fields.RC,
    kpStore kpStore,
    rcStore rcstore.Store,
    scheduler scheduler.Scheduler,
    podApplicator labels.Applicator,
    logger logging.Logger,
    alerter alerting.Alerter,
) ReplicationController {
    if alerter == nil {
        alerter = alerting.NewNop()
    }
    return &replicationController{
        RC:            fields,
        logger:        logger,
        kpStore:       kpStore,
        rcStore:       rcStore,
        scheduler:     scheduler,
        podApplicator: podApplicator,
        alerter:       alerter,
    }
}
Author: drcapulet, Project: p2, Lines: 24, Source file: replication_controller.go
Example 7: TestMultipleFarms
func TestMultipleFarms(t *testing.T) {
    retryInterval = testFarmRetryInterval
    dsStore := dsstoretest.NewFake()
    kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
    applicator := labels.NewFakeApplicator()
    preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
    preparer.Enable()
    defer preparer.Disable()
    session := kptest.NewSession()
    firstLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
        "farm": "firstMultiple",
    })
    var allNodes []types.NodeName
    allNodes = append(allNodes, "node1", "node2", "node3")
    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("good_node%v", i)
        allNodes = append(allNodes, types.NodeName(nodeName))
    }
    happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)
    //
    // Instantiate first farm
    //
    firstFarm := &Farm{
        dsStore:       dsStore,
        kpStore:       kpStore,
        scheduler:     scheduler.NewApplicatorScheduler(applicator),
        applicator:    applicator,
        children:      make(map[ds_fields.ID]*childDS),
        session:       session,
        logger:        firstLogger,
        alerter:       alerting.NewNop(),
        healthChecker: &happyHealthChecker,
    }
    firstQuitCh := make(chan struct{})
    defer close(firstQuitCh)
    go func() {
        go firstFarm.cleanupDaemonSetPods(firstQuitCh)
        firstFarm.mainLoop(firstQuitCh)
    }()
    //
    // Instantiate second farm
    //
    secondLogger := logging.DefaultLogger.SubLogger(logrus.Fields{
        "farm": "secondMultiple",
    })
    secondFarm := &Farm{
        dsStore:       dsStore,
        kpStore:       kpStore,
        scheduler:     scheduler.NewApplicatorScheduler(applicator),
        applicator:    applicator,
        children:      make(map[ds_fields.ID]*childDS),
        session:       session,
        logger:        secondLogger,
        alerter:       alerting.NewNop(),
        healthChecker: &happyHealthChecker,
    }
    secondQuitCh := make(chan struct{})
    defer close(secondQuitCh)
    go func() {
        go secondFarm.cleanupDaemonSetPods(secondQuitCh)
        secondFarm.mainLoop(secondQuitCh)
    }()
    // Make two daemon sets with different node selectors
    // First daemon set
    podID := types.PodID("testPod")
    minHealth := 0
    clusterName := ds_fields.ClusterName("some_name")
    manifestBuilder := manifest.NewBuilder()
    manifestBuilder.SetID(podID)
    podManifest := manifestBuilder.GetManifest()
    nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
    dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")
    // Second daemon set
    anotherNodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az2"})
    anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, anotherNodeSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")
    // Make a node and verify that it was scheduled by the first daemon set
    applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")
    labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
    Assert(t).IsNil(err, "Expected pod to have a dsID label")
    dsID := labeled.Labels.Get(DSIDLabel)
    Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")
    // Make a second node and verify that it was scheduled by the second daemon set
    applicator.SetLabel(labels.NODE, "node2", pc_fields.AvailabilityZoneLabel, "az2")
    labeled, err = waitForPodLabel(applicator, true, "node2/testPod")
    //......... the rest of this function is omitted .........
Author: drcapulet, Project: p2, Lines: 101, Source file: farm_test.go
Example 8: TestCleanupPods
func TestCleanupPods(t *testing.T) {
    retryInterval = testFarmRetryInterval
    dsStore := dsstoretest.NewFake()
    kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
    applicator := labels.NewFakeApplicator()
    preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
    preparer.Enable()
    defer preparer.Disable()
    // Make some dangling pod labels, then instantiate a farm and expect it to clean them up
    podID := types.PodID("testPod")
    manifestBuilder := manifest.NewBuilder()
    manifestBuilder.SetID(podID)
    podManifest := manifestBuilder.GetManifest()
    var allNodes []types.NodeName
    allNodes = append(allNodes)
    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("node%v", i)
        allNodes = append(allNodes, types.NodeName(nodeName))
    }
    happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)
    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("node%v", i)
        id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
        err := applicator.SetLabel(labels.POD, id, DSIDLabel, "impossible_id")
        Assert(t).IsNil(err, "Expected no error labeling node")
        _, err = kpStore.SetPod(kp.INTENT_TREE, types.NodeName(nodeName), podManifest)
        Assert(t).IsNil(err, "Expected no error added pod to intent tree")
    }
    // Assert that precondition is true
    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("node%v", i)
        id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
        labeled, err := applicator.GetLabels(labels.POD, id)
        Assert(t).IsNil(err, "Expected no error getting labels")
        Assert(t).IsTrue(labeled.Labels.Has(DSIDLabel), "Precondition failed: Pod must have a dsID label")
        _, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
        Assert(t).IsNil(err, "Expected no error getting pod from intent store")
        Assert(t).AreNotEqual(err, pods.NoCurrentManifest, "Precondition failed: Pod was not in intent store")
    }
    // Instantiate farm
    logger := logging.DefaultLogger.SubLogger(logrus.Fields{
        "farm": "cleanupPods",
    })
    dsf := &Farm{
        dsStore:       dsStore,
        kpStore:       kpStore,
        scheduler:     scheduler.NewApplicatorScheduler(applicator),
        applicator:    applicator,
        children:      make(map[ds_fields.ID]*childDS),
        session:       kptest.NewSession(),
        logger:        logger,
        alerter:       alerting.NewNop(),
        healthChecker: &happyHealthChecker,
    }
    quitCh := make(chan struct{})
    defer close(quitCh)
    go func() {
        go dsf.cleanupDaemonSetPods(quitCh)
        dsf.mainLoop(quitCh)
    }()
    // Make sure there are no pods left on any node
    for i := 0; i < 10; i++ {
        nodeName := fmt.Sprintf("node%v", i)
        id := labels.MakePodLabelKey(types.NodeName(nodeName), podID)
        _, err := waitForPodLabel(applicator, false, id)
        Assert(t).IsNil(err, "Expected pod not to have a dsID label")
        condition := func() error {
            _, _, err = kpStore.Pod(kp.INTENT_TREE, types.NodeName(nodeName), podID)
            if err != pods.NoCurrentManifest {
                return util.Errorf("Expected pod to be deleted in intent store")
            }
            return nil
        }
        err = waitForCondition(condition)
        Assert(t).IsNil(err, "Error cleaning up pods")
    }
}
Author: drcapulet, Project: p2, Lines: 88, Source file: farm_test.go
Example 9: TestContendNodes
// Tests dsContends for changes to both daemon sets and nodes
func TestContendNodes(t *testing.T) {
    retryInterval = testFarmRetryInterval
    //
    // Instantiate farm
    //
    dsStore := dsstoretest.NewFake()
    kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
    applicator := labels.NewFakeApplicator()
    logger := logging.DefaultLogger.SubLogger(logrus.Fields{
        "farm": "contendNodes",
    })
    preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
    preparer.Enable()
    defer preparer.Disable()
    var allNodes []types.NodeName
    allNodes = append(allNodes, "node1")
    happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)
    dsf := &Farm{
        dsStore:       dsStore,
        kpStore:       kpStore,
        scheduler:     scheduler.NewApplicatorScheduler(applicator),
        applicator:    applicator,
        children:      make(map[ds_fields.ID]*childDS),
        session:       kptest.NewSession(),
        logger:        logger,
        alerter:       alerting.NewNop(),
        healthChecker: &happyHealthChecker,
    }
    quitCh := make(chan struct{})
    defer close(quitCh)
    go func() {
        go dsf.cleanupDaemonSetPods(quitCh)
        dsf.mainLoop(quitCh)
    }()
    //
    // Check for contention between two daemon sets among their nodes
    //
    // Make a daemon set
    podID := types.PodID("testPod")
    minHealth := 0
    clusterName := ds_fields.ClusterName("some_name")
    manifestBuilder := manifest.NewBuilder()
    manifestBuilder.SetID(podID)
    podManifest := manifestBuilder.GetManifest()
    nodeSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"az1"})
    dsData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")
    err = waitForCreate(dsf, dsData.ID)
    Assert(t).IsNil(err, "Expected daemon set to be created")
    // Make a node and verify that it was scheduled
    applicator.SetLabel(labels.NODE, "node1", pc_fields.AvailabilityZoneLabel, "az1")
    labeled, err := waitForPodLabel(applicator, true, "node1/testPod")
    Assert(t).IsNil(err, "Expected pod to have a dsID label")
    dsID := labeled.Labels.Get(DSIDLabel)
    Assert(t).AreEqual(dsData.ID.String(), dsID, "Unexpected dsID labeled")
    // Make another daemon set with a contending AvailabilityZoneLabel and verify
    // that it gets disabled and that the node label does not change
    anotherDSData, err := dsStore.Create(podManifest, minHealth, clusterName, nodeSelector, podID, replicationTimeout)
    Assert(t).AreNotEqual(dsData.ID.String(), anotherDSData.ID.String(), "Precondition failed")
    Assert(t).IsNil(err, "Expected no error creating request")
    err = waitForCreate(dsf, anotherDSData.ID)
    Assert(t).IsNil(err, "Expected daemon set to be created")
    labeled, err = waitForPodLabel(applicator, true, "node1/testPod")
    Assert(t).IsNil(err, "Expected pod to have a dsID label")
    anotherDSID := labeled.Labels.Get(DSIDLabel)
    Assert(t).AreEqual(dsData.ID.String(), anotherDSID, "Expected pod label not to be overwritten")
    // Expect the new daemon set to be disabled both in the farm and in the dsStore
    err = waitForDisabled(dsf, dsStore, anotherDSData.ID, true)
    Assert(t).IsNil(err, "Error disabling daemon set!")
    //
    // Make a third daemon set and update its node selector to force a contend,
    // then verify that it has been disabled and the node hasn't been overwritten
    //
    anotherSelector := klabels.Everything().Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"undefined"})
    badDS, err := dsStore.Create(podManifest, minHealth, clusterName, anotherSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")
    err = waitForCreate(dsf, badDS.ID)
    Assert(t).IsNil(err, "Expected daemon set to be created")
    mutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
        dsToUpdate.NodeSelector = nodeSelector
        return dsToUpdate, nil
    }
    badDS, err = dsStore.MutateDS(badDS.ID, mutator)
    Assert(t).IsNil(err, "Expected no error mutating daemon set")
    err = waitForMutateSelector(dsf, badDS)
    //......... the rest of this function is omitted .........
Author: drcapulet, Project: p2, Lines: 101, Source file: farm_test.go
Example 10: TestContendSelectors
// Tests dsContends for NodeSelectors
func TestContendSelectors(t *testing.T) {
    retryInterval = testFarmRetryInterval
    //
    // Instantiate farm
    //
    dsStore := dsstoretest.NewFake()
    kpStore := kptest.NewFakePodStore(make(map[kptest.FakePodStoreKey]manifest.Manifest), make(map[string]kp.WatchResult))
    applicator := labels.NewFakeApplicator()
    logger := logging.DefaultLogger.SubLogger(logrus.Fields{
        "farm": "contendSelectors",
    })
    preparer := kptest.NewFakePreparer(kpStore, logging.DefaultLogger)
    preparer.Enable()
    defer preparer.Disable()
    var allNodes []types.NodeName
    happyHealthChecker := fake_checker.HappyHealthChecker(allNodes)
    dsf := &Farm{
        dsStore:       dsStore,
        kpStore:       kpStore,
        scheduler:     scheduler.NewApplicatorScheduler(applicator),
        applicator:    applicator,
        children:      make(map[ds_fields.ID]*childDS),
        session:       kptest.NewSession(),
        logger:        logger,
        alerter:       alerting.NewNop(),
        healthChecker: &happyHealthChecker,
    }
    quitCh := make(chan struct{})
    defer close(quitCh)
    go func() {
        go dsf.cleanupDaemonSetPods(quitCh)
        dsf.mainLoop(quitCh)
    }()
    //
    // Make two daemon sets with an everything selector and verify that they trivially
    // contend and that only the second daemon set gets disabled
    //
    // Make a daemon set
    podID := types.PodID("testPod")
    minHealth := 0
    clusterName := ds_fields.ClusterName("some_name")
    manifestBuilder := manifest.NewBuilder()
    manifestBuilder.SetID(podID)
    podManifest := manifestBuilder.GetManifest()
    everythingSelector := klabels.Everything()
    firstDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")
    err = waitForCreate(dsf, firstDSData.ID)
    Assert(t).IsNil(err, "Expected daemon set to be created")
    secondDSData, err := dsStore.Create(podManifest, minHealth, clusterName, everythingSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")
    err = waitForCreate(dsf, secondDSData.ID)
    Assert(t).IsNil(err, "Expected daemon set to be created")
    // Verify that only the second daemon set is disabled
    err = waitForDisabled(dsf, dsStore, firstDSData.ID, false)
    Assert(t).IsNil(err, "First daemon set should not be disabled")
    err = waitForDisabled(dsf, dsStore, secondDSData.ID, true)
    Assert(t).IsNil(err, "Error disabling second daemon set")
    // Add another daemon set with different selector and verify it gets disabled
    someSelector := klabels.Everything().
        Add(pc_fields.AvailabilityZoneLabel, klabels.EqualsOperator, []string{"nowhere"})
    thirdDSData, err := dsStore.Create(podManifest, minHealth, clusterName, someSelector, podID, replicationTimeout)
    Assert(t).IsNil(err, "Expected no error creating request")
    err = waitForCreate(dsf, thirdDSData.ID)
    Assert(t).IsNil(err, "Expected daemon set to be created")
    err = waitForDisabled(dsf, dsStore, thirdDSData.ID, true)
    Assert(t).IsNil(err, "Error disabling third daemon set")
    //
    // Disable first daemon set, then enable second and third daemon sets in that order
    // and then there should be a contend on the third daemon set
    //
    disableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
        dsToUpdate.Disabled = true
        return dsToUpdate, nil
    }
    enableMutator := func(dsToUpdate ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
        dsToUpdate.Disabled = false
        return dsToUpdate, nil
    }
    // Disable first ds and verify it is disabled
    _, err = dsStore.MutateDS(firstDSData.ID, disableMutator)
    Assert(t).IsNil(err, "Expected no error getting daemon set")
    err = waitForDisabled(dsf, dsStore, firstDSData.ID, true)
    Assert(t).IsNil(err, "Error disabling first daemon set")
    //......... the rest of this function is omitted .........
Author: drcapulet, Project: p2, Lines: 101, Source file: farm_test.go
Example 11: RollingUpdate
func (r rctlParams) RollingUpdate(oldID, newID string, want, need int, pagerdutyServiceKey string) {
    if want < need {
        r.logger.WithFields(logrus.Fields{
            "want": want,
            "need": need,
        }).Fatalln("Cannot run update with desired replicas less than minimum replicas")
    }
    sessions := make(chan string)
    quit := make(chan struct{})
    go consulutil.SessionManager(api.SessionEntry{
        Name:      SessionName(),
        LockDelay: 5 * time.Second,
        Behavior:  api.SessionBehaviorDelete,
        TTL:       "15s",
    }, r.baseClient, sessions, quit, r.logger)
    sessionID := <-sessions
    if sessionID == "" {
        r.logger.NoFields().Fatalln("Could not acquire session")
    }
    session := r.kps.NewUnmanagedSession(sessionID, "")
    alerter := alerting.NewNop()
    if pagerdutyServiceKey != "" {
        var err error
        alerter, err = alerting.NewPagerduty(pagerdutyServiceKey, r.httpClient)
        if err != nil {
            r.logger.WithError(err).Fatalln("Could not initialize pagerduty alerter")
        }
    }
    result := make(chan bool, 1)
    go func() {
        result <- roll.NewUpdate(roll_fields.Update{
            OldRC:           rc_fields.ID(oldID),
            NewRC:           rc_fields.ID(newID),
            DesiredReplicas: want,
            MinimumReplicas: need,
        }, r.kps, r.rcs, r.hcheck, r.labeler, r.sched, r.logger, session, alerter).Run(quit)
        close(result)
    }()
    signals := make(chan os.Signal, 2)
    signal.Notify(signals, syscall.SIGTERM, os.Interrupt)
LOOP:
    for {
        select {
        case <-signals:
            // try to clean up locks on ^C
            close(quit)
            // do not exit right away - the session and result channels will be
            // closed after the quit is requested, ensuring that the locks held
            // by the farm were released.
            r.logger.NoFields().Errorln("Got signal, exiting")
        case <-sessions:
            r.logger.NoFields().Fatalln("Lost session")
        case res := <-result:
            // done, either due to ^C (already printed message above) or
            // clean finish
            if res {
                r.logger.NoFields().Infoln("Done")
            }
            break LOOP
        }
    }
}
Author: petertseng, Project: p2, Lines: 68, Source file: main.go
Example 12: main
func main() {
    // Parse custom flags + standard Consul routing options
    kingpin.Version(version.VERSION)
    _, opts := flags.ParseWithConsulOptions()
    // Set up the logger
    logger := logging.NewLogger(logrus.Fields{})
    logger.Logger.Formatter = new(logrus.TextFormatter)
    if *logLevel != "" {
        lv, err := logrus.ParseLevel(*logLevel)
        if err != nil {
            logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).
                Fatalln("Could not parse log level")
        }
        logger.Logger.Level = lv
    }
    // Initialize the myriad of different storage components
    httpClient := cleanhttp.DefaultClient()
    client := kp.NewConsulClient(opts)
    kpStore := kp.NewConsulStore(client)
    rcStore := rcstore.NewConsul(client, RetryCount)
    rollStore := rollstore.NewConsul(client, nil)
    healthChecker := checker.NewConsulHealthChecker(client)
    labeler := labels.NewConsulApplicator(client, RetryCount)
    var sched scheduler.Scheduler
    if *labelEndpoint != "" {
        endpoint, err := url.Parse(*labelEndpoint)
        if err != nil {
            logger.WithErrorAndFields(err, logrus.Fields{
                "url": *labelEndpoint,
            }).Fatalln("Could not parse URL from label endpoint")
        }
        httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
        if err != nil {
            logger.WithError(err).Fatalln("Could not create label applicator from endpoint")
        }
        sched = scheduler.NewApplicatorScheduler(httpLabeler)
    } else {
        sched = scheduler.NewApplicatorScheduler(labeler)
    }
    // Start acquiring sessions
    sessions := make(chan string)
    go consulutil.SessionManager(api.SessionEntry{
        Name:      SessionName(),
        LockDelay: 5 * time.Second,
        Behavior:  api.SessionBehaviorDelete,
        TTL:       "15s",
    }, client, sessions, nil, logger)
    pub := stream.NewStringValuePublisher(sessions, "")
    alerter := alerting.NewNop()
    if *pagerdutyServiceKey != "" {
        var err error
        alerter, err = alerting.NewPagerduty(*pagerdutyServiceKey, httpClient)
        if err != nil {
            logger.WithError(err).Fatalln(
                "Unable to initialize pagerduty alerter",
            )
        }
    }
    // Run the farms!
    go rc.NewFarm(
        kpStore,
        rcStore,
        sched,
        labeler,
        pub.Subscribe().Chan(),
        logger,
        klabels.Everything(),
        alerter,
    ).Start(nil)
    roll.NewFarm(
        roll.UpdateFactory{
            KPStore:       kpStore,
            RCStore:       rcStore,
            HealthChecker: healthChecker,
            Labeler:       labeler,
            Scheduler:     sched,
        },
        kpStore,
        rollStore,
        rcStore,
        pub.Subscribe().Chan(),
        logger,
        labeler,
        klabels.Everything(),
        alerter,
    ).Start(nil)
}
Author: drcapulet, Project: p2, Lines: 92, Source file: main.go
Note: The github.com/square/p2/pkg/alerting.NewNop examples in this article were compiled from source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult the corresponding project's License before reusing or redistributing; do not repost without permission.