本文整理汇总了Golang中github.com/square/p2/pkg/logging.NewLogger函数的典型用法代码示例。如果您正苦于以下问题:Golang NewLogger函数的具体用法?Golang NewLogger怎么用?Golang NewLogger使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewLogger函数的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Golang代码示例。
示例1: basicLogger
// basicLogger builds a Logger whose every entry carries a "pod"
// field identifying the test pod.
func basicLogger() logging.Logger {
	fields := logrus.Fields{
		"pod": "testpod",
	}
	return logging.NewLogger(fields)
}
开发者ID:tomzhang,项目名称:p2,代码行数:7,代码来源:common_test_setup.go
示例2: TestUpdatePods
// UpdatePods looks at the pods currently being monitored and
// compares that to what the reality store indicates should be
// running. UpdatePods then shuts down the monitors for dead
// pods and creates PodWatch structs for new pods.
func TestUpdatePods(t *testing.T) {
	var current []PodWatch
	var reality []kp.ManifestResult
	// ids for current: 0, 1, 2, 3
	for i := 0; i < 4; i++ {
		current = append(current, *newWatch(types.PodID(strconv.Itoa(i))))
	}
	// ids for reality: 1, 2, test
	for i := 1; i < 3; i++ {
		// Health checking is not supported for uuid pods, so ensure that even
		// if /reality contains a uuid pod we don't actually watch its health
		uuidKeyResult := newManifestResult("some_uuid_pod")
		uuidKeyResult.PodUniqueKey = types.NewPodUUID()
		reality = append(reality, uuidKeyResult)
		reality = append(reality, newManifestResult(current[i].manifest.ID()))
	}
	reality = append(reality, newManifestResult("test"))
	// ids for pods: 1, 2, test
	// 0, 3 should have values in their shutdownCh
	logger := logging.NewLogger(logrus.Fields{})
	pods := updatePods(&MockHealthManager{}, nil, nil, current, reality, "", &logger)
	Assert(t).AreEqual(true, <-current[0].shutdownCh, "this PodWatch should have been shutdown")
	Assert(t).AreEqual(true, <-current[3].shutdownCh, "this PodWatch should have been shutdown")
	Assert(t).AreEqual(current[1].manifest.ID(), pods[0].manifest.ID(), "pod with id:1 should have been returned")
	// Fixed copy-paste in the failure message: this assertion checks id:2, not id:1.
	Assert(t).AreEqual(current[2].manifest.ID(), pods[1].manifest.ID(), "pod with id:2 should have been returned")
	Assert(t).AreEqual("test", string(pods[2].manifest.ID()), "should have added pod with id:test to list")
}
开发者ID:petertseng,项目名称:p2,代码行数:33,代码来源:health_test.go
示例3: main
// main wires up and runs the daemon-set farm: it builds the consul
// client, the daemon-set and KV stores, a health checker and a
// consul session stream, then starts the farm and runs until
// interrupted with ctrl-C.
func main() {
	quitCh := make(chan struct{})
	_, consulOpts, labeler := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(consulOpts)
	logger := logging.NewLogger(logrus.Fields{})
	// 3 is a retry count for the consul-backed daemon-set store —
	// NOTE(review): inferred from the NewConsul signature; confirm.
	dsStore := dsstore.NewConsul(client, 3, &logger)
	kpStore := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)
	// SessionManager publishes consul session IDs on the sessions
	// channel; the farm consumes them below.
	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, quitCh, logger)
	dsf := ds_farm.NewFarm(kpStore, dsStore, labeler, labels.NewConsulApplicator(client, 0), sessions, logger, nil, &healthChecker, 1*time.Second, *useCachePodMatches)
	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		close(quitCh)
	}()
	// Runs until quitCh is closed by the signal handler above.
	dsf.Start(quitCh)
}
开发者ID:petertseng,项目名称:p2,代码行数:30,代码来源:main.go
示例4: main
// main is the p2-preparer entry point. It loads configuration from
// the path in CONFIG_PATH, constructs the preparer, starts the
// manifest/hook/health watch goroutines (plus an optional /_status
// HTTP endpoint), and blocks until termination.
func main() {
	logger := logging.NewLogger(logrus.Fields{})
	// Configuration file location is taken from the environment;
	// there is no default, so a missing value is fatal.
	configPath := os.Getenv("CONFIG_PATH")
	if configPath == "" {
		logger.NoFields().Fatalln("No CONFIG_PATH variable was given")
	}
	preparerConfig, err := preparer.LoadPreparerConfig(configPath)
	if err != nil {
		logger.WithField("inner_err", err).Fatalln("could not load preparer config")
	}
	prep, err := preparer.New(preparerConfig, logger)
	if err != nil {
		logger.WithField("inner_err", err).Fatalln("Could not initialize preparer")
	}
	defer prep.Close()
	// Log the effective configuration once at startup for operators.
	logger.WithFields(logrus.Fields{
		"starting":    true,
		"node_name":   preparerConfig.NodeName,
		"consul":      preparerConfig.ConsulAddress,
		"hooks_dir":   preparerConfig.HooksDirectory,
		"status_port": preparerConfig.StatusPort,
		"auth_type":   preparerConfig.Auth["type"],
		"keyring":     preparerConfig.Auth["keyring"],
		"version":     version.VERSION,
	}).Infoln("Preparer started successfully")
	// One quit channel per background watch so each can be stopped
	// independently by waitForTermination.
	quitMainUpdate := make(chan struct{})
	quitHookUpdate := make(chan struct{})
	quitMonitorPodHealth := make(chan struct{})
	go prep.WatchForPodManifestsForNode(quitMainUpdate)
	go prep.WatchForHooks(quitHookUpdate)
	// StatusPort == 0 disables the status endpoint entirely.
	if preparerConfig.StatusPort != 0 {
		http.HandleFunc("/_status",
			func(w http.ResponseWriter, r *http.Request) {
				fmt.Fprintf(w, "p2-preparer OK")
			})
		go http.ListenAndServe(fmt.Sprintf(":%d", preparerConfig.StatusPort), nil)
	}
	// Launch health checking watch. This watch tracks health of
	// all pods on this host and writes the information to consul
	go watch.MonitorPodHealth(preparerConfig, &logger, quitMonitorPodHealth)
	waitForTermination(logger, quitMainUpdate, quitHookUpdate, quitMonitorPodHealth)
	logger.NoFields().Infoln("Terminating")
}
开发者ID:robertabbott,项目名称:p2,代码行数:51,代码来源:main.go
示例5: TestUpdatePods
// UpdatePods looks at the pods currently being monitored and
// compares that to what the reality store indicates should be
// running. UpdatePods then shuts down the monitors for dead
// pods and creates PodWatch structs for new pods.
func TestUpdatePods(t *testing.T) {
	var current []PodWatch
	var reality []kp.ManifestResult
	// ids for current: 0, 1, 2, 3
	for i := 0; i < 4; i++ {
		current = append(current, *newWatch(strconv.Itoa(i)))
	}
	// ids for reality: 1, 2, test
	for i := 1; i < 3; i++ {
		reality = append(reality, newManifestResult(current[i].manifest.ID()))
	}
	reality = append(reality, newManifestResult("test"))
	// ids for pods: 1, 2, test
	// 0, 3 should have values in their shutdownCh
	logger := logging.NewLogger(logrus.Fields{})
	pods := updatePods(&MockHealthManager{}, nil, current, reality, "", &logger)
	Assert(t).AreEqual(true, <-current[0].shutdownCh, "this PodWatch should have been shutdown")
	Assert(t).AreEqual(true, <-current[3].shutdownCh, "this PodWatch should have been shutdown")
	Assert(t).AreEqual(current[1].manifest.ID(), pods[0].manifest.ID(), "pod with id:1 should have been returned")
	// Fixed copy-paste in the failure message: this assertion checks id:2, not id:1.
	Assert(t).AreEqual(current[2].manifest.ID(), pods[1].manifest.ID(), "pod with id:2 should have been returned")
	Assert(t).AreEqual("test", pods[2].manifest.ID(), "should have added pod with id:test to list")
}
开发者ID:tomzhang,项目名称:p2,代码行数:28,代码来源:health_test.go
示例6: main
// main is the p2-replicate entry point: it parses flags, loads the
// pod manifest, builds a replicator over the requested hosts, and
// enacts the replication until completion or ctrl-C.
func main() {
	kingpin.CommandLine.Name = "p2-replicate"
	kingpin.CommandLine.Help = `p2-replicate uses the replication package to schedule deployment of a pod across multiple nodes. See the replication package's README and godoc for more information.
Example invocation: p2-replicate --min-nodes 2 helloworld.yaml aws{1,2,3}.example.com
This will take the pod whose manifest is located at helloworld.yaml and
deploy it to the three nodes aws1.example.com, aws2.example.com, and
aws3.example.com
Because of --min-nodes 2, the replicator will ensure that at least two healthy
nodes remain up at all times, according to p2's health checks.
`
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	client := kp.NewConsulClient(opts)
	store := kp.NewConsulStore(client)
	healthChecker := checker.NewConsulHealthChecker(client)
	manifest, err := pods.ManifestFromURI(*manifestUri)
	if err != nil {
		log.Fatalf("%s", err)
	}
	// Tag every log line with the pod being replicated.
	logger := logging.NewLogger(logrus.Fields{
		"pod": manifest.ID(),
	})
	logger.Logger.Formatter = &logrus.TextFormatter{
		DisableTimestamp: false,
		FullTimestamp:    true,
		TimestampFormat:  "15:04:05.000",
	}
	// create a lock with a meaningful name and set up a renewal loop for it
	thisHost, err := os.Hostname()
	if err != nil {
		log.Fatalf("Could not retrieve hostname: %s", err)
	}
	thisUser, err := user.Current()
	if err != nil {
		log.Fatalf("Could not retrieve user: %s", err)
	}
	lockMessage := fmt.Sprintf("%q from %q at %q", thisUser.Username, thisHost, time.Now())
	// len(*hosts)-*minNodes is the number of nodes allowed to be
	// down at once during the rollout.
	repl, err := replication.NewReplicator(
		manifest,
		logger,
		*hosts,
		len(*hosts)-*minNodes,
		store,
		healthChecker,
		health.HealthState(*threshold),
		lockMessage,
	)
	if err != nil {
		log.Fatalf("Could not initialize replicator: %s", err)
	}
	// NOTE(review): this local shadows the imported "replication"
	// package for the remainder of the function; consider renaming.
	replication, errCh, err := repl.InitializeReplication(*overrideLock)
	if err != nil {
		log.Fatalf("Unable to initialize replication: %s", err)
	}
	// auto-drain this channel
	go func() {
		for range errCh {
		}
	}()
	go func() {
		// clear lock immediately on ctrl-C
		signals := make(chan os.Signal, 1)
		signal.Notify(signals, os.Interrupt)
		<-signals
		replication.Cancel()
		os.Exit(1)
	}()
	replication.Enact()
}
开发者ID:tomzhang,项目名称:p2,代码行数:80,代码来源:main.go
示例7: main
func main() {
cmd, consulOpts := flags.ParseWithConsulOptions()
client := kp.NewConsulClient(consulOpts)
logger := logging.NewLogger(logrus.Fields{})
dsstore := dsstore.NewConsul(client, 3, &logger)
applicator := labels.NewConsulApplicator(client, 3)
switch cmd {
case CmdCreate:
minHealth, err := strconv.Atoi(*createMinHealth)
if err != nil {
log.Fatalf("Invalid value for minimum health, expected integer: %v", err)
}
name := ds_fields.ClusterName(*createName)
manifest, err := manifest.FromPath(*createManifest)
if err != nil {
log.Fatalf("%s", err)
}
podID := manifest.ID()
if *createTimeout <= time.Duration(0) {
log.Fatalf("Timeout must be a positive non-zero value, got '%v'", *createTimeout)
}
selectorString := *createSelector
if *createEverywhere {
selectorString = klabels.Everything().String()
} else if selectorString == "" {
selectorString = klabels.Nothing().String()
log.Fatal("Explicit everything selector not allowed, please use the --everwhere flag")
}
selector, err := parseNodeSelectorWithPrompt(klabels.Nothing(), selectorString, applicator)
if err != nil {
log.Fatalf("Error occurred: %v", err)
}
if err = confirmMinheathForSelector(minHealth, selector, applicator); err != nil {
log.Fatalf("Error occurred: %v", err)
}
ds, err := dsstore.Create(manifest, minHealth, name, selector, podID, *createTimeout)
if err != nil {
log.Fatalf("err: %v", err)
}
fmt.Printf("%v has been created in consul", ds.ID)
fmt.Println()
case CmdGet:
id := ds_fields.ID(*getID)
ds, _, err := dsstore.Get(id)
if err != nil {
log.Fatalf("err: %v", err)
}
bytes, err := json.Marshal(ds)
if err != nil {
logger.WithError(err).Fatalln("Unable to marshal daemon set as JSON")
}
fmt.Printf("%s", bytes)
case CmdList:
dsList, err := dsstore.List()
if err != nil {
log.Fatalf("err: %v", err)
}
podID := types.PodID(*listPod)
for _, ds := range dsList {
if *listPod == "" || podID == ds.PodID {
fmt.Printf("%s/%s:%s\n", ds.PodID, ds.Name, ds.ID)
}
}
case CmdEnable:
id := ds_fields.ID(*enableID)
mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
if !ds.Disabled {
return ds, util.Errorf("Daemon set has already been enabled")
}
ds.Disabled = false
return ds, nil
}
_, err := dsstore.MutateDS(id, mutator)
if err != nil {
log.Fatalf("err: %v", err)
}
fmt.Printf("The daemon set '%s' has been successfully enabled in consul", id.String())
fmt.Println()
case CmdDisable:
id := ds_fields.ID(*disableID)
mutator := func(ds ds_fields.DaemonSet) (ds_fields.DaemonSet, error) {
if ds.Disabled {
return ds, util.Errorf("Daemon set has already been disabled")
}
ds.Disabled = true
return ds, nil
//.........这里部分代码省略.........
开发者ID:rudle,项目名称:p2,代码行数:101,代码来源:main.go
示例8: init
// init seeds the package-level Log with a logger that carries no
// preset fields.
func init() {
	Log = logging.NewLogger(logrus.Fields{})
}
开发者ID:rudle,项目名称:p2,代码行数:3,代码来源:factory.go
示例9: authorize
// authorize validates the auth-related command-line flags for the
// selected --auth-type, builds the corresponding auth.Policy, and
// runs the manifest through it. It returns nil when the manifest is
// authorized (or when auth is disabled via auth.Null), and an error
// for invalid flag combinations or a failed authorization check.
func authorize(manifest manifest.Manifest) error {
	var policy auth.Policy
	var err error
	switch *authType {
	case auth.Null:
		// With auth disabled, any auth-specific flag is a user error.
		if *keyring != "" {
			return util.Errorf("--keyring may not be specified if --auth-type is '%s'", *authType)
		}
		if *deployPolicy != "" {
			return util.Errorf("--deploy-policy may not be specified if --auth-type is '%s'", *authType)
		}
		if len(*allowedUsers) != 0 {
			return util.Errorf("--allowed-users may not be specified if --auth-type is '%s'", *authType)
		}
		// Nothing to check: authorization always succeeds.
		return nil
	case auth.Keyring:
		// Keyring auth requires a keyring file and at least one user.
		if *keyring == "" {
			return util.Errorf("Must specify --keyring if --auth-type is '%s'", *authType)
		}
		if len(*allowedUsers) == 0 {
			return util.Errorf("Must specify at least one allowed user if using a keyring auth type")
		}
		policy, err = auth.NewFileKeyringPolicy(
			*keyring,
			map[types.PodID][]string{
				constants.PreparerPodID: *allowedUsers,
			},
		)
		if err != nil {
			return err
		}
	case auth.User:
		// User auth requires both a keyring and a deploy policy.
		if *keyring == "" {
			return util.Errorf("Must specify --keyring if --auth-type is '%s'", *authType)
		}
		if *deployPolicy == "" {
			return util.Errorf("Must specify --deploy-policy if --auth-type is '%s'", *authType)
		}
		policy, err = auth.NewUserPolicy(
			*keyring,
			*deployPolicy,
			constants.PreparerPodID,
			constants.PreparerPodID.String(),
		)
		if err != nil {
			return err
		}
	default:
		return util.Errorf("Unknown --auth-type: %s", *authType)
	}
	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = new(logrus.TextFormatter)
	err = policy.AuthorizeApp(manifest, logger)
	if err != nil {
		// auth.Error carries structured fields worth logging; plain
		// errors are logged without fields. The error is returned
		// either way.
		if err, ok := err.(auth.Error); ok {
			logger.WithFields(err.Fields).Errorln(err)
		} else {
			logger.NoFields().Errorln(err)
		}
		return err
	}
	return nil
}
开发者ID:petertseng,项目名称:p2,代码行数:69,代码来源:main.go
示例10: main
// main is the roll/rc farm entry point. It parses flags, configures
// logging, constructs the consul-backed stores, scheduler, session
// stream and optional pagerduty alerter, then starts the rc farm in
// a goroutine and runs the roll farm in the foreground.
func main() {
	// Parse custom flags + standard Consul routing options
	kingpin.Version(version.VERSION)
	_, opts := flags.ParseWithConsulOptions()
	// Set up the logger
	logger := logging.NewLogger(logrus.Fields{})
	logger.Logger.Formatter = new(logrus.TextFormatter)
	if *logLevel != "" {
		lv, err := logrus.ParseLevel(*logLevel)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{"level": *logLevel}).
				Fatalln("Could not parse log level")
		}
		logger.Logger.Level = lv
	}
	// Initialize the myriad of different storage components
	httpClient := cleanhttp.DefaultClient()
	client := kp.NewConsulClient(opts)
	kpStore := kp.NewConsulStore(client)
	rcStore := rcstore.NewConsul(client, RetryCount)
	rollStore := rollstore.NewConsul(client, nil)
	healthChecker := checker.NewConsulHealthChecker(client)
	labeler := labels.NewConsulApplicator(client, RetryCount)
	// The scheduler uses an HTTP label service when --label-endpoint
	// is given; otherwise it queries consul labels directly.
	var sched scheduler.Scheduler
	if *labelEndpoint != "" {
		endpoint, err := url.Parse(*labelEndpoint)
		if err != nil {
			logger.WithErrorAndFields(err, logrus.Fields{
				"url": *labelEndpoint,
			}).Fatalln("Could not parse URL from label endpoint")
		}
		httpLabeler, err := labels.NewHttpApplicator(opts.Client, endpoint)
		if err != nil {
			logger.WithError(err).Fatalln("Could not create label applicator from endpoint")
		}
		sched = scheduler.NewApplicatorScheduler(httpLabeler)
	} else {
		sched = scheduler.NewApplicatorScheduler(labeler)
	}
	// Start acquiring sessions
	sessions := make(chan string)
	go consulutil.SessionManager(api.SessionEntry{
		Name:      SessionName(),
		LockDelay: 5 * time.Second,
		Behavior:  api.SessionBehaviorDelete,
		TTL:       "15s",
	}, client, sessions, nil, logger)
	// Fan the session stream out to both farms via a publisher.
	pub := stream.NewStringValuePublisher(sessions, "")
	// Alerting is a no-op unless a pagerduty service key is supplied.
	alerter := alerting.NewNop()
	if *pagerdutyServiceKey != "" {
		var err error
		alerter, err = alerting.NewPagerduty(*pagerdutyServiceKey, httpClient)
		if err != nil {
			logger.WithError(err).Fatalln(
				"Unable to initialize pagerduty alerter",
			)
		}
	}
	// Run the farms!
	go rc.NewFarm(
		kpStore,
		rcStore,
		sched,
		labeler,
		pub.Subscribe().Chan(),
		logger,
		klabels.Everything(),
		alerter,
	).Start(nil)
	// The roll farm runs on the main goroutine and keeps the process
	// alive.
	roll.NewFarm(
		roll.UpdateFactory{
			KPStore:       kpStore,
			RCStore:       rcStore,
			HealthChecker: healthChecker,
			Labeler:       labeler,
			Scheduler:     sched,
		},
		kpStore,
		rollStore,
		rcStore,
		pub.Subscribe().Chan(),
		logger,
		labeler,
		klabels.Everything(),
		alerter,
	).Start(nil)
}
开发者ID:drcapulet,项目名称:p2,代码行数:92,代码来源:main.go
注:本文中的github.com/square/p2/pkg/logging.NewLogger函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论