本文整理汇总了Golang中github.com/Shopify/sarama.NewClient函数的典型用法代码示例。如果您正苦于以下问题:Golang NewClient函数的具体用法?Golang NewClient怎么用?Golang NewClient使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewClient函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Golang代码示例。
示例1: NewKafka
// NewKafka builds a benchmark peer with separate pub/sub clients against a
// local broker, wiring either a latency or a throughput handler.
// Connection errors now panic instead of being discarded with `_` — a nil
// client/producer/consumer would crash later in the benchmark anyway.
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	pubClient, err := sarama.NewClient("pub", []string{"localhost:9092"}, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}
	subClient, err := sarama.NewClient("sub", []string{"localhost:9092"}, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}

	topic := "test"
	pub, err := sarama.NewProducer(pubClient, sarama.NewProducerConfig())
	if err != nil {
		panic(err)
	}

	consumerConfig := sarama.NewConsumerConfig()
	consumerConfig.OffsetMethod = sarama.OffsetMethodNewest // Only read new messages
	consumerConfig.DefaultFetchSize = 10 * 1024 * 1024
	sub, err := sarama.NewConsumer(subClient, topic, 0, "test", consumerConfig)
	if err != nil {
		panic(err)
	}

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}
	return &Kafka{
		handler:   handler,
		pubClient: pubClient,
		subClient: subClient,
		pub:       pub,
		sub:       sub,
		topic:     topic,
	}
}
开发者ID:hitomi333,项目名称:mq-benchmarking,代码行数:30,代码来源:kafka.go
示例2: main
// main tails the "received" topic of a local broker from partition 0 and
// prints a rune-decoding probe of every message value; any error is fatal.
func main() {
	client, err := sarama.NewClient("a_logger_for_mhub", []string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}
	os.Stderr.WriteString("> connected\n")
	defer client.Close()

	consumer, err := sarama.NewConsumer(client, "received", 0, "", nil)
	if err != nil {
		panic(err)
	}
	os.Stderr.WriteString("> consumer ready\n")
	defer consumer.Close()

	// The event channel is never closed, so this ranges forever.
	for event := range consumer.Events() {
		if event.Err != nil {
			panic(event.Err)
		}
		fmt.Println(utf8.FullRune(event.Value))
	}
}
开发者ID:kaiserprogrammer,项目名称:mhub,代码行数:27,代码来源:main.go
示例3: NewPeer
// NewPeer creates and returns a new Peer for communicating with Kafka.
// The host's port is normalized to 9092. Unlike the original, every
// already-created resource is closed when a later construction step fails,
// so a partial failure no longer leaks broker connections.
func NewPeer(host string) (*Peer, error) {
	host = strings.Split(host, ":")[0] + ":9092"
	config := sarama.NewConfig()
	client, err := sarama.NewClient([]string{host}, config)
	if err != nil {
		return nil, err
	}
	producer, err := sarama.NewAsyncProducer([]string{host}, config)
	if err != nil {
		client.Close()
		return nil, err
	}
	consumer, err := sarama.NewConsumer([]string{host}, config)
	if err != nil {
		producer.Close()
		client.Close()
		return nil, err
	}
	partitionConsumer, err := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
	if err != nil {
		consumer.Close()
		producer.Close()
		client.Close()
		return nil, err
	}
	return &Peer{
		client:   client,
		producer: producer,
		consumer: partitionConsumer,
		send:     make(chan []byte),
		errors:   make(chan error, 1),
		done:     make(chan bool),
	}, nil
}
开发者ID:huikang,项目名称:Flotilla,代码行数:33,代码来源:kafka.go
示例4: clusterSummary
// clusterSummary gathers per-topic offset statistics for every topic in the
// given cluster: "flat" is the retained message count (newest-oldest summed
// over writable partitions) and "cum" the cumulative newest offsets.
// Returns nil when the cluster is unreachable.
func (this *Topics) clusterSummary(zkcluster *zk.ZkCluster) []topicSummary {
	kfk, err := sarama.NewClient(zkcluster.BrokerList(), saramaConfig())
	if err != nil {
		this.Ui.Error(err.Error())
		return nil
	}
	defer kfk.Close()

	summaries := make([]topicSummary, 0, 10)
	topics, _ := kfk.Topics() // best-effort: per-topic errors are skipped
	for _, topic := range topics {
		var flat, cum int64
		partitions, _ := kfk.WritablePartitions(topic)
		for _, p := range partitions {
			newest, _ := kfk.GetOffset(topic, p, sarama.OffsetNewest)
			oldest, _ := kfk.GetOffset(topic, p, sarama.OffsetOldest)
			flat += newest - oldest
			cum += newest
		}
		summaries = append(summaries, topicSummary{zkcluster.ZkZone().Name(), zkcluster.Name(), topic, len(partitions), flat, cum})
	}
	return summaries
}
开发者ID:funkygao,项目名称:gafka,代码行数:27,代码来源:topics.go
示例5: NewKafkaDeliver
// NewKafkaDeliver constructs a KafkaDeliver with its own sarama client and
// producer. If producer creation fails, the freshly created client is now
// closed before returning, avoiding a broker-connection leak.
func NewKafkaDeliver(store *Store, clientId string, brokerList []string) (*KafkaDeliver, error) {
	log.Println("go=kafka at=new-kafka-deliver")
	clientConfig := sarama.NewClientConfig()
	producerConfig := sarama.NewProducerConfig()
	client, err := sarama.NewClient(clientId, brokerList, clientConfig)
	if err != nil {
		return nil, err
	}
	log.Println("go=kafka at=created-client")
	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		// Don't leak the connections held by the client we just opened.
		client.Close()
		return nil, err
	}
	log.Println("go=kafka at=created-producer")
	return &KafkaDeliver{
		clientId:          clientId,
		brokerList:        brokerList,
		store:             store,
		producer:          producer,
		producerConfig:    producerConfig,
		client:            client,
		clientConfig:      clientConfig,
		deliverGoroutines: 8,
		shutdownDeliver:   make(chan bool, 8),
		shutdown:          make(chan bool, 8),
	}, nil
}
开发者ID:sclasen,项目名称:event-shuttle,代码行数:31,代码来源:deliver.go
示例6: CreateKafkaTopic
// CreateKafkaTopic wires up a sarama client, an async producer and a
// partition consumer for the "http-request" topic and bundles everything
// into a KafkaTopic. Any setup failure panics.
func CreateKafkaTopic() *KafkaTopic {
	client, err := sarama.NewClient([]string{"kafka:9092"}, sarama.NewConfig())
	if err != nil {
		panic(err)
	}
	fmt.Printf("Kafka Client connected: %v\n", client)

	topic := "http-request"
	producer, err := sarama.NewAsyncProducerFromClient(client)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Kafka Producer connected: %v\n", producer)
	producable := producer.Input()

	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		panic(err)
	}
	fmt.Printf("Kafka Consumer connected: %v\n", consumer)

	// Literal offset 0: replay the partition from its very first message.
	consumable, err := consumer.ConsumePartition(topic, 0, 0)
	if err != nil {
		panic(err)
	}
	return &KafkaTopic{client, topic, producer, producable, consumer, consumable}
}
开发者ID:ds0nt,项目名称:webpipes,代码行数:31,代码来源:ws-kafka.go
示例7: diagnose
// diagnose probes every registered broker of every sorted cluster by issuing
// a metadata request, logging one ok/error line per broker.
func (this *Ping) diagnose() {
	this.zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		for _, broker := range zkcluster.RegisteredInfo().Roster {
			log.Debug("ping %s", broker.Addr())

			kfk, err := sarama.NewClient([]string{broker.Addr()}, sarama.NewConfig())
			if err != nil {
				log.Error("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Red(err.Error()))
				continue
			}

			// Kafka has no dedicated ping RPC, so a Topics() metadata call
			// doubles as the liveness probe.
			if _, err = kfk.Topics(); err != nil {
				log.Error("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Red(err.Error()))
			} else if !this.problematicMode {
				log.Info("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Green("ok"))
			}
			kfk.Close()
		}
	})
}
开发者ID:chendx79,项目名称:gafka,代码行数:26,代码来源:ping.go
示例8: newKafkaClient
// newKafkaClient builds a sarama client tuned for bulk indexing: 100 MiB
// request/response limits, random partitioning, all-replica acks, and a
// 10s metadata refresh. hostname is currently unused (see the disabled
// locality-aware partitioner below).
func newKafkaClient(proc int, brokerList []string, hostname string) (sarama.Client, error) {
	sarama.MaxRequestSize = 100 * 1024 * 1024
	sarama.MaxResponseSize = 100 * 1024 * 1024

	cfg := sarama.NewConfig()
	cfg.Net.MaxOpenRequests = proc * 2
	cfg.Producer.MaxMessageBytes = int(sarama.MaxRequestSize)
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Partitioner = sarama.NewRandomPartitioner
	cfg.Metadata.RefreshFrequency = 10 * time.Second
	cfg.ClientID = "indexer"
	// cfg.Producer.Compression = sarama.CompressionGZIP
	// cfg.Producer.Flush.MaxMessages = 10000

	client, err := sarama.NewClient(brokerList, cfg)
	if err != nil {
		return nil, err
	}
	// Disabled locality-aware partitioning, kept for reference:
	// partitionerCreator := func(topic string) sarama.Partitioner {
	//	return newLocalAwarePartitioner(client, topic, hostname)
	// }
	// cfg.Producer.Partitioner = partitionerCreator
	return client, nil
}
开发者ID:jackdoe,项目名称:no,代码行数:26,代码来源:indexer.go
示例9: NewKafka
// NewKafka returns a benchmark peer backed by a local broker, consuming only
// messages produced from now on (OffsetNewest). Setup errors now panic
// instead of being discarded with `_`, which previously left nil handles
// that would crash later during the benchmark.
func NewKafka(numberOfMessages int, testLatency bool) *Kafka {
	config := sarama.NewConfig()
	client, err := sarama.NewClient([]string{"localhost:9092"}, config)
	if err != nil {
		panic(err)
	}
	topic := "test"
	pub, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		panic(err)
	}
	consumer, err := sarama.NewConsumerFromClient(client)
	if err != nil {
		panic(err)
	}
	sub, err := consumer.ConsumePartition(topic, 0, sarama.OffsetNewest)
	if err != nil {
		panic(err)
	}

	var handler benchmark.MessageHandler
	if testLatency {
		handler = &benchmark.LatencyMessageHandler{
			NumberOfMessages: numberOfMessages,
			Latencies:        []float32{},
		}
	} else {
		handler = &benchmark.ThroughputMessageHandler{NumberOfMessages: numberOfMessages}
	}
	return &Kafka{
		handler: handler,
		client:  client,
		pub:     pub,
		sub:     sub,
		topic:   topic,
	}
}
开发者ID:houcy,项目名称:mq-benchmarking,代码行数:27,代码来源:kafka.go
示例10: TestSendData
// TestSendData checks that producing one message to the first init topic
// advances the topic's offset bookkeeping (requires a reachable broker;
// brokerList/config/topicsInit are package-level fixtures).
func TestSendData(t *testing.T) {
kafkaClient, err := sarama.NewClient(brokerList, config)
if err != nil {
panic(err)
}
defer kafkaClient.Close()
partitionID, err := kafkaClient.Partitions(topicsInit[0])
convey.Convey("err should be nil", t, func() {
convey.So(err, convey.ShouldEqual, nil)
})
convey.Convey("partitionID should not be nil ", t, func() {
convey.So(partitionID, convey.ShouldNotEqual, nil)
})
// NOTE(review): the topic is hard-coded as "test" here but topicsInit[0]
// is used when producing — presumably they are the same topic; verify.
offset, err := kafkaClient.GetOffset("test", partitionID[0], sarama.OffsetOldest)
convey.Convey("err should be nil", t, func() {
convey.So(err, convey.ShouldEqual, nil)
})
producer.NewProducer(brokerList, topicsInit, config)
producer.SendData(topicsInit[0], "init message")
offset2, err := kafkaClient.GetOffset("test", partitionID[0], sarama.OffsetOldest)
convey.Convey("err should be nil", t, func() {
convey.So(err, convey.ShouldEqual, nil)
})
// NOTE(review): the descriptions below say "should not be equal" but the
// assertions use ShouldEqual — the text and the checks contradict each
// other; confirm which is intended before relying on this test.
if offset == 0 {
convey.Convey("offset2 should not be equal to offset ", t, func() {
convey.So(offset2, convey.ShouldEqual, offset)
})
} else {
convey.Convey("offset2 should not be equal to offset + 1 ", t, func() {
convey.So(offset2, convey.ShouldEqual, offset+1)
})
}
}
开发者ID:NexwayGroup,项目名称:nx-lib,代码行数:33,代码来源:producer_test.go
示例11: Produce
// Produce pumps byte packets from Data into the given Kafka topic using a
// round-robin partitioner until Quit fires. Client/producer creation
// failures panic; producer errors are logged and skipped.
func Produce(Quit chan bool, Host []string, Topic string, Data chan []byte) {
	client, err := sarama.NewClient("crontab_client", Host, sarama.NewClientConfig())
	if err != nil {
		panic(err)
	}
	log.Println("kafka producer connected")
	defer client.Close()

	cfg := sarama.NewProducerConfig()
	cfg.Partitioner = sarama.NewRoundRobinPartitioner
	producer, err := sarama.NewProducer(client, cfg)
	if err != nil {
		panic(err)
	}
	defer producer.Close()
	log.Println("kafka producer ready")

	for {
		select {
		case pack := <-Data:
			producer.Input() <- &sarama.MessageToSend{Topic: Topic, Key: nil, Value: sarama.ByteEncoder(pack)}
		case err := <-producer.Errors():
			log.Println(err)
		case <-Quit:
			// BUG FIX: the original used `break`, which only exits the
			// select statement — the for loop kept running and the
			// goroutine never stopped. Returning also runs the deferred
			// Close calls.
			return
		}
	}
}
开发者ID:snowsnail,项目名称:crontabd,代码行数:29,代码来源:send.go
示例12: saramaClient
// saramaClient is a test helper that connects to the configured Kafka peers
// with default settings, panicking if the cluster is unreachable.
func saramaClient() sarama.Client {
	c, err := sarama.NewClient(kafkaPeers, nil)
	if err != nil {
		panic(err)
	}
	return c
}
开发者ID:gcnonato,项目名称:kafka,代码行数:7,代码来源:consumergroup_integration_test.go
示例13: NewClient
// NewClient returns a Kafka client identified by the local hostname, with
// snappy compression and producer success reporting enabled. Connection
// attempts that fail with ErrOutOfBrokers are retried with a fixed backoff;
// any other result (success or a different error) is returned immediately.
func NewClient(addresses []string) (sarama.Client, error) {
	config := sarama.NewConfig()
	hostname, err := os.Hostname()
	if err != nil {
		hostname = ""
	}
	config.ClientID = hostname
	config.Producer.Compression = sarama.CompressionSnappy
	config.Producer.Return.Successes = true

	var client sarama.Client
	for attempt := outOfBrokersRetries + 1; attempt > 0; {
		client, err = sarama.NewClient(addresses, config)
		attempt--
		if err != sarama.ErrOutOfBrokers {
			break
		}
		glog.Errorf("Can't connect to the Kafka cluster at %s (%d retries left): %s",
			addresses, attempt, err)
		time.Sleep(outOfBrokersBackoff)
	}
	return client, err
}
开发者ID:aristanetworks,项目名称:goarista,代码行数:26,代码来源:client.go
示例14: tryOpenConnection
// tryOpenConnection (re)establishes the Kafka client and async producer if
// either is missing. On failure all partial state is torn down and false is
// returned; on success the plugin fuse is re-activated.
func (prod *Kafka) tryOpenConnection() bool {
	// Reconnect the client first.
	if prod.client == nil {
		client, err := kafka.NewClient(prod.servers, prod.config)
		if err != nil {
			Log.Error.Print("Kafka client error:", err)
			prod.client = nil
			prod.producer = nil
			return false // ### return, connection failed ###
		}
		prod.client = client
	}

	// Make sure we have a producer up and running.
	if prod.producer == nil {
		producer, err := kafka.NewAsyncProducerFromClient(prod.client)
		if err != nil {
			Log.Error.Print("Kafka producer error:", err)
			prod.client.Close()
			prod.client = nil
			prod.producer = nil
			return false // ### return, connection failed ###
		}
		prod.producer = producer
	}

	prod.Control() <- core.PluginControlFuseActive
	return true
}
开发者ID:pombredanne,项目名称:gollum-1,代码行数:29,代码来源:kafka.go
示例15: generateKafkaData
// generateKafkaData publishes a single "Hello World" message to topic and
// refreshes the client metadata so the (possibly auto-created) topic is
// visible to subsequent lookups.
func generateKafkaData(t *testing.T, topic string) {
	config := sarama.NewConfig()
	client, err := sarama.NewClient([]string{getTestKafkaHost()}, config)
	if err != nil {
		// Fatalf, not Errorf: the original continued with a nil client and
		// would panic on the next line.
		t.Fatalf("%s", err)
	}
	defer client.Close()

	producer, err := sarama.NewSyncProducerFromClient(client)
	if err != nil {
		// Fatal: a nil producer cannot be closed or used below.
		t.Fatal(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{
		Topic: topic,
		Value: sarama.StringEncoder("Hello World"),
	}
	if _, _, err = producer.SendMessage(msg); err != nil {
		t.Errorf("FAILED to send message: %s\n", err)
	}

	// Surface metadata-refresh failures instead of silently ignoring them.
	if err = client.RefreshMetadata(topic); err != nil {
		t.Errorf("failed to refresh metadata for topic %s: %s", topic, err)
	}
}
开发者ID:ruflin,项目名称:beats,代码行数:25,代码来源:partition_integration_test.go
示例16: kafkaClient
// kafkaClient initializes a connection to a Kafka cluster and starts
// `producers` clientProducer goroutines on it. In noop mode no connection is
// made at all; dummy producers just generate messages and burn CPU. Either
// way it blocks until killClients fires.
func kafkaClient(n int) {
	if noop {
		for i := 0; i < producers; i++ {
			go clientDummyProducer()
		}
	} else {
		cId := "client_" + strconv.Itoa(n)

		conf := kafka.NewConfig()
		if compression != kafka.CompressionNone {
			conf.Producer.Compression = compression
		}
		conf.Producer.Flush.MaxMessages = batchSize

		client, err := kafka.NewClient(brokers, conf)
		if err != nil {
			log.Println(err)
			os.Exit(1)
		}
		log.Printf("%s connected\n", cId)

		for i := 0; i < producers; i++ {
			go clientProducer(client)
		}
	}
	<-killClients
}
开发者ID:prezi,项目名称:sangrenel,代码行数:33,代码来源:sangrenel.go
示例17: consumeCluster
// consumeCluster spawns one simpleConsumeTopic goroutine per topic matching
// topicPattern in the given cluster, all feeding messages into msgChan.
func (this *Peek) consumeCluster(zkcluster *zk.ZkCluster, topicPattern string,
partitionId int, msgChan chan *sarama.ConsumerMessage) {
brokerList := zkcluster.BrokerList()
if len(brokerList) == 0 {
// No live brokers registered: nothing to consume from.
return
}
kfk, err := sarama.NewClient(brokerList, sarama.NewConfig())
if err != nil {
this.Ui.Output(err.Error())
return
}
// NOTE(review): kfk is intentionally left open — the goroutines started
// below outlive this call and share the client. Presumably it is torn
// down on process shutdown; verify there is no long-running leak.
//defer kfk.Close() // FIXME how to close it
topics, err := kfk.Topics()
if err != nil {
this.Ui.Output(err.Error())
return
}
for _, t := range topics {
if patternMatched(t, topicPattern) {
// One consumer goroutine per matching topic, all on the same partition.
go this.simpleConsumeTopic(zkcluster, kfk, t, int32(partitionId), msgChan)
}
}
}
开发者ID:funkygao,项目名称:gafka,代码行数:26,代码来源:peek.go
示例18: Connect
// Connect sets up the sarama producer side (shared client plus snappy
// compressed async producer) and prepares — but does not start — the
// zookeeper-coordinated consumer configuration.
func (t *Transport) Connect() error {
	config := sarama.NewConfig()
	config.Producer.Compression = sarama.CompressionSnappy

	client, err := sarama.NewClient(t.Brokers, config)
	if err != nil {
		return err
	}
	t.client = client

	producer, err := sarama.NewAsyncProducerFromClient(t.client)
	if err != nil {
		return err
	}
	t.producer = producer

	// Consumer configuration: zookeeper-coordinated, single worker/fetcher,
	// starting from the largest (newest) offset.
	zkConf := kafkaClient.NewZookeeperConfig()
	zkConf.ZookeeperConnect = t.ZookeeperHosts

	cc := kafkaClient.DefaultConsumerConfig()
	cc.Coordinator = kafkaClient.NewZookeeperCoordinator(zkConf)
	cc.RebalanceMaxRetries = 10
	cc.NumWorkers = 1
	cc.NumConsumerFetchers = 1
	cc.AutoOffsetReset = kafkaClient.LargestOffset
	t.consumerConfig = *cc

	return nil
}
开发者ID:frosenberg,项目名称:go-cloud-stream,代码行数:31,代码来源:kafka.go
示例19: produceNToTopicPartition
// produceNToTopicPartition produces n test messages to the given topic via
// brokerAddr, keyed so the fixed partitioner routes them all to `partition`.
// Any producer error observed within 5s fails the test; silence within the
// window counts as success.
func produceNToTopicPartition(t *testing.T, n int, topic string, partition int, brokerAddr string) {
	client, err := sarama.NewClient("test-client", []string{brokerAddr}, sarama.NewClientConfig())
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	producerConfig := sarama.NewProducerConfig()
	partitionerFactory := &SaramaPartitionerFactory{NewFixedPartitioner}
	producerConfig.Partitioner = partitionerFactory.PartitionerConstructor

	// FIX: check the NewProducer error immediately — the original inserted
	// an unrelated statement between the call and its error check, which
	// obscured the failure point.
	producer, err := sarama.NewProducer(client, producerConfig)
	if err != nil {
		t.Fatal(err)
	}
	defer producer.Close()

	encoder := &Int32Encoder{}
	for i := 0; i < n; i++ {
		// The key carries the target partition number for NewFixedPartitioner.
		key, _ := encoder.Encode(uint32(partition))
		producer.Input() <- &sarama.ProducerMessage{Topic: topic, Key: sarama.ByteEncoder(key), Value: sarama.StringEncoder(fmt.Sprintf("test-kafka-message-%d", i))}
	}

	select {
	case e := <-producer.Errors():
		t.Fatalf("Failed to produce message: %s", e)
	case <-time.After(5 * time.Second):
	}
}
开发者ID:stealthly,项目名称:edge-test,代码行数:26,代码来源:testing_utils.go
示例20: NewKafkaLogger
// NewKafkaLogger creates a KafkaLogger for a given Kafka cluster,
// identifying itself with clientId. The producer round-robins across
// partitions and batches up to 1000 messages or 500ms, whichever comes
// first; it also configures the hystrix circuit breaker for the logger.
func NewKafkaLogger(clientId string, brokers []string) (request_handler.SpadeEdgeLogger, error) {
	client, err := sarama.NewClient(clientId, brokers, sarama.NewClientConfig())
	if err != nil {
		return nil, err
	}

	producerConfig := sarama.NewProducerConfig()
	producerConfig.Partitioner = sarama.NewRoundRobinPartitioner
	producerConfig.FlushFrequency = 500 * time.Millisecond
	producerConfig.FlushMsgCount = 1000
	// Might want to try out compression
	producerConfig.Compression = sarama.CompressionNone
	producerConfig.AckSuccesses = true

	producer, err := NewProducer(client, GetTopic(), producerConfig)
	if err != nil {
		return nil, err
	}

	logger := &KafkaLogger{
		Producer: producer,
	}

	hystrix.ConfigureCommand(hystrixCommandName, hystrix.CommandConfig{
		Timeout:               1000,
		MaxConcurrentRequests: hystrixConcurrencyLevel,
		ErrorPercentThreshold: 10,
	})
	return logger, nil
}
开发者ID:hyandell,项目名称:spade_edge,代码行数:30,代码来源:kafka_logger.go
注:本文中的github.com/Shopify/sarama.NewClient函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论