Golang breaker.New Function Code Examples


This article collects typical usage examples of the New function from the Go package github.com/eapache/go-resiliency/breaker. If you have been wondering what breaker.New does, how to call it, or what it looks like in real code, the curated examples below should help.



Six code examples of the New function are shown below, ordered by popularity.
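
All six examples construct their breaker with the same parameters, so before diving in, here is a minimal, self-contained sketch of the API they rely on (a sketch only, not taken from the examples; the doWork helper is a hypothetical stand-in for whatever operation you want to protect). breaker.New(errorThreshold, successThreshold, timeout) returns a circuit breaker that opens after errorThreshold errors, half-opens after timeout, and closes again after successThreshold consecutive successes; Run executes the given function unless the breaker is open, in which case it returns breaker.ErrBreakerOpen without calling it.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/breaker"
)

// doWork is a hypothetical stand-in for the operation being protected,
// e.g. assigning a partition or refreshing metadata in the examples below.
func doWork(attempt int) error {
	if attempt < 3 {
		return errors.New("simulated failure")
	}
	return nil
}

func main() {
	// Same parameters as every example below: open after 3 errors,
	// stay open for 10 seconds, close again after 1 success.
	b := breaker.New(3, 1, 10*time.Second)

	for attempt := 0; attempt < 6; attempt++ {
		err := b.Run(func() error { return doWork(attempt) })
		switch {
		case err == breaker.ErrBreakerOpen:
			// the breaker is open, so doWork was never invoked
			fmt.Println("request short-circuited")
		case err != nil:
			fmt.Println("work failed:", err)
		default:
			fmt.Println("work succeeded")
		}
	}
}

With these parameters the three initial failures trip the breaker, so the remaining calls return breaker.ErrBreakerOpen immediately instead of invoking doWork. This is the pattern the producers below apply to partition assignment and metadata refreshes.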

Example 1: partitionDispatcher

// one per topic
// partitions messages, then dispatches them by partition
func (p *asyncProducer) partitionDispatcher(topic string, input <-chan *ProducerMessage) {
	handlers := make(map[int32]chan *ProducerMessage)
	partitioner := p.conf.Producer.Partitioner(topic)
	breaker := breaker.New(3, 1, 10*time.Second)

	for msg := range input {
		if msg.retries == 0 {
			err := breaker.Run(func() error {
				return p.assignPartition(partitioner, msg)
			})
			if err != nil {
				p.returnError(msg, err)
				continue
			}
		}

		handler := handlers[msg.Partition]
		if handler == nil {
			newHandler := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
			topic := msg.Topic         // block local because go's closure semantics suck
			partition := msg.Partition // block local because go's closure semantics suck
			go withRecover(func() { p.leaderDispatcher(topic, partition, newHandler) })
			handler = newHandler
			handlers[msg.Partition] = handler
		}

		handler <- msg
	}

	for _, handler := range handlers {
		close(handler)
	}
}
Developer: oopcode | Project: gollum | Lines: 35 | Source: async_producer.go


Example 2: newTopicProducer

// constructs a topicProducer with its own circuit breaker and partitioner, then starts its dispatch goroutine
func (p *asyncProducer) newTopicProducer(topic string, input <-chan *ProducerMessage) *topicProducer {
	tp := &topicProducer{
		parent:      p,
		topic:       topic,
		input:       input,
		breaker:     breaker.New(3, 1, 10*time.Second),
		handlers:    make(map[int32]chan *ProducerMessage),
		partitioner: p.conf.Producer.Partitioner(topic),
	}
	go withRecover(tp.dispatch)
	return tp
}
Developer: sunshine-zhd1229 | Project: go_kafka_client | Lines: 12 | Source: async_producer.go


Example 3: newTopicProducer

// like the previous example, but allocates the buffered input channel itself and returns its send side
func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
	tp := &topicProducer{
		parent:      p,
		topic:       topic,
		input:       input,
		breaker:     breaker.New(3, 1, 10*time.Second),
		handlers:    make(map[int32]chan<- *ProducerMessage),
		partitioner: p.conf.Producer.Partitioner(topic),
	}
	go withRecover(tp.dispatch)
	return input
}
Developer: chrisdo | Project: cadvisor | Lines: 13 | Source: async_producer.go


Example 4: newPartitionProducer

// constructs a partitionProducer with a circuit breaker and one retry-state slot per retry level, then starts its dispatch goroutine
func (p *asyncProducer) newPartitionProducer(topic string, partition int32, input <-chan *ProducerMessage) *partitionProducer {
	pp := &partitionProducer{
		parent:    p,
		topic:     topic,
		partition: partition,
		input:     input,

		breaker:    breaker.New(3, 1, 10*time.Second),
		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
	}
	go withRecover(pp.dispatch)
	return pp
}
Developer: sunshine-zhd1229 | Project: go_kafka_client | Lines: 13 | Source: async_producer.go


Example 5: leaderDispatcher

// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
func (p *asyncProducer) leaderDispatcher(topic string, partition int32, input <-chan *ProducerMessage) {
	var leader *Broker
	var output chan *ProducerMessage

	breaker := breaker.New(3, 1, 10*time.Second)
	doUpdate := func() (err error) {
		if err = p.client.RefreshMetadata(topic); err != nil {
			return err
		}

		if leader, err = p.client.Leader(topic, partition); err != nil {
			return err
		}

		output = p.getBrokerProducer(leader)
		return nil
	}

	// try to prefetch the leader; if this doesn't work, we'll do a proper breaker-protected refresh-and-fetch
	// on the first message
	leader, _ = p.client.Leader(topic, partition)
	if leader != nil {
		output = p.getBrokerProducer(leader)
	}

	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
	// retryState[msg.retries].expectChaser simply tracks whether we've seen a chaser message for a given level (and
	// therefore whether our buffer is complete and safe to flush)
	highWatermark := 0
	retryState := make([]struct {
		buf          []*ProducerMessage
		expectChaser bool
	}, p.conf.Producer.Retry.Max+1)

	for msg := range input {
		if msg.retries > highWatermark {
			// new, higher, retry level; send off a chaser so that we know when everything "in between" has made it
			// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
			highWatermark = msg.retries
			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", topic, partition, highWatermark)
			retryState[msg.retries].expectChaser = true
			p.inFlight.Add(1) // we're generating a chaser message; track it so we don't shut down while it's still inflight
			output <- &ProducerMessage{Topic: topic, Partition: partition, flags: chaser, retries: msg.retries - 1}
			Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", topic, partition, leader.ID())
			p.unrefBrokerProducer(leader, output)
			output = nil
			time.Sleep(p.conf.Producer.Retry.Backoff)
		} else if highWatermark > 0 {
			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
			if msg.retries < highWatermark {
				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a chaser)
				if msg.flags&chaser == chaser {
					retryState[msg.retries].expectChaser = false
					p.inFlight.Done() // this chaser is now handled and will be garbage collected
				} else {
					retryState[msg.retries].buf = append(retryState[msg.retries].buf, msg)
				}
				continue
			} else if msg.flags&chaser == chaser {
				// this message is of the current retry level (msg.retries == highWatermark) and the chaser flag is set,
				// meaning this retry level is done and we can go down (at least) one level and flush that
				retryState[highWatermark].expectChaser = false
				Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", topic, partition, highWatermark)
				for {
					highWatermark--

					if output == nil {
						if err := breaker.Run(doUpdate); err != nil {
							p.returnErrors(retryState[highWatermark].buf, err)
							goto flushDone
						}
						Logger.Printf("producer/leader/%s/%d selected broker %d\n", topic, partition, leader.ID())
					}

					for _, msg := range retryState[highWatermark].buf {
						output <- msg
					}

				flushDone:
					retryState[highWatermark].buf = nil
					if retryState[highWatermark].expectChaser {
						Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", topic, partition, highWatermark)
						break
					} else {
						if highWatermark == 0 {
							Logger.Printf("producer/leader/%s/%d state change to [normal]\n", topic, partition)
							break
						}
					}

				}
				p.inFlight.Done() // this chaser is now handled and will be garbage collected
				continue
			}
		}

//......... remainder of this function omitted .........
Developer: oopcode | Project: gollum | Lines: 101 | Source: async_producer.go


Example 6: leaderDispatcher

// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
func (p *Producer) leaderDispatcher(topic string, partition int32, input chan *MessageToSend) {
	var leader *Broker
	var output chan *MessageToSend
	var backlog []*MessageToSend
	breaker := breaker.New(3, 1, 10*time.Second)
	doUpdate := func() (err error) {
		if err = p.client.RefreshTopicMetadata(topic); err != nil {
			return err
		}

		if leader, err = p.client.Leader(topic, partition); err != nil {
			return err
		}

		output = p.getBrokerWorker(leader)
		return nil
	}

	// try to prefetch the leader; if this doesn't work, we'll do a proper breaker-protected refresh-and-fetch
	// on the first message
	leader, _ = p.client.Leader(topic, partition)
	if leader != nil {
		output = p.getBrokerWorker(leader)
	}

	for msg := range input {
		if msg.flags&retried == 0 {
			// normal case
			if backlog != nil {
				backlog = append(backlog, msg)
				continue
			}
		} else if msg.flags&chaser == 0 {
			// retry flag set, chaser flag not set
			if backlog == nil {
				// on the very first retried message we send off a chaser so that we know when everything "in between" has made it
				// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
				Logger.Printf("producer/leader state change to [retrying] on %s/%d\n", topic, partition)
				output <- &MessageToSend{Topic: topic, partition: partition, flags: chaser}
				backlog = make([]*MessageToSend, 0)
				p.unrefBrokerWorker(leader)
				output = nil
				time.Sleep(p.config.RetryBackoff)
			}
		} else {
			// retry *and* chaser flag set, flush the backlog and return to normal processing
			Logger.Printf("producer/leader state change to [flushing] on %s/%d\n", topic, partition)
			if output == nil {
				if err := breaker.Run(doUpdate); err != nil {
					p.returnErrors(backlog, err)
					backlog = nil
					continue
				}
			}

			for _, msg := range backlog {
				output <- msg
			}
			Logger.Printf("producer/leader state change to [normal] on %s/%d\n", topic, partition)

			backlog = nil
			continue
		}

		if output == nil {
			if err := breaker.Run(doUpdate); err != nil {
				p.returnError(msg, err)
				continue
			}
		}

		output <- msg
	}

	p.unrefBrokerWorker(leader)
	p.retries <- &MessageToSend{flags: unref}
}
Developer: hyandell | Project: spade_edge | Lines: 80 | Source: producer.go



Note: the github.com/eapache/go-resiliency/breaker.New examples in this article were collected from GitHub, MSDocs, and other source-code and documentation platforms, and the snippets were selected from open-source projects contributed by their respective developers. Copyright of the source code remains with the original authors; refer to each project's License before distributing or reusing the code. Do not reproduce without permission.

