Golang log4go.Debug Function Code Examples


This article collects typical usage examples of the Debug function from the Golang package github.com/funkygao/log4go. If you are wondering what Debug does, how to call it, or what real-world usage looks like, the curated code examples below should help.



A total of 20 Debug function code examples are shown below, sorted by popularity by default.
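
All of these snippets share one calling convention: Debug takes a printf-style format string followed by its arguments, just like Warn, Error, and Info in the same package. Below is a minimal, self-contained sketch of that pattern. The console filter setup (AddFilter, NewConsoleLogWriter, DEBUG) comes from the upstream log4go API and is assumed to be unchanged in the funkygao fork; the import alias, message, and values are illustrative only.

package main

import (
	log "github.com/funkygao/log4go"
)

func main() {
	// Route DEBUG and above to the console; log4go also ships file and socket writers.
	log.AddFilter("stdout", log.DEBUG, log.NewConsoleLogWriter())
	// Flush any buffered records before the process exits.
	defer log.Close()

	// Debug accepts a printf-style format string plus arguments,
	// which is the pattern used throughout the examples below.
	log.Debug("warming up %d workers for %s", 4, "demo")
}

The project examples that follow differ only in what they format: remote addresses, configuration structs, job items, consumer offsets, and so on.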

Example 1: warmUp

func (this *FunServantImpl) warmUp() {
	log.Debug("warming up...")

	if this.mg != nil {
		go this.mg.Warmup()
	}

	if this.mc != nil {
		go this.mc.Warmup()
	}

	if this.my != nil {
		this.my.Warmup()
	}

	if this.proxy != nil {
		this.proxy.Warmup()
	}

	if this.rd != nil {
		this.rd.Warmup()
	}

	log.Debug("warmup done")
}
Developer: lucmichalski, Project: fae, Lines: 25, Source: servant.go


Example 2: wsReadPump

func (this *subServer) wsReadPump(clientGone chan struct{}, ws *websocket.Conn) {
	ws.SetReadLimit(this.wsReadLimit)
	ws.SetReadDeadline(time.Now().Add(this.wsPongWait))
	ws.SetPongHandler(func(string) error {
		ws.SetReadDeadline(time.Now().Add(this.wsPongWait))
		return nil
	})

	// if kateway shuts down while there are open ws conns, the shutdown will
	// wait 1m: this.subServer.wsPongWait
	for {
		_, message, err := ws.ReadMessage()
		if err != nil {
			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway) {
				log.Warn("%s: %v", ws.RemoteAddr(), err)
			} else {
				log.Debug("%s: %v", ws.RemoteAddr(), err)
			}

			close(clientGone)
			break
		}

		log.Debug("ws[%s] read: %s", ws.RemoteAddr(), string(message))
	}
}
Developer: funkygao, Project: gafka, Lines: 26, Source: handler_sub_ws.go


Example 3: MgInsert

func (this *FunServantImpl) MgInsert(ctx *rpc.Context,
	kind string, table string, shardId int32,
	doc []byte, options []byte) (r bool, intError error) {
	log.Debug("%s %d %s %s %v %s", kind, shardId, table,
		string(doc), doc, string(options))

	var sess *mongo.Session
	sess, intError = this.mongoSession(kind, shardId)
	if intError != nil {
		return
	}

	var bdoc = bson.M{}
	json.Unmarshal(doc, &bdoc)
	log.Debug("%+v", bdoc)

	err := sess.DB().C(table).Insert(bdoc)
	if err == nil {
		r = true
	} else {
		log.Error(err)
	}
	sess.Recyle(&err)

	return
}
Developer: justinblah, Project: fae, Lines: 26, Source: mg.go


Example 4: LaunchHttpServer

func LaunchHttpServer(listenAddr string, debugAddr string) (err error) {
	if httpApi != nil {
		return httpDupLaunch
	}

	httpApi = new(httpRestApi)
	httpApi.httpPaths = make([]string, 0, 10)
	httpApi.httpRouter = mux.NewRouter()
	httpApi.httpServer = &http.Server{
		Addr:    listenAddr,
		Handler: httpApi.httpRouter,
	}

	httpApi.httpListener, err = net.Listen("tcp", httpApi.httpServer.Addr)
	if err != nil {
		httpApi = nil
		return err
	}

	if debugAddr != "" {
		log.Debug("HTTP serving at %s with pprof at %s", listenAddr, debugAddr)
	} else {
		log.Debug("HTTP serving at %s", listenAddr)
	}

	go httpApi.httpServer.Serve(httpApi.httpListener)
	if debugAddr != "" {
		go http.ListenAndServe(debugAddr, nil)
	}

	return nil
}
Developer: postfix, Project: golib-1, Lines: 32, Source: http.go


Example 5: handleDueJobs

// TODO batch DELETE/INSERT for better performance.
func (this *JobExecutor) handleDueJobs(wg *sync.WaitGroup) {
	defer wg.Done()

	var (
		// zabbix maintains an in-memory delete queue
		// delete from history_uint where itemid=? and clock<min_clock
		sqlDeleteJob = fmt.Sprintf("DELETE FROM %s WHERE job_id=?", this.table)

		sqlInsertArchive = fmt.Sprintf("INSERT INTO %s(job_id,payload,ctime,due_time,etime,actor_id) VALUES(?,?,?,?,?,?)",
			jm.HistoryTable(this.topic))
		sqlReinject = fmt.Sprintf("INSERT INTO %s(job_id, payload, ctime, due_time) VALUES(?,?,?,?)", this.table)
	)
	for {
		select {
		case <-this.stopper:
			return

		case item := <-this.dueJobs:
			now := time.Now()
			affectedRows, _, err := this.mc.Exec(jm.AppPool, this.table, this.aid, sqlDeleteJob, item.JobId)
			if err != nil {
				log.Error("%s: %s", this.ident, err)
				continue
			}
			if affectedRows == 0 {
				// 2 possibilities:
				// - client Cancel job wins
				// - this handler is too slow and the job was fetched twice in one tick
				continue
			}

			log.Debug("%s land %s", this.ident, item)
			_, _, err = store.DefaultPubStore.SyncPub(this.cluster, this.topic, nil, item.Payload)
			if err != nil {
				err = hh.Default.Append(this.cluster, this.topic, nil, item.Payload)
			}
			if err != nil {
				// pub fails and hinted handoff also fails: reinject job back to mysql
				log.Error("%s: %s", this.ident, err)
				this.mc.Exec(jm.AppPool, this.table, this.aid, sqlReinject,
					item.JobId, item.Payload, item.Ctime, item.DueTime)
				continue
			}

			log.Debug("%s fired %s", this.ident, item)
			this.auditor.Trace(item.String())

			// mv job to archive table
			_, _, err = this.mc.Exec(jm.AppPool, this.table, this.aid, sqlInsertArchive,
				item.JobId, item.Payload, item.Ctime, item.DueTime, now.Unix(), this.parentId)
			if err != nil {
				log.Error("%s: %s", this.ident, err)
			} else {
				log.Debug("%s archived %s", this.ident, item)
			}

		}
	}
}
Developer: funkygao, Project: gafka, Lines: 60, Source: exe_job.go


Example 6: LoadConfig

func (this *ConfigProxy) LoadConfig(selfAddr string, cf *conf.Conf) {
	if selfAddr == "" {
		panic("proxy self addr unknown")
	}
	this.PoolCapacity = cf.Int("pool_capacity", 10)
	this.IdleTimeout = cf.Duration("idle_timeout", 0)
	this.IoTimeout = cf.Duration("io_timeout", time.Second*10)
	this.BorrowTimeout = cf.Duration("borrow_timeout", time.Second*10)
	this.DiagnosticInterval = cf.Duration("diagnostic_interval", time.Second*5)
	this.TcpNoDelay = cf.Bool("tcp_nodelay", true)
	this.BufferSize = cf.Int("buffer_size", 4<<10)
	this.SelfAddr = selfAddr
	parts := strings.SplitN(this.SelfAddr, ":", 2)
	if parts[0] == "" {
		// auto get local ip when self_addr like ":9001"
		ips, _ := ip.LocalIpv4Addrs()
		if len(ips) == 0 {
			panic("cannot get local ip address")
		}

		this.SelfAddr = ips[0] + ":" + parts[1]
	}

	log.Debug("proxy conf: %+v", *this)
}
Developer: lucmichalski, Project: fae, Lines: 25, Source: proxy.go


Example 7: LoadConfig

func (this *ConfigMongodb) LoadConfig(cf *conf.Conf) {
	this.ShardBaseNum = cf.Int("shard_base_num", 100000)
	this.DebugProtocol = cf.Bool("debug_protocol", false)
	this.DebugHeartbeat = cf.Bool("debug_heartbeat", false)
	this.ShardStrategy = cf.String("shard_strategy", "legacy")
	this.ConnectTimeout = cf.Duration("connect_timeout", 4*time.Second)
	this.IoTimeout = cf.Duration("io_timeout", 30*time.Second)
	this.MaxIdleConnsPerServer = cf.Int("max_idle_conns_per_server", 2)
	this.MaxConnsPerServer = cf.Int("max_conns_per_server",
		this.MaxIdleConnsPerServer*5)
	this.HeartbeatInterval = cf.Int("heartbeat_interval", 120)
	section, err := cf.Section("breaker")
	if err == nil {
		this.Breaker.loadConfig(section)
	}
	this.Servers = make(map[string]*ConfigMongodbServer)
	for i := 0; i < len(cf.List("servers", nil)); i++ {
		section, err := cf.Section(fmt.Sprintf("servers[%d]", i))
		if err != nil {
			panic(err)
		}

		server := new(ConfigMongodbServer)
		server.ShardBaseNum = this.ShardBaseNum
		server.loadConfig(section)
		this.Servers[server.Pool] = server
	}

	log.Debug("mongodb conf: %+v", *this)
}
Developer: lucmichalski, Project: fae, Lines: 30, Source: mongodb.go


Example 8: processConsumerOffsetsMessage

// consume topic: __consumer_offsets and process the message to get offsets of consumers
func (this *ZkCluster) processConsumerOffsetsMessage(msg *sarama.ConsumerMessage) {
	var keyver, valver uint16
	var partition uint32
	var offset, timestamp uint64

	buf := bytes.NewBuffer(msg.Key)
	err := binary.Read(buf, binary.BigEndian, &keyver)
	if (err != nil) || ((keyver != 0) && (keyver != 1)) {
		log.Warn("Failed to decode %s:%v offset %v: keyver", msg.Topic, msg.Partition, msg.Offset)
		return
	}
	group, err := readString(buf)
	if err != nil {
		log.Warn("Failed to decode %s:%v offset %v: group", msg.Topic, msg.Partition, msg.Offset)
		return
	}
	topic, err := readString(buf)
	if err != nil {
		log.Warn("Failed to decode %s:%v offset %v: topic", msg.Topic, msg.Partition, msg.Offset)
		return
	}
	err = binary.Read(buf, binary.BigEndian, &partition)
	if err != nil {
		log.Warn("Failed to decode %s:%v offset %v: partition", msg.Topic, msg.Partition, msg.Offset)
		return
	}

	buf = bytes.NewBuffer(msg.Value)
	err = binary.Read(buf, binary.BigEndian, &valver)
	if (err != nil) || ((valver != 0) && (valver != 1)) {
		log.Warn("Failed to decode %s:%v offset %v: valver", msg.Topic, msg.Partition, msg.Offset)
		return
	}
	err = binary.Read(buf, binary.BigEndian, &offset)
	if err != nil {
		log.Warn("Failed to decode %s:%v offset %v: offset", msg.Topic, msg.Partition, msg.Offset)
		return
	}
	_, err = readString(buf)
	if err != nil {
		log.Warn("Failed to decode %s:%v offset %v: metadata", msg.Topic, msg.Partition, msg.Offset)
		return
	}
	err = binary.Read(buf, binary.BigEndian, &timestamp)
	if err != nil {
		log.Warn("Failed to decode %s:%v offset %v: timestamp", msg.Topic, msg.Partition, msg.Offset)
		return
	}

	partitionOffset := &PartitionOffset{
		Cluster:   this.Name(),
		Topic:     topic,
		Partition: int32(partition),
		Group:     group,
		Timestamp: int64(timestamp),
		Offset:    int64(offset),
	}
	log.Debug("%+v", partitionOffset)
	return
}
Developer: funkygao, Project: gafka, Lines: 61, Source: zkcluster.go


Example 9: commitOffsets

func (this *subServer) commitOffsets() {
	for cluster, clusterTopic := range this.ackedOffsets {
		zkcluster := meta.Default.ZkCluster(cluster)

		for topic, groupPartition := range clusterTopic {
			for group, partitionOffset := range groupPartition {
				for partition, offset := range partitionOffset {
					if offset == -1 {
						// this slot is empty
						continue
					}

					log.Debug("cluster[%s] group[%s] commit offset {T:%s/%d O:%d}", cluster, group, topic, partition, offset)

					if err := zkcluster.ResetConsumerGroupOffset(topic, group, strconv.Itoa(partition), offset); err != nil {
						log.Error("cluster[%s] group[%s] commit offset {T:%s/%d O:%d} %v", cluster, group, topic, partition, offset, err)

						if err == zk.ErrNoNode {
							// invalid offset commit request, will not retry
							this.ackedOffsets[cluster][topic][group][partition] = -1
						}
					} else {
						// mark this slot empty
						this.ackedOffsets[cluster][topic][group][partition] = -1
					}
				}
			}
		}
	}

}
Developer: funkygao, Project: gafka, Lines: 31, Source: server_sub.go


Example 10: newMysql

func newMysql(dsn string, maxStmtCached int, bc *config.ConfigBreaker) *mysql {
	this := new(mysql)
	if bc == nil {
		bc = &config.ConfigBreaker{
			FailureAllowance: 5,
			RetryTimeout:     time.Second * 10,
		}
	}
	this.dsn = dsn
	this.breaker = &breaker.Consecutive{
		FailureAllowance: bc.FailureAllowance,
		RetryTimeout:     bc.RetryTimeout}
	if maxStmtCached > 0 {
		this.stmtsStore = cache.NewLruCache(maxStmtCached)
		this.stmtsStore.OnEvicted = func(key cache.Key, value interface{}) {
			query := key.(string)
			stmt := value.(*sql.Stmt)
			stmt.Close()

			log.Debug("[%s] stmt[%s] closed", this.dsn, query)
		}
	}

	return this
}
Developer: lucmichalski, Project: fae, Lines: 25, Source: mysql.go


Example 11: diagnose

func (this *Ping) diagnose() {
	this.zkzone.ForSortedClusters(func(zkcluster *zk.ZkCluster) {
		registeredBrokers := zkcluster.RegisteredInfo().Roster
		for _, broker := range registeredBrokers {
			log.Debug("ping %s", broker.Addr())

			kfk, err := sarama.NewClient([]string{broker.Addr()}, sarama.NewConfig())
			if err != nil {
				log.Error("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Red(err.Error()))

				continue
			}

			_, err = kfk.Topics() // kafka didn't provide ping, so use Topics() as ping
			if err != nil {
				log.Error("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Red(err.Error()))
			} else {
				if !this.problematicMode {
					log.Info("%25s %30s %s", broker.Addr(), broker.NamedAddr(), color.Green("ok"))
				}
			}
			kfk.Close()
		}
	})

}
Developer: chendx79, Project: gafka, Lines: 26, Source: ping.go


Example 12: LoadConfig

func (this *ConfigMemcache) LoadConfig(cf *conf.Conf) {
	this.Servers = make(map[string]*ConfigMemcacheServer)
	this.HashStrategy = cf.String("hash_strategy", "standard")
	this.Timeout = cf.Duration("timeout", 4*time.Second)
	this.ReplicaN = cf.Int("replica_num", 1)
	section, err := cf.Section("breaker")
	if err == nil {
		this.Breaker.loadConfig(section)
	}
	this.MaxIdleConnsPerServer = cf.Int("max_idle_conns_per_server", 3)
	this.MaxConnsPerServer = cf.Int("max_conns_per_server",
		this.MaxIdleConnsPerServer*10)
	for i := 0; i < len(cf.List("servers", nil)); i++ {
		section, err := cf.Section(fmt.Sprintf("servers[%d]", i))
		if err != nil {
			panic(err)
		}

		server := new(ConfigMemcacheServer)
		server.loadConfig(section)
		this.Servers[server.Address()] = server
	}

	log.Debug("memcache conf: %+v", *this)
}
Developer: lucmichalski, Project: fae, Lines: 25, Source: memcache.go


Example 13: discoverPeers

func (this *Peer) discoverPeers() {
	defer func() {
		this.c.Close() // leave the multicast group
	}()

	var msg peerMessage
	reader := bufio.NewReader(this.c)
	for {
		// net.ListenMulticastUDP sets IP_MULTICAST_LOOP=0 by
		// default, so you never receive your own sent data
		// if you run both the sender and the receiver on the (logically) same IP host
		line, _, err := reader.ReadLine()
		if err != nil {
			log.Error(err)
			continue
		}

		if err := msg.unmarshal(line); err != nil {
			// Not our protocol, it may be SSDP or else
			continue
		}

		log.Debug("received peer: %+v", msg)

		neighborIp, present := msg["ip"]
		if !present {
			log.Info("Peer msg has no 'ip'")
			continue
		}

		this.refreshNeighbor(neighborIp.(string))
	}
}
Developer: justinblah, Project: fae, Lines: 33, Source: peer.go


Example 14: Close

// Close must be called before Recycle
func (this *syncProducerClient) Close() {
	log.Debug("cluster[%s] closing kafka sync client: %d", this.cluster, this.id)

	// will close the producer and the kafka tcp conn
	this.SyncProducer.Close()
	this.closed = true
}
Developer: chendx79, Project: gafka, Lines: 8, Source: pubclient.go


Example 15: LoadConfig

func (this *ConfigRedis) LoadConfig(cf *conf.Conf) {
	section, err := cf.Section("breaker")
	if err == nil {
		this.Breaker.loadConfig(section)
	}

	this.Servers = make(map[string]map[string]*ConfigRedisServer)
	for i := 0; i < len(cf.List("pools", nil)); i++ {
		section, err := cf.Section(fmt.Sprintf("pools[%d]", i))
		if err != nil {
			panic(err)
		}

		pool := section.String("name", "")
		if pool == "" {
			panic("Empty redis pool name")
		}

		this.Servers[pool] = make(map[string]*ConfigRedisServer)

		// get servers in each pool
		for j := 0; j < len(section.List("servers", nil)); j++ {
			server, err := section.Section(fmt.Sprintf("servers[%d]", j))
			if err != nil {
				panic(err)
			}

			redisServer := new(ConfigRedisServer)
			redisServer.loadConfig(server)
			this.Servers[pool][redisServer.Addr] = redisServer
		}
	}

	log.Debug("redis conf: %+v", *this)
}
Developer: lucmichalski, Project: fae, Lines: 35, Source: redis.go


Example 16: loadSegments

// loadSegments loads all in-range segments on disk
// FIXME manage q.inflights counter while loading segments
func (q *queue) loadSegments(minId uint64) (segments, error) {
	segments := []*segment{}

	files, err := ioutil.ReadDir(q.dir)
	if err != nil {
		return segments, err
	}

	for _, segment := range files {
		if segment.IsDir() || segment.Name() == cursorFile {
			continue
		}

		// segment file names are all numeric
		id, err := strconv.ParseUint(segment.Name(), 10, 64)
		if err != nil {
			log.Error("queue[%s] segment:%s %s", q.ident(), segment.Name(), err)
			continue
		}
		if id < minId {
			log.Debug("queue[%s] skip stale segment:%s", q.ident(), segment.Name())
			continue
		}

		segment, err := newSegment(id, filepath.Join(q.dir, segment.Name()), q.maxSegmentSize)
		if err != nil {
			return segments, err
		}

		segments = append(segments, segment)
	}
	return segments, nil
}
Developer: funkygao, Project: gafka, Lines: 35, Source: queue.go


Example 17: watchTopicPartitionsChange

// watchTopicPartitionsChange watch partition changes on a topic.
func (cg *ConsumerGroup) watchTopicPartitionsChange(topic string, stopper <-chan struct{},
	topicPartitionsChanged chan<- string, outstanding *sync.WaitGroup) {
	defer outstanding.Done()

	_, ch, err := cg.kazoo.Topic(topic).WatchPartitions()
	if err != nil {
		if err == zk.ErrNoNode {
			err = ErrInvalidTopic
		}
		log.Error("[%s/%s] topic[%s] watch partitions: %s", cg.group.Name, cg.shortID(), topic, err)
		cg.emitError(err, topic, -1)
		return
	}

	var (
		backoff    = time.Duration(5)
		maxRetries = 3
	)
	select {
	case <-cg.stopper:
		return

	case <-stopper:
		return

	case <-ch:
		// when partitions scale up, the zk node might not be completely ready, so wait for it to become ready
		//
		// even if zk node ready, kafka broker might not be ready:
		// kafka server: Request was for a topic or partition that does not exist on this broker
		// so we blindly wait: should be enough for most cases
		// in rare cases, that is still not enough: imagine partitions 1->1000, which takes long
		// ok, just return that err to client to retry
		time.Sleep(time.Second * backoff)
		for retries := 0; retries < maxRetries; retries++ {
			// retrieve brokers/topics/{topic}/partitions/{partition}/state and find the leader broker id
			// the new partitions state znode might not be ready yet
			if partitions, err := cg.kazoo.Topic(topic).Partitions(); err == nil {
				if _, err = retrievePartitionLeaders(partitions); err == nil {
					log.Debug("[%s/%s] topic[%s] partitions change complete", cg.group.Name, cg.shortID(), topic)
					break
				} else {
					log.Warn("[%s/%s] topic[%s] partitions change retry#%d waiting: %v", cg.group.Name, cg.shortID(), topic, retries, err)
					backoff-- // don't worry if negative
					time.Sleep(time.Second * backoff)
				}
			} else {
				log.Warn("[%s/%s] topic[%s] partitions change retry#%d waiting: %v", cg.group.Name, cg.shortID(), topic, retries, err)
				backoff--
				time.Sleep(time.Second * backoff)
			}
		}

		// safe to trigger rebalance
		select {
		case topicPartitionsChanged <- topic:
		default:
		}
	}
}
Developer: funkygao, Project: kafka-cg, Lines: 61, Source: consumer_group.go


Example 18: LoadConfig

func (this *ConfigLock) LoadConfig(cf *conf.Conf) {
	this.MaxItems = cf.Int("max_items", 1<<20)
	this.Expires = cf.Duration("expires", time.Second*10)

	this.enabled = true

	log.Debug("lock conf: %+v", *this)
}
Developer: lucmichalski, Project: fae, Lines: 8, Source: lock.go


Example 19: loadConfig

func loadConfig(cf *conf.Conf) {
	config.etcServers = cf.StringList("etcd_servers", nil)
	config.faeTemplateFile = cf.String("fae_template_file", "")
	config.faeTargetFile = cf.String("fae_target_file", "")
	config.maintainTemplateFile = cf.String("maintain_template_file", "")
	config.maintainTargetFile = cf.String("maintain_target_file", "")

	log.Debug("config: %+v", config)
}
Developer: lucmichalski, Project: fae, Lines: 9, Source: option.go


Example 20: Close

func (c *fastListenerConn) Close() error {
	log.Debug("%s closing conn from %s", c.Conn.LocalAddr(), c.Conn.RemoteAddr())

	err := c.Conn.Close()
	if c.gw != nil && !Options.DisableMetrics {
		c.gw.svrMetrics.ConcurrentConns.Dec(1)
	}
	return err
}
Developer: funkygao, Project: gafka, Lines: 9, Source: listener.go



Note: The github.com/funkygao/log4go.Debug examples in this article are collected from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.

