
Golang logp.Debug Function Code Examples


This article collects typical usage examples of the Debug function from the Go package github.com/elastic/beats/libbeat/logp. If you have been wondering how logp.Debug works, how to call it, or what real-world usage looks like, the curated code examples below should help.



Twenty code examples of the Debug function are shown below, sorted by popularity by default.
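Before the numbered examples, here is a minimal standalone sketch of how logp.Debug is typically called. This is an illustrative assumption, not code from any of the projects below: the selector name "example", the message, and the direct call to logp.LogInit are made up for the sketch; in a real Beat, logging is initialized by the libbeat framework and debug selectors are normally enabled with the -d command-line flag.

package main

import "github.com/elastic/beats/libbeat/logp"

func main() {
	// Assumed initialization for a standalone program: log at debug level to
	// stderr and enable only the "example" debug selector ("*" would enable all).
	logp.LogInit(logp.LOG_DEBUG, "", false, true, []string{"example"})

	// Debug takes a selector, a printf-style format string, and its arguments.
	// The message is emitted only when the selector is enabled.
	logp.Debug("example", "processing %d events from %s", 3, "input.log")
}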

Example 1: writeRegistry

// writeRegistry writes the new json registry file to disk.
func (r *Registrar) writeRegistry() error {
	logp.Debug("registrar", "Write registry file: %s", r.registryFile)

	tempfile := r.registryFile + ".new"
	f, err := os.Create(tempfile)
	if err != nil {
		logp.Err("Failed to create tempfile (%s) for writing: %s", tempfile, err)
		return err
	}

	// First clean up states
	states := r.states.GetStates()

	encoder := json.NewEncoder(f)
	err = encoder.Encode(states)
	if err != nil {
		logp.Err("Error when encoding the states: %s", err)
		return err
	}

	// Close the file directly because Windows cannot rename an open file
	f.Close()

	logp.Debug("registrar", "Registry file updated. %d states written.", len(states))
	registryWrites.Add(1)
	statesCurrent.Set(int64(len(states)))

	return file.SafeFileRotate(r.registryFile, tempfile)
}
Developer: Zhoutall, Project: beats, Lines: 30, Source: registrar.go


Example 2: harvestExistingFile

// harvestExistingFile continues harvesting a file with a known state if needed
func (p *ProspectorLog) harvestExistingFile(newState file.State, oldState file.State) {

	logp.Debug("prospector", "Update existing file for harvesting: %s, offset: %v", newState.Source, oldState.Offset)

	// TODO: check for ignore_older reached? or should that happen in scan already?

	// No harvester is running for the file, start a new harvester
	// It is important here that only the size is checked and not modification time, as modification time could be incorrect on windows
	// https://blogs.technet.microsoft.com/asiasupp/2010/12/14/file-date-modified-property-are-not-updating-while-modifying-a-file-without-closing-it/
	if oldState.Finished && newState.Fileinfo.Size() > oldState.Offset {
		// Resume harvesting of an old file we've stopped harvesting from
		// This could also be an issue with force_close_older that a new harvester is started after each scan but not needed?
		// One problem with comparing modTime is that it is in seconds, and scans can happen more than once a second
		logp.Debug("prospector", "Resuming harvesting of file: %s, offset: %v", newState.Source, oldState.Offset)
		p.Prospector.startHarvester(newState, oldState.Offset)

	} else if oldState.Source != "" && oldState.Source != newState.Source {
		// This does not start a new harvester, as it is assumed that the old harvester is still running
		// or no new lines were detected. It sends only an event status update to make sure the new name is persisted.
		logp.Debug("prospector", "File rename was detected, updating state: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset)

		h, _ := p.Prospector.createHarvester(newState)
		h.SetOffset(oldState.Offset)

		// Update state because of file rotation
		h.SendStateUpdate()
	} else {
		// TODO: improve logging dependent on what the exact reason is that harvesting does not continue
		// Nothing to do. Harvester is still running and file was not renamed
		logp.Debug("prospector", "No updates needed, file %s is already harvested.", newState.Source)
	}
}
Developer: yhyang1, Project: beats, Lines: 33, Source: prospector_log.go


Example 3: checkNewFile

// Check if harvester for new file has to be started
// For a new file the following options exist:
func (p ProspectorLog) checkNewFile(h *harvester.Harvester) {

	logp.Debug("prospector", "Start harvesting unknown file: %s", h.Path)

	// Fetch the last known state for the given file from the registrar, if one exists
	offset, resuming := p.Prospector.registrar.fetchState(h.Path, h.Stat.Fileinfo)

	if p.checkOldFile(h) {

		logp.Debug("prospector", "Fetching old state of file to resume: %s", h.Path)

		// Are we resuming a dead file? We have to resume even if dead so we catch any old updates to the file
		// This is safe as the harvester, once it hits the EOF and a timeout, will stop harvesting
		// Once we detect changes again we can resume another harvester again - this keeps number of go routines to a minimum
		if resuming {
			logp.Debug("prospector", "Resuming harvester on a previously harvested file: %s", h.Path)
			p.resumeHarvesting(h, offset)
		} else {
			// Old file, skip it, but push offset of file size so we start from the end if this file changes and needs picking up
			logp.Debug("prospector", "Skipping file (older than ignore older of %v, %v): %s",
				p.config.IgnoreOlderDuration,
				time.Since(h.Stat.Fileinfo.ModTime()),
				h.Path)
			h.Stat.Skip(h.Stat.Fileinfo.Size())
		}
	} else if previousFile, err := p.getPreviousFile(h.Path, h.Stat.Fileinfo); err == nil {
		p.continueExistingFile(h, previousFile)
	} else {
		p.resumeHarvesting(h, offset)
	}
}
Developer: jarpy, Project: beats, Lines: 33, Source: prospector_log.go


Example 4: checkExistingFile

// checkExistingFile checks if a harvester has to be started for an already known file
// For existing files the following options exist:
// * Last reading position is 0, no harvester has to be started as the old harvester is probably still busy
// * The old known modification time is older than the current one. Start at last known position
// * The new file is not the same as the old file, means file was renamed
// ** New file is actually really a new file, start a new harvester
// ** Renamed file has a state, continue there
func (p ProspectorLog) checkExistingFile(h *harvester.Harvester, newFile *input.File, oldFile *input.File) {

	logp.Debug("prospector", "Update existing file for harvesting: %s", h.Path)

	// We assumed it was the same file, but it wasn't
	if !oldFile.IsSameFile(newFile) {

		logp.Debug("prospector", "File previously found: %s", h.Path)

		if previousFile, err := p.getPreviousFile(h.Path, h.Stat.Fileinfo); err == nil {
			p.continueExistingFile(h, previousFile)
		} else {
			// File is not the same file we saw previously, it must have rotated and is a new file
			logp.Debug("prospector", "Launching harvester on rotated file: %s", h.Path)

			// Forget about the previous harvester and let it continue on the old file - so start a new channel to use with the new harvester
			h.Stat.Ignore()

			// Start a new harvester on the path
			h.Start()
		}

		// Keep the old file in missingFiles so we don't rescan it if it was renamed and we've not yet reached the new filename
		// We only need to keep it for the remainder of this iteration then we can assume it was deleted and forget about it
		p.missingFiles[h.Path] = oldFile.FileInfo

	} else if h.Stat.Finished() && oldFile.FileInfo.ModTime() != h.Stat.Fileinfo.ModTime() {
		// Resume harvesting of an old file we've stopped harvesting from
		// Start a harvester on the path; a file was just modified and it doesn't have a harvester
		// The offset to continue from will be stored in the harvester channel - so take that to use and also clear the channel
		p.resumeHarvesting(h, <-h.Stat.Return)
	} else {
		logp.Debug("prospector", "Not harvesting, file didn't change: %s", h.Path)
	}
}
Developer: jarpy, Project: beats, Lines: 42, Source: prospector_log.go


Example 5: FindProcessesTuple

func (proc *ProcessesWatcher) FindProcessesTuple(tuple *common.IPPortTuple) (procTuple *common.CmdlineTuple) {
	procTuple = &common.CmdlineTuple{}

	if !proc.readFromProc {
		return
	}

	if proc.isLocalIP(tuple.SrcIP) {
		logp.Debug("procs", "Looking for port %d", tuple.SrcPort)
		procTuple.Src = []byte(proc.findProc(tuple.SrcPort))
		if len(procTuple.Src) > 0 {
			logp.Debug("procs", "Found device %s for port %d", procTuple.Src, tuple.SrcPort)
		}
	}

	if proc.isLocalIP(tuple.DstIP) {
		logp.Debug("procs", "Looking for port %d", tuple.DstPort)
		procTuple.Dst = []byte(proc.findProc(tuple.DstPort))
		if len(procTuple.Dst) > 0 {
			logp.Debug("procs", "Found device %s for port %d", procTuple.Dst, tuple.DstPort)
		}
	}

	return
}
Developer: ruflin, Project: beats, Lines: 25, Source: procs.go


Example 6: Run

// Run runs the spooler
// It heartbeats periodically. If the last flush was longer than
// 'IdleTimeoutDuration' time ago, then we'll force a flush to prevent us from
// holding on to spooled events for too long.
func (s *Spooler) Run() {

	config := &s.Filebeat.FbConfig.Filebeat

	// Set up the ticker channel
	ticker := time.NewTicker(config.IdleTimeoutDuration / 2)

	s.spool = make([]*input.FileEvent, 0, config.SpoolSize)

	logp.Info("Starting spooler: spool_size: %v; idle_timeout: %s", config.SpoolSize, config.IdleTimeoutDuration)

	// Loops until running is set to false
	for {
		select {

		case <-s.exit:
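			// Note: in Go this break only exits the select statement, not the enclosing for loop.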
			break
		case event := <-s.Channel:
			s.spool = append(s.spool, event)

			// Spooler is full -> flush
			if len(s.spool) == cap(s.spool) {
				logp.Debug("spooler", "Flushing spooler because spooler full. Events flushed: %v", len(s.spool))
				s.flush()
			}
		case <-ticker.C:
			// Flush periodically
			if time.Now().After(s.nextFlushTime) {
				logp.Debug("spooler", "Flushing spooler because of timeout. Events flushed: %v", len(s.spool))
				s.flush()
			}
		}
	}
}
Developer: QunarBlackOps, Project: beats, Lines: 38, Source: spooler.go


Example 7: Config

func (fs *FileSizeBeat) Config(b *beat.Beat) error {
	err := cfgfile.Read(&fs.config, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	if fs.config.Input.Period != nil {
		fs.period = time.Duration(*fs.config.Input.Period) * time.Second
	} else {
		fs.period = 10 * time.Second
	}
	logp.Debug("filesizebeat", "Period %v\n", fs.period)

	if fs.config.Input.Paths != nil {
		//fs.paths = make([]Path, len(*fs.config.Input.Paths))
		for _, path := range *fs.config.Input.Paths {
			err := fs.AddPath(path)
			if err != nil {
				logp.Critical("Error: %v", err)
				os.Exit(1)
			}
		}
		logp.Debug("filesizebeat", "Paths : %v\n", fs.paths)
	} else {
		logp.Critical("Error: no paths specified, cannot continue!")
		os.Exit(1)
	}
	return nil
}
Developer: RemiDesgrange, Project: FileSizeBeat, Lines: 30, Source: filesizebeat.go


Example 8: harvestExistingFile

// harvestExistingFile continues harvesting a file with a known state if needed
func (p *ProspectorLog) harvestExistingFile(newState input.FileState, oldState input.FileState) {

	logp.Debug("prospector", "Update existing file for harvesting: %s, offset: %v", newState.Source, oldState.Offset)

	// No harvester is running for the file, start a new harvester
	if oldState.Finished {
		// TODO: should we check if modtime old / new is the same -> no harvester has to be started -> prevent duplicates?
		// Resume harvesting of an old file we've stopped harvesting from
		// This could also be an issue with force_close_older that a new harvester is started after each scan but not needed?
		// One problem with comparing modTime is that it is in seconds, and scans can happen more than once a second
		logp.Debug("prospector", "Resuming harvesting of file: %s, offset: %v", newState.Source, oldState.Offset)
		p.Prospector.startHarvester(newState, oldState.Offset)

	} else if oldState.Source != "" && oldState.Source != newState.Source {
		logp.Debug("prospector", "File rename was detected, updating state: %s -> %s, Current offset: %v", oldState.Source, newState.Source, oldState.Offset)

		h, _ := p.Prospector.createHarvester(newState)
		h.SetOffset(oldState.Offset)
		// Update state because of file rotation
		h.SendStateUpdate()
	} else {
		// TODO: It could be that a harvester is still harvesting the file?
		logp.Debug("prospector", "Not harvesting, file didn't change: %s", newState.Source)
	}
}
Developer: mrkschan, Project: beats, Lines: 26, Source: prospector_log.go


Example 9: Run

func (p *ProspectorLog) Run() {
	logp.Debug("prospector", "Start next scan")

	p.scan()

	// It is important that a first scan is run before cleanup to make sure all new states are read first
	if p.config.CleanInactive > 0 || p.config.CleanRemoved {
		beforeCount := p.Prospector.states.Count()
		p.Prospector.states.Cleanup()
		logp.Debug("prospector", "Prospector states cleaned up. Before: %d, After: %d", beforeCount, p.Prospector.states.Count())
	}

	// Cleanup of removed files will only happen after next scan. Otherwise it can happen that not all states
	// were updated before cleanup is called
	if p.config.CleanRemoved {
		for _, state := range p.Prospector.states.GetStates() {
			// os.Stat will return an error in case the file does not exist
			_, err := os.Stat(state.Source)
			if err != nil {
				state.TTL = 0
				event := input.NewEvent(state)
				p.Prospector.harvesterChan <- event
				logp.Debug("prospector", "Remove state for file as file removed: %s", state.Source)
			}
		}
	}
}
Developer: ChongFeng, Project: beats, Lines: 27, Source: prospector_log.go


Example 10: Config

func (d *Dockerbeat) Config(b *beat.Beat) error {

	err := cfgfile.Read(&d.TbConfig, "")
	if err != nil {
		logp.Err("Error reading configuration file: %v", err)
		return err
	}

	//init the period
	if d.TbConfig.Input.Period != nil {
		d.period = time.Duration(*d.TbConfig.Input.Period) * time.Second
	} else {
		d.period = 1 * time.Second
	}
	//init the socket
	if d.TbConfig.Input.Socket != nil {
		d.socket = *d.TbConfig.Input.Socket
	} else {
		d.socket = "unix:///var/run/docker.sock" // default docker socket location
	}

	logp.Debug("dockerbeat", "Init dockerbeat")
	logp.Debug("dockerbeat", "Follow docker socket %q\n", d.socket)
	logp.Debug("dockerbeat", "Period %v\n", d.period)

	return nil
}
Developer: hmalphettes, Project: dockerbeat, Lines: 27, Source: dockerbeat.go


Example 11: GapInStream

func (dns *Dns) GapInStream(tcpTuple *common.TcpTuple, dir uint8, nbytes int, private protos.ProtocolData) (priv protos.ProtocolData, drop bool) {
	if private == nil {
		return private, true
	}
	conn, ok := private.(*dnsConnectionData)
	if !ok {
		return private, false
	}
	stream := conn.Data[dir]

	if stream == nil || stream.message == nil {
		return private, false
	}

	decodedData, err := stream.handleTcpRawData()

	if err == nil {
		dns.messageComplete(conn, tcpTuple, dir, decodedData)
		return private, true
	}

	if dir == tcp.TcpDirectionReverse {
		dns.publishResponseError(conn, err)
	}

	logp.Debug("dns", "%s addresses %s, length %d", err.Error(),
		tcpTuple.String(), len(stream.rawData))
	logp.Debug("dns", "Dropping the stream %s", tcpTuple.String())

	// drop the stream because it is binary Data and it would be unexpected to have a decodable message later
	return private, true
}
Developer: jarpy, Project: beats, Lines: 32, Source: dns_tcp.go


Example 12: Start

func (c *Crawler) Start(states file.States) error {

	logp.Info("Loading Prospectors: %v", len(c.prospectorConfigs))

	// Prospect the globs/paths given on the command line and launch harvesters
	for _, prospectorConfig := range c.prospectorConfigs {

		prospector, err := prospector.NewProspector(prospectorConfig, states, c.spooler.Channel)
		if err != nil {
			return fmt.Errorf("Error in initing prospector: %s", err)
		}
		c.prospectors = append(c.prospectors, prospector)
	}

	logp.Info("Loading Prospectors completed. Number of prospectors: %v", len(c.prospectors))

	for i, p := range c.prospectors {
		c.wg.Add(1)

		go func(id int, prospector *prospector.Prospector) {
			defer func() {
				c.wg.Done()
				logp.Debug("crawler", "Prospector %v stopped", id)
			}()
			logp.Debug("crawler", "Starting prospector %v", id)
			prospector.Run()
		}(i, p)
	}

	logp.Info("All prospectors are initialised and running with %d states to persist", states.Count())

	return nil
}
Developer: ChongFeng, Project: beats, Lines: 33, Source: crawler.go


Example 13: initFileOffset

func (h *Harvester) initFileOffset(file *os.File) error {
	offset, err := file.Seek(0, os.SEEK_CUR)

	if h.getOffset() > 0 {
		// continue from last known offset

		logp.Debug("harvester",
			"harvest: %q position:%d (offset snapshot:%d)", h.Path, h.getOffset(), offset)
		_, err = file.Seek(h.getOffset(), os.SEEK_SET)
	} else if h.Config.TailFiles {
		// tail file if file is new and tail_files config is set

		logp.Debug("harvester",
			"harvest: (tailing) %q (offset snapshot:%d)", h.Path, offset)
		offset, err = file.Seek(0, os.SEEK_END)
		h.SetOffset(offset)

	} else {
		// Get the offset from the file in case the encoding factory was
		// required to read some data.
		logp.Debug("harvester", "harvest: %q (offset snapshot:%d)", h.Path, offset)
		h.SetOffset(offset)
	}

	return err
}
Developer: yhyang1, Project: beats, Lines: 26, Source: log.go


Example 14: FindSocketsOfPid

func FindSocketsOfPid(prefix string, pid int) (inodes []int64, err error) {

	dirname := filepath.Join(prefix, "/proc", strconv.Itoa(pid), "fd")
	procfs, err := os.Open(dirname)
	if err != nil {
		return []int64{}, fmt.Errorf("Open: %s", err)
	}
	defer procfs.Close()
	names, err := procfs.Readdirnames(0)
	if err != nil {
		return []int64{}, fmt.Errorf("Readdirnames: %s", err)
	}

	for _, name := range names {
		link, err := os.Readlink(filepath.Join(dirname, name))
		if err != nil {
			logp.Debug("procs", "Readlink %s: %s", name, err)
			continue
		}

		if strings.HasPrefix(link, "socket:[") {
			inode, err := strconv.ParseInt(link[8:len(link)-1], 10, 64)
			if err != nil {
				logp.Debug("procs", "ParseInt: %s:", err)
				continue
			}

			inodes = append(inodes, int64(inode))
		}
	}

	return inodes, nil
}
Developer: ChongFeng, Project: beats, Lines: 33, Source: procs.go


Example 15: isSpecialPgsqlCommand

func isSpecialPgsqlCommand(data []byte) (bool, int) {

	if len(data) < 8 {
		// 8 bytes required
		return false, 0
	}

	// read length
	length := int(common.Bytes_Ntohl(data[0:4]))

	// read command identifier
	code := int(common.Bytes_Ntohl(data[4:8]))

	if length == 16 && code == 80877102 {
		// Cancel Request
		logp.Debug("pgsqldetailed", "Cancel Request, length=%d", length)
		return true, CancelRequest
	} else if length == 8 && code == 80877103 {
		// SSL Request
		logp.Debug("pgsqldetailed", "SSL Request, length=%d", length)
		return true, SSLRequest
	} else if code == 196608 {
		// Startup Message
		logp.Debug("pgsqldetailed", "Startup Message, length=%d", length)
		return true, StartupMessage
	}
	return false, 0
}
Developer: davidsoloman, Project: beats, Lines: 28, Source: pgsql.go


Example 16: convertReplyToMap

// convertReplyToMap converts a bulk string reply from Redis to a map
func convertReplyToMap(s string) (map[string]string, error) {
	var info map[string]string
	info = make(map[string]string)

	// Regex for INFO type property
	infoRegex := `^\s*#\s*\w+\s*$`
	r, err := regexp.Compile(infoRegex)
	if err != nil {
		return nil, errors.New("Regex failed to compile")
	}

	// http://redis.io/topics/protocol#bulk-string-reply
	a := strings.Split(s, "\r\n")

	for _, v := range a {
		if r.MatchString(v) || v == "" {
			logp.Debug("redisbeat", "Skipping reply string - \"%v\"", v)
			continue
		}
		entry := strings.Split(v, ":")
		logp.Debug("redisbeat", "Entry: %#v\n", entry)
		info[entry[0]] = entry[1]
	}
	return info, nil
}
Developer: chrsblck, Project: redisbeat, Lines: 26, Source: redisbeat.go


Example 17: FindProcessesTuple

func (proc *ProcessesWatcher) FindProcessesTuple(tuple *common.IpPortTuple) (proc_tuple *common.CmdlineTuple) {
	proc_tuple = &common.CmdlineTuple{}

	if !proc.ReadFromProc {
		return
	}

	if proc.IsLocalIp(tuple.Src_ip) {
		logp.Debug("procs", "Looking for port %d", tuple.Src_port)
		proc_tuple.Src = []byte(proc.FindProc(tuple.Src_port))
		if len(proc_tuple.Src) > 0 {
			logp.Debug("procs", "Found device %s for port %d", proc_tuple.Src, tuple.Src_port)
		}
	}

	if proc.IsLocalIp(tuple.Dst_ip) {
		logp.Debug("procs", "Looking for port %d", tuple.Dst_port)
		proc_tuple.Dst = []byte(proc.FindProc(tuple.Dst_port))
		if len(proc_tuple.Dst) > 0 {
			logp.Debug("procs", "Found device %s for port %d", proc_tuple.Dst, tuple.Dst_port)
		}
	}

	return
}
Developer: ChongFeng, Project: beats, Lines: 25, Source: procs.go


Example 18: scan

// Scan starts a scanGlob for each provided path/glob
func (p *ProspectorLog) scan() {

	for path, info := range p.getFiles() {

		logp.Debug("prospector", "Check file for harvesting: %s", path)

		// Create new state for comparison
		newState := file.NewState(info, path)

		// Load last state
		lastState := p.Prospector.states.FindPrevious(newState)

		// Ignores all files which fall under ignore_older
		if p.isIgnoreOlder(newState) {
			err := p.handleIgnoreOlder(lastState, newState)
			if err != nil {
				logp.Err("Updating ignore_older state error: %s", err)
			}
			continue
		}

		// Decides if previous state exists
		if lastState.IsEmpty() {
			logp.Debug("prospector", "Start harvester for new file: %s", newState.Source)
			err := p.Prospector.startHarvester(newState, 0)
			if err != nil {
				logp.Err("Harvester could not be started on new file: %s, Err: %s", newState.Source, err)
			}
		} else {
			p.harvestExistingFile(newState, lastState)
		}
	}
}
Developer: andrewkroh, Project: beats, Lines: 34, Source: prospector_log.go


Example 19: Run

func (p *ProspectorLog) Run() {
	logp.Debug("prospector", "Start next scan")

	p.scan()

	// It is important that a first scan is run before cleanup to make sure all new states are read first
	if p.config.CleanInactive > 0 || p.config.CleanRemoved {
		beforeCount := p.Prospector.states.Count()
		cleanedStates := p.Prospector.states.Cleanup()
		logp.Debug("prospector", "Prospector states cleaned up. Before: %d, After: %d", beforeCount, beforeCount-cleanedStates)
	}

	// Marking removed files to be cleaned up. Cleanup happens after next scan to make sure all states are updated first
	if p.config.CleanRemoved {
		for _, state := range p.Prospector.states.GetStates() {
			// os.Stat will return an error in case the file does not exist
			_, err := os.Stat(state.Source)
			if err != nil {
				// Only clean up files where state is Finished
				if state.Finished {
					state.TTL = 0
					err := p.Prospector.updateState(input.NewEvent(state))
					if err != nil {
						logp.Err("File cleanup state update error: %s", err)
					}
					logp.Debug("prospector", "Remove state for file as file removed: %s", state.Source)
				} else {
					logp.Debug("prospector", "State for file not removed because not finished: %s", state.Source)
				}
			}
		}
	}
}
Developer: YaSuenag, Project: hsbeat, Lines: 33, Source: prospector_log.go


Example 20: receivedReply

func (thrift *thriftPlugin) receivedReply(msg *thriftMessage) {

	// we need to search the request first.
	tuple := msg.tcpTuple

	trans := thrift.getTransaction(tuple.Hashable())
	if trans == nil {
		logp.Debug("thrift", "Response from unknown transaction. Ignoring: %v", tuple)
		unmatchedResponses.Add(1)
		return
	}

	if trans.request.method != msg.method {
		logp.Debug("thrift", "Response from another request received '%s' '%s'"+
			". Ignoring.", trans.request.method, msg.method)
		unmatchedResponses.Add(1)
		return
	}

	trans.reply = msg
	trans.bytesOut = uint64(msg.frameSize)

	trans.responseTime = int32(msg.ts.Sub(trans.ts).Nanoseconds() / 1e6) // resp_time in milliseconds

	thrift.publishQueue <- trans
	thrift.transactions.Delete(tuple.Hashable())

	logp.Debug("thrift", "Transaction queued")
}
Developer: ruflin, Project: beats, Lines: 29, Source: thrift.go



Note: the github.com/elastic/beats/libbeat/logp.Debug examples in this article were compiled from GitHub, MSDocs, and similar source-code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.

