Golang logging.Debugf Function Code Examples

This article collects typical usage examples of the Golang function github.com/couchbase/indexing/secondary/logging.Debugf. If you are wondering what Debugf does, how to call it, or what real-world usage looks like, the curated code examples below should help.



A total of 20 Debugf code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang code examples.
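Before walking through the examples, the sketch below shows the call pattern they all share: Debugf takes a printf-style format string followed by the values to interpolate, and the message is emitted only when the package's configured log level includes debug output (the level-setting API itself does not appear in these examples). The import path and the Debugf signature are taken from the examples; the surrounding main function and sample values are illustrative only.

package main

import (
	"github.com/couchbase/indexing/secondary/logging"
)

func main() {
	bucket := "default"
	vb := 512

	// Same convention as the examples below: a component/method prefix,
	// then the values formatted with %v-style verbs.
	logging.Debugf("main(): processing bucket %v, vb %v", bucket, vb)
}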

Example 1: GetNodeListForBuckets

//
// Get the set of nodes for all the given buckets
//
func (p *ProjectorClientEnvImpl) GetNodeListForBuckets(buckets []string) (map[string]string, error) {

	logging.Debugf("ProjectorCLientEnvImpl::getNodeListForBuckets(): start")

	nodes := make(map[string]string)

	for _, bucket := range buckets {

		bucketRef, err := couchbase.GetBucket(COUCHBASE_INTERNAL_BUCKET_URL, DEFAULT_POOL_NAME, bucket)
		if err != nil {
			return nil, err
		}

		if err := bucketRef.Refresh(); err != nil {
			return nil, err
		}

		for _, node := range bucketRef.NodeAddresses() {
			// TODO: This may not work for cluster_run when all processes are run in the same node.  Need to check.
			logging.Debugf("ProjectorCLientEnvImpl::getNodeListForBuckets(): node=%v for bucket %v", node, bucket)
			nodes[node] = node
		}
	}

	return nodes, nil
}
Developer: jchris, Project: indexing, Lines: 29, Source: stream_admin.go


Example 2: validateActiveVb

func (p *ProjectorAdmin) validateActiveVb(buckets []string, activeTimestamps []*protobuf.TsVbuuid) bool {

	for _, bucket := range buckets {
		for vb := 0; vb < NUM_VB; vb++ {
			found := false
			for _, ts := range activeTimestamps {
				if ts.GetBucket() == bucket {
					for _, ts_vb := range ts.GetVbnos() {
						if uint32(vb) == ts_vb {
							if found {
								logging.Debugf("validateActiveVb(): find duplicate active timestamp for bucket %s vb %d", bucket, vb)
								return false
							}
							found = true
						}
					}
				}
			}

			if !found {
				logging.Debugf("validateActiveVb(): Cannot find active timestamp for bucket %s vb %d", bucket, vb)
				return false
			}
		}
	}

	return true
}
Developer: jchris, Project: indexing, Lines: 28, Source: stream_admin.go


Example 3: GetClientForNode

//
// Get the projector client for the given node
//
func (p *ProjectorStreamClientFactoryImpl) GetClientForNode(server string) ProjectorStreamClient {

	var projAddr string

	if host, port, err := net.SplitHostPort(server); err == nil {
		if common.IsIPLocal(host) {

			if port == KV_DCP_PORT {
				projAddr = LOCALHOST + ":" + PROJECTOR_PORT

			} else {
				iportProj, _ := strconv.Atoi(PROJECTOR_PORT)
				iportKV, _ := strconv.Atoi(port)
				iportKV0, _ := strconv.Atoi(KV_DCP_PORT_CLUSTER_RUN)

				//In cluster_run, port number increments by 2
				nodeNum := (iportKV - iportKV0) / 2
				p := iportProj + nodeNum
				projAddr = LOCALHOST + ":" + strconv.Itoa(p)
			}
			logging.Debugf("StreamAdmin::GetClientForNode(): Local Projector Addr: %v", projAddr)

		} else {
			projAddr = host + ":" + PROJECTOR_PORT
			logging.Debugf("StreamAdmin::GetClientForNode(): Remote Projector Addr: %v", projAddr)
		}
	}

	//create client for node's projectors
	config := common.SystemConfig.SectionConfig("manager.projectorclient.", true)
	maxvbs := common.SystemConfig["maxVbuckets"].Int()
	ap := projectorC.NewClient(HTTP_PREFIX+projAddr+"/adminport/", maxvbs, config)
	return ap
}
Developer: jchris, Project: indexing, Lines: 37, Source: stream_admin.go


Example 4: runProtocol

//
// run server (as leader or follower)
//
func (s *Coordinator) runProtocol(leader string) (err error) {

	host := s.getHostUDPAddr()

	// If this host is the leader, then start the leader server.
	// Otherwise, start the followerCoordinator.
	if leader == host {
		logging.Debugf("Coordinator.runServer() : Local Coordinator %s is elected as leader. Leading ...", leader)
		s.state.setStatus(protocol.LEADING)

		// start other master services if this node is a candidate as master
		s.idxMgr.startMasterService()
		defer s.idxMgr.stopMasterService()

		err = protocol.RunLeaderServer(s.getHostTCPAddr(), s.listener, s, s, s.factory, s.skillch)
	} else {
		logging.Debugf("Coordinator.runServer() : Remote Coordinator %s is elected as leader. Following ...", leader)
		s.state.setStatus(protocol.FOLLOWING)
		leaderAddr := s.findMatchingPeerTCPAddr(leader)
		if len(leaderAddr) == 0 {
			return NewError(ERROR_COOR_ELECTION_FAIL, NORMAL, COORDINATOR, nil,
				fmt.Sprintf("Index Coordinator cannot find matching TCP addr for leader "+leader))
		}
		err = protocol.RunFollowerServer(s.getHostTCPAddr(), leaderAddr, s, s, s.factory, s.skillch)
	}

	return err
}
Developer: jchris, Project: indexing, Lines: 31, Source: coordinator.go


Example 5: calcQueueLenFromMemQuota

//Calculate mutation queue length from memory quota
func (m *mutationMgr) calcQueueLenFromMemQuota() uint64 {

	memQuota := m.config["settings.memory_quota"].Uint64()
	maxVbLen := m.config["settings.maxVbQueueLength"].Uint64()
	maxVbLenDef := m.config["settings.maxVbQueueLength"].DefaultVal.(uint64)

	//if there is a user specified value, use that
	if maxVbLen != 0 {
		logging.Debugf("MutationMgr:: Set maxVbQueueLength %v", maxVbLen)
		return maxVbLen
	} else {
		//Formula for calculation(see MB-14876)
		//Below 2GB - 5000 per vbucket
		//2GB to 4GB - 8000 per vbucket
		//Above 4GB - 10000 per vbucket
		if memQuota <= 2*1024*1024*1024 {
			maxVbLen = 5000
		} else if memQuota <= 4*1024*1024*1024 {
			maxVbLen = 8000
		} else {
			maxVbLen = maxVbLenDef
		}
		logging.Debugf("MutationMgr:: Set maxVbQueueLength %v", maxVbLen)
		return maxVbLen
	}

}
Developer: prataprc, Project: indexing, Lines: 28, Source: mutation_manager.go


Example 6: handleDeleteInstances

//
// This function computes topology changes.
//
func (s *StreamManager) handleDeleteInstances(
	streamId common.StreamId,
	bucket string,
	oldTopology *IndexTopology,
	newTopology *IndexTopology,
	fromState []common.IndexState,
	toState []common.IndexState) error {

	if oldTopology == nil || oldTopology.Version == newTopology.Version {
		return nil
	}

	var changes []*changeRecord = nil

	for _, newDefn := range newTopology.Definitions {
		if oldTopology != nil {
			oldDefn := oldTopology.FindIndexDefinition(newDefn.Bucket, newDefn.Name)
			changes = append(changes, s.addInstancesToChangeList(oldDefn, &newDefn, fromState, toState)...)
		} else {
			changes = append(changes, s.addInstancesToChangeList(nil, &newDefn, fromState, toState)...)
		}
	}

	var toBeDeleted []uint64 = nil
	for _, change := range changes {
		logging.Debugf("StreamManager.handleDeleteInstances(): adding inst '%v' to change list.", change.instance.InstId)
		toBeDeleted = append(toBeDeleted, change.instance.InstId)
	}

	logging.Debugf("StreamManager.handleDeleteInstances(): len(toBeDeleted) '%v'", len(toBeDeleted))
	return s.removeIndexInstances(streamId, bucket, toBeDeleted)
}
Developer: jchris, Project: indexing, Lines: 35, Source: stream_mgr.go


Example 7: run

func (s *Stream) run() {

	logging.Debugf("Stream.run(): starts")

	defer s.receiver.Close()

	for {
		select {
		case mut := <-s.mutch:

			func() {
				defer func() {
					if r := recover(); r != nil {
						logging.Debugf("panic in Stream.run() : error ignored.  Error = %v\n", r)
					}
				}()

				switch d := mut.(type) {
				case ([]*data.VbKeyVersions):
					logging.Debugf("Stream.run(): recieve VbKeyVersion")
					s.handleVbKeyVersions(d)
				case dataport.ConnectionError:
					logging.Debugf("Stream.run(): recieve ConnectionError")
					s.handler.HandleConnectionError(s.id, d)
				}
			}()

		case <-s.stopch:
			logging.Debugf("Stream.run(): stop")
			return
		}
	}
}
Developer: jchris, Project: indexing, Lines: 33, Source: stream.go


Example 8: LogProposal

// TODO : what to do if createIndex returns error
func (c *Coordinator) LogProposal(proposal protocol.ProposalMsg) error {

	if c.GetStatus() == protocol.LEADING {
		switch common.OpCode(proposal.GetOpCode()) {
		case OPCODE_ADD_IDX_DEFN:
			success := c.createIndex(proposal.GetKey(), proposal.GetContent())
			logging.Debugf("Coordinator.LogProposal(): (createIndex) success = %s", success)
		case OPCODE_DEL_IDX_DEFN:
			success := c.deleteIndex(proposal.GetKey())
			logging.Debugf("Coordinator.LogProposal(): (deleteIndex) success = %s", success)
		}
	}

	switch common.OpCode(proposal.GetOpCode()) {
	case OPCODE_NOTIFY_TIMESTAMP:
		timestamp, err := unmarshallTimestampSerializable(proposal.GetContent())
		if err == nil {
			c.idxMgr.notifyNewTimestamp(timestamp)
		} else {
			logging.Debugf("Coordinator.LogProposal(): error when unmarshalling timestamp. Ignore timestamp.  Error=%s", err.Error())
		}
	}

	c.updateRequestOnNewProposal(proposal)

	return nil
}
Developer: jchris, Project: indexing, Lines: 28, Source: coordinator.go


Example 9: StartStream

//
// Start a stream for listening only.  This will not trigger the mutation source to start
// streaming mutations.   Need to call AddIndexForBucket() or AddIndexForAllBuckets()
// to kick off the mutation source to start streaming the mutations for indexes in bucket(s).
//
func (s *StreamManager) StartStream(streamId common.StreamId) error {

	s.mutex.Lock()
	defer s.mutex.Unlock()

	logging.Debugf("StreamManager.StartStream(): start")

	if s.isClosed {
		return nil
	}

	// Verify if the stream is already open.  If so, this is just a no-op.
	if stream, ok := s.streams[streamId]; ok && stream.status {
		logging.Debugf("StreamManager.StartStream(): stream %v already started", streamId)
		return nil
	}

	// Create a new stream.  This will prepare the receiver to be ready for receiving mutations.
	port := getPortForStreamId(streamId)
	stream, err := newStream(streamId, port, s.handler)
	if err != nil {
		return err
	}

	err = stream.start()
	if err != nil {
		return err
	}
	logging.Debugf("StreamManager.StartStream(): stream %v started successfully on port %v", streamId, port)

	s.streams[streamId] = stream
	stream.status = true
	return nil
}
Developer: jchris, Project: indexing, Lines: 39, Source: stream_mgr.go


Example 10: runElection

//
// run election
//
func (s *Coordinator) runElection() (leader string, err error) {

	host := s.getHostUDPAddr()
	peers := s.getPeerUDPAddr()

	// Create an election site to start leader election.
	logging.Debugf("Coordinator.runElection(): Local Coordinator %s start election", host)
	logging.Debugf("Coordinator.runElection(): Peer in election")
	for _, peer := range peers {
		logging.Debugf("	peer : %s", peer)
	}

	s.site, err = protocol.CreateElectionSite(host, peers, s.factory, s, false)
	if err != nil {
		return "", err
	}

	// blocked until leader is elected. coordinator.Terminate() will unblock this.
	resultCh := s.site.StartElection()
	leader, ok := <-resultCh
	if !ok {
		return "", NewError(ERROR_COOR_ELECTION_FAIL, NORMAL, COORDINATOR, nil,
			fmt.Sprintf("Index Coordinator Election Fails"))
	}

	return leader, nil
}
Developer: jchris, Project: indexing, Lines: 30, Source: coordinator.go


Example 11: handleRestoreIndexMetadataRequest

//
// Restore semantic:
// 1) Each index is associated with the <IndexDefnId, IndexerId>.  IndexDefnId is unique for each index defnition,
//    and IndexerId is unique among the index nodes.  Note that IndexDefnId cannot be reused.
// 2) Index defn exists for the given <IndexDefnId, IndexerId> in current repository.  No action will be applied during restore.
// 3) Index defn is deleted or missing in current repository.  Index Defn restored from backup if bucket exists.
//    - Index defn of the same <bucket, name> exists.   It will rename the index to <index name>_restore_<seqNo>
//    - Bucket does not exist.   It will restore an index defn with a non-existent bucket.
//
func (m *requestHandlerContext) handleRestoreIndexMetadataRequest(w http.ResponseWriter, r *http.Request) {

	if !doAuth(r, w, m.clusterUrl) {
		return
	}

	image := m.convertIndexMetadataRequest(r)
	if image == nil {
		send(w, &RestoreResponse{Code: RESP_ERROR, Error: "Unable to process request input"})
		return
	}

	indexerHostMap := make(map[common.IndexerId]string)
	current, err := m.getIndexMetadata(m.mgr.getServiceAddrProvider().(*common.ClusterInfoCache), indexerHostMap)
	if err != nil {
		send(w, &RestoreResponse{Code: RESP_ERROR, Error: "Unable to get the latest index metadata for restore"})
		return
	}

	context := &RestoreContext{idxToRestore: make(map[common.IndexerId][]common.IndexDefn),
		idxToResolve: make(map[common.IndexerId][]common.IndexDefn)}

	// Figure out what index to restore that has the same IndexDefnId
	for _, imeta := range image.Metadata {
		found := false
		for _, cmeta := range current.Metadata {
			if imeta.IndexerId == cmeta.IndexerId {
				m.findIndexToRestoreById(&imeta, &cmeta, context)
				found = true
			}
		}

		if !found {
			logging.Debugf("requestHandler.handleRestoreIndexMetadataRequest(): cannot find matching indexer id %s", imeta.IndexerId)
			for _, idefn := range imeta.IndexDefinitions {
				logging.Debugf("requestHandler.handleRestoreIndexMetadataRequest(): adding index definition (%v,%v) to to-be-resolve list", idefn.Bucket, idefn.Name)
				context.idxToResolve[common.IndexerId(imeta.IndexerId)] =
					append(context.idxToResolve[common.IndexerId(imeta.IndexerId)], idefn)
			}
		}
	}

	// Figure out what index to restore that has the same bucket and name
	for indexerId, idxs := range context.idxToResolve {
		for _, idx := range idxs {
			m.findIndexToRestoreByName(current, idx, indexerId, context)
		}
	}

	// recreate index
	success := m.restoreIndex(current, context, indexerHostMap)

	if success {
		send(w, &RestoreResponse{Code: RESP_SUCCESS})
		return
	}

	send(w, &RestoreResponse{Code: RESP_ERROR, Error: "Unable to restore metadata"})
}
Developer: jchris, Project: indexing, Lines: 68, Source: request_handler.go


Example 12: handleDeleteBucket

func (m *LifecycleMgr) handleDeleteBucket(bucket string, content []byte) error {

	result := error(nil)

	if len(content) == 0 {
		return errors.New("invalid argument")
	}

	streamId := common.StreamId(content[0])

	topology, err := m.repo.GetTopologyByBucket(bucket)
	if err == nil {
		/*
			// if there is an error getting the UUID, this means that
			// the node is not able to connect to pool service in order
			// to fetch the bucket UUID.   Return an error and skip.
			uuid, err := m.getBucketUUID(bucket)
			if err != nil {
				logging.Errorf("LifecycleMgr.handleDeleteBucket() : Encounter when connecting to pool service = %v", err)
				return err
			}
		*/

		// At this point, we are able to connect to pool service.  If pool
		// does not contain the bucket, then we delete all index defn in
		// the bucket.  Otherwise, delete index defn that does not have the
		// same bucket UUID.  Note that any other create index request will
		// be blocked while this call is run.
		definitions := make([]IndexDefnDistribution, len(topology.Definitions))
		copy(definitions, topology.Definitions)

		for _, defnRef := range definitions {

			if defn, err := m.repo.GetIndexDefnById(common.IndexDefnId(defnRef.DefnId)); err == nil {

				logging.Debugf("LifecycleMgr.handleDeleteBucket() : index instance: id %v, streamId %v.",
					defn.DefnId, defnRef.Instances[0].StreamId)

				// delete index defn from the bucket if bucket uuid is not specified or
				// index does *not* belong to bucket uuid
				if /* (uuid == common.BUCKET_UUID_NIL || defn.BucketUUID != uuid) && */
				streamId == common.NIL_STREAM || common.StreamId(defnRef.Instances[0].StreamId) == streamId {
					if err := m.DeleteIndex(common.IndexDefnId(defn.DefnId), false); err != nil {
						result = err
					}
				}
			} else {
				logging.Debugf("LifecycleMgr.handleDeleteBucket() : Cannot find index instance %v.  Skip.", defnRef.DefnId)
			}
		}
	} else if err != fdb.RESULT_KEY_NOT_FOUND {
		result = err
	}

	return result
}
Developer: jchris, Project: indexing, Lines: 56, Source: lifecycle.go


Example 13: runTimestampKeeper

func (m *IndexManager) runTimestampKeeper() {

	defer logging.Debugf("IndexManager.runTimestampKeeper() : terminate")

	inboundch := m.timer.getOutputChannel()

	persistTimestamp := true // save the first timestamp always
	lastPersistTime := uint64(time.Now().UnixNano())

	timestamps, err := m.repo.GetStabilityTimestamps()
	if err != nil {
		// TODO : Determine timestamp not exist versus forestdb error
		logging.Errorf("IndexManager.runTimestampKeeper() : cannot get stability timestamp from repository. Create a new one.")
		timestamps = createTimestampListSerializable()
	}

	for {
		select {
		case <-m.timekeeperStopCh:
			return

		case timestamp, ok := <-inboundch:

			if !ok {
				return
			}

			gometaC.SafeRun("IndexManager.runTimestampKeeper()",
				func() {
					timestamps.addTimestamp(timestamp)
					persistTimestamp = persistTimestamp ||
						uint64(time.Now().UnixNano())-lastPersistTime > m.timestampPersistInterval
					if persistTimestamp {
						if err := m.repo.SetStabilityTimestamps(timestamps); err != nil {
							logging.Errorf("IndexManager.runTimestampKeeper() : cannot set stability timestamp into repository.")
						} else {
							logging.Debugf("IndexManager.runTimestampKeeper() : saved stability timestamp to repository")
							persistTimestamp = false
							lastPersistTime = uint64(time.Now().UnixNano())
						}
					}

					data, err := marshallTimestampSerializable(timestamp)
					if err != nil {
						logging.Debugf(
							"IndexManager.runTimestampKeeper(): error when marshalling timestamp. Ignore timestamp.  Error=%s",
							err.Error())
					} else {
						m.coordinator.NewRequest(uint32(OPCODE_NOTIFY_TIMESTAMP), "Stability Timestamp", data)
					}
				})
		}
	}
}
Developer: jchris, Project: indexing, Lines: 54, Source: manager.go


Example 14: restartStream

//
// Add index instances to a specific projector node
//
func (worker *adminWorker) restartStream(timestamps []*protobuf.TsVbuuid, doneCh chan *adminWorker) {

	defer func() {
		doneCh <- worker
	}()

	logging.Debugf("adminWorker::restartStream(): start")

	// Get projector client for the particular node.  This function does not
	// return an error even if the server is an invalid host name, but subsequent
	// call to client may fail.  Also note that there is no method to close the client
	// (no need to close upon termination).
	client := worker.admin.factory.GetClientForNode(worker.server)
	if client == nil {
		logging.Debugf("adminWorker::restartStream(): no client returns from factory")
		return
	}

	// open the stream for the specific node for the set of <bucket, timestamp>
	topic := getTopicForStreamId(worker.streamId)

	retry := true
	startTime := time.Now().Unix()
	for retry {
		select {
		case <-worker.killch:
			return
		default:
			response, err := client.RestartVbuckets(topic, timestamps)
			if err == nil {
				// no error, it is successful for this node
				worker.activeTimestamps = response.GetActiveTimestamps()
				worker.err = nil
				return
			}

			timestamps, err = worker.shouldRetryRestartVbuckets(timestamps, response, err)
			if err != nil {
				// Either it is a non-recoverable error or an error that cannot be retry by this worker.
				// Terminate this worker.
				worker.activeTimestamps = response.GetActiveTimestamps()
				worker.err = err
				return
			}

			retry = time.Now().Unix()-startTime < MAX_PROJECTOR_RETRY_ELAPSED_TIME
		}
	}

	// When we reach here, the elapsed time within which the projector is supposed to respond has passed.
	// Projector may die or it can be a network partition, need to return an error since it may
	// require another worker to retry.
	worker.err = NewError4(ERROR_STREAM_PROJECTOR_TIMEOUT, NORMAL, STREAM, "Projector Call timeout after retry.")
}
Developer: jchris, Project: indexing, Lines: 57, Source: stream_admin.go


Example 15: repairEndpoint

//
// Repair endpoint for a specific projector node
//
func (worker *adminWorker) repairEndpoint(endpoint string, doneCh chan *adminWorker) {

	defer func() {
		doneCh <- worker
	}()

	logging.Debugf("adminWorker::repairEndpoint(): start")

	// Get projector client for the particular node.  This function does not
	// return an error even if the server is an invalid host name, but subsequent
	// call to client may fail.  Also note that there is no method to close the client
	// (no need to close upon termination).
	client := worker.admin.factory.GetClientForNode(worker.server)
	if client == nil {
		logging.Debugf("adminWorker::repairEndpoints(): no client returns from factory")
		return
	}

	// open the stream for the specific node for the set of <bucket, timestamp>
	topic := getTopicForStreamId(worker.streamId)

	retry := true
	startTime := time.Now().Unix()
	for retry {
		select {
		case <-worker.killch:
			return
		default:

			err := client.RepairEndpoints(topic, []string{endpoint})
			if err == nil {
				// no error, it is successful for this node
				worker.err = nil
				return
			}

			logging.Debugf("adminWorker::repairEndpiont(): Error encountered when calling RepairEndpoint. Error=%v", err.Error())
			if strings.Contains(err.Error(), projectorC.ErrorTopicMissing.Error()) {
				// It is OK if topic is missing
				worker.err = nil
				return
			}

			retry = time.Now().Unix()-startTime < MAX_PROJECTOR_RETRY_ELAPSED_TIME
		}
	}

	// When we reach here, the elapsed time within which the projector is supposed to respond has passed.
	// Projector may die or it can be a network partition, need to return an error since it may
	// require another worker to retry.
	worker.err = NewError4(ERROR_STREAM_PROJECTOR_TIMEOUT, NORMAL, STREAM, "Projector Call timeout after retry.")
}
Developer: jchris, Project: indexing, Lines: 55, Source: stream_admin.go


Example 16: waitForEvent

func (w *watcher) waitForEvent(defnId c.IndexDefnId, status []c.IndexState) error {

	event := &event{defnId: defnId, status: status, notifyCh: make(chan error, 1)}
	if w.registerEvent(event) {
		logging.Debugf("watcher.waitForEvent(): wait event : id %v status %v", event.defnId, event.status)
		err, ok := <-event.notifyCh
		if ok && err != nil {
			logging.Debugf("watcher.waitForEvent(): wait arrives : id %v status %v", event.defnId, event.status)
			return err
		}
	}
	return nil
}
Developer: jchris, Project: indexing, Lines: 13, Source: metadata_provider.go


Example 17: deleteIndex

//
// Handle delete Index request in the dictionary.  If this function
// returns true, it means deleteIndex request completes successfully.
// If this function returns false, then the result is unknown.
//
func (c *Coordinator) deleteIndex(key string) bool {

	id, err := indexDefnId(key)
	if err != nil {
		logging.Debugf("Coordinator.deleteIndex() : deleteIndex fails. Reason = %s", err.Error())
		return false
	}

	if err := c.idxMgr.getLifecycleMgr().DeleteIndex(id, true); err != nil {
		logging.Debugf("Coordinator.deleteIndex() : deleteIndex fails. Reason = %s", err.Error())
		return false
	}

	return true
}
Developer: jchris, Project: indexing, Lines: 20, Source: coordinator.go


Example 18: convertResponse

func convertResponse(r *http.Response, resp interface{}) string {

	buf := new(bytes.Buffer)
	if _, err := buf.ReadFrom(r.Body); err != nil {
		logging.Debugf("RequestHandler::convertResponse: unable to read request body, err %v", err)
		return RESP_ERROR
	}

	if err := json.Unmarshal(buf.Bytes(), resp); err != nil {
		logging.Debugf("convertResponse: unable to unmarshall response body. Buf = %s, err %v", buf, err)
		return RESP_ERROR
	}

	return RESP_SUCCESS
}
Developer: jchris, Project: indexing, Lines: 15, Source: request_handler.go


Example 19: processChange

func (w *watcher) processChange(op uint32, key string, content []byte) error {

	logging.Debugf("watcher.processChange(): key = %v", key)
	defer logging.Debugf("watcher.processChange(): done -> key = %v", key)

	opCode := common.OpCode(op)

	switch opCode {
	case common.OPCODE_ADD, common.OPCODE_SET:
		if isIndexDefnKey(key) {
			if len(content) == 0 {
				logging.Debugf("watcher.processChange(): content of key = %v is empty.", key)
			}

			id, err := extractDefnIdFromKey(key)
			if err != nil {
				return err
			}
			w.addDefnWithNoLock(c.IndexDefnId(id))
			if err := w.provider.repo.unmarshallAndAddDefn(content); err != nil {
				return err
			}
			w.notifyEventNoLock()

		} else if isIndexTopologyKey(key) {
			if len(content) == 0 {
				logging.Debugf("watcher.processChange(): content of key = %v is empty.", key)
			}
			if err := w.provider.repo.unmarshallAndAddInst(content); err != nil {
				return err
			}
			w.notifyEventNoLock()
		}
	case common.OPCODE_DELETE:
		if isIndexDefnKey(key) {

			id, err := extractDefnIdFromKey(key)
			if err != nil {
				return err
			}
			w.removeDefnWithNoLock(c.IndexDefnId(id))
			w.provider.repo.removeDefn(c.IndexDefnId(id))
			w.notifyEventNoLock()
		}
	}

	return nil
}
Developer: jchris, Project: indexing, Lines: 48, Source: metadata_provider.go


Example 20: handleAbortPersist

func (m *mutationMgr) handleAbortPersist(cmd Message) {

	logging.Debugf("MutationMgr::handleAbortPersist %v", cmd)

	bucket := cmd.(*MsgMutMgrFlushMutationQueue).GetBucket()
	streamId := cmd.(*MsgMutMgrFlushMutationQueue).GetStreamId()

	go func() {
		m.flock.Lock()
		defer m.flock.Unlock()

		//abort the flush for the given stream and bucket, if it's in progress
		if bucketStopChMap, ok := m.streamFlusherStopChMap[streamId]; ok {
			if stopch, ok := bucketStopChMap[bucket]; ok {
				if stopch != nil {
					close(stopch)
				}
			}
		}
		m.supvRespch <- &MsgMutMgrFlushDone{mType: MUT_MGR_ABORT_DONE,
			bucket:   bucket,
			streamId: streamId}
	}()

	m.supvCmdch <- &MsgSuccess{}

}
Developer: prataprc, Project: indexing, Lines: 27, Source: mutation_manager.go



Note: The github.com/couchbase/indexing/secondary/logging.Debugf examples in this article were compiled from source code and documentation hosted on platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their authors; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.

