This article collects typical usage examples of the Go function ContextWithSpan from github.com/opentracing/opentracing-go. If you are wondering what ContextWithSpan does, how to call it, or what real-world uses look like, the curated examples below should help.
The following shows 20 code examples of ContextWithSpan, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Go examples.
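Before the examples, here is a minimal, self-contained sketch of what ContextWithSpan does: it returns a derived context.Context carrying the given span, which SpanFromContext can later recover. This sketch uses only the public opentracing-go API; the global tracer defaults to a no-op tracer, so it runs without any tracing backend configured.

package main

import (
	"context"
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
)

func main() {
	// Start a span (a no-op span unless a real tracer was registered
	// via opentracing.SetGlobalTracer).
	sp := opentracing.GlobalTracer().StartSpan("request")
	defer sp.Finish()

	// ContextWithSpan returns a derived context carrying the span.
	ctx := opentracing.ContextWithSpan(context.Background(), sp)

	// Downstream code recovers the span with SpanFromContext.
	if opentracing.SpanFromContext(ctx) != nil {
		fmt.Println("the context carries the span")
	}
}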
Example 1: ForkCtxSpan
// ForkCtxSpan checks if ctx has a Span open; if it does, it creates a new Span
// that follows from the original Span. This allows the resulting context to be
// used in an async task that might outlive the original operation.
//
// Returns the new context and the new span (if any). The span should be
// closed via FinishSpan.
func ForkCtxSpan(ctx context.Context, opName string) (context.Context, opentracing.Span) {
	if span := opentracing.SpanFromContext(ctx); span != nil {
		if span.BaggageItem(Snowball) == "1" {
			// If we are doing snowball tracing, the span might outlive the snowball
			// tracer (calling the record function when it is no longer legal to do
			// so). Return a context with no span in this case.
			return opentracing.ContextWithSpan(ctx, nil), nil
		}
		tr := span.Tracer()
		newSpan := tr.StartSpan(opName, opentracing.FollowsFrom(span.Context()))
		return opentracing.ContextWithSpan(ctx, newSpan), newSpan
	}
	return ctx, nil
}
Developer: BramGruneir, Project: cockroach, Lines: 20, Source: tracer.go
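A possible call site for ForkCtxSpan, sketched under the assumption that it lives in the same package as the function above and that FinishSpan (mentioned in the doc comment) is a nil-safe helper that finishes the span:

// startAsyncTask is a hypothetical caller that forks the current span for
// work that may outlive the request, per the FollowsFrom relationship set
// up by ForkCtxSpan.
func startAsyncTask(ctx context.Context, work func(context.Context)) {
	asyncCtx, span := ForkCtxSpan(ctx, "async-task")
	go func() {
		defer FinishSpan(span) // assumed to be a no-op when span is nil
		work(asyncCtx)
	}()
}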
Example 2: setupFlow
func (ds *ServerImpl) setupFlow(
	ctx context.Context, req *SetupFlowRequest, simpleFlowConsumer RowReceiver,
) (*Flow, error) {
	sp, err := tracing.JoinOrNew(ds.AmbientContext.Tracer, req.TraceContext, "flow")
	if err != nil {
		return nil, err
	}
	ctx = opentracing.ContextWithSpan(ctx, sp)
	txn := ds.setupTxn(ctx, &req.Txn)
	flowCtx := FlowCtx{
		Context: ctx,
		id:      req.Flow.FlowID,
		evalCtx: &ds.evalCtx,
		rpcCtx:  ds.RPCContext,
		txn:     txn,
	}
	f := newFlow(flowCtx, ds.flowRegistry, simpleFlowConsumer)
	if err := f.setupFlow(&req.Flow); err != nil {
		log.Error(ctx, err)
		sp.Finish()
		return nil, err
	}
	return f, nil
}
Developer: knz, Project: cockroach, Lines: 26, Source: server.go
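JoinOrNew is a CockroachDB-specific helper; with stock opentracing-go, the equivalent "join the remote trace or start a new one" pattern can be sketched with Tracer.Extract (the HTTP-headers carrier here is just one possible transport):

// joinOrStart resumes a trace propagated in HTTP headers, or starts a new
// root span if no usable metadata is present, and attaches it to ctx.
func joinOrStart(ctx context.Context, tracer opentracing.Tracer, hdr http.Header) (context.Context, opentracing.Span) {
	var opts []opentracing.StartSpanOption
	if sc, err := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(hdr)); err == nil {
		opts = append(opts, opentracing.ChildOf(sc))
	}
	sp := tracer.StartSpan("flow", opts...)
	return opentracing.ContextWithSpan(ctx, sp), sp
}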
Example 3: setupFlow
func (ds *ServerImpl) setupFlow(
	ctx context.Context, req *SetupFlowRequest, syncFlowConsumer RowReceiver,
) (*Flow, error) {
	sp, err := tracing.JoinOrNew(ds.AmbientContext.Tracer, req.TraceContext, "flow")
	if err != nil {
		return nil, err
	}
	ctx = opentracing.ContextWithSpan(ctx, sp)
	// TODO(radu): we should sanity check some of these fields (especially
	// txnProto).
	flowCtx := FlowCtx{
		Context:  ctx,
		id:       req.Flow.FlowID,
		evalCtx:  &ds.evalCtx,
		rpcCtx:   ds.RPCContext,
		txnProto: &req.Txn,
		clientDB: ds.DB,
	}
	f := newFlow(flowCtx, ds.flowRegistry, syncFlowConsumer)
	if err := f.setupFlow(&req.Flow); err != nil {
		log.Error(ctx, err)
		sp.Finish()
		return nil, err
	}
	return f, nil
}
Developer: BramGruneir, Project: cockroach, Lines: 28, Source: server.go
Example 4: TestTrace
func TestTrace(t *testing.T) {
	ctx := context.Background()
	// Events to context without a trace should be no-ops.
	Event(ctx, "should-not-show-up")
	var ev events
	tracer := testingTracer(&ev)
	sp := tracer.StartSpan("s")
	ctxWithSpan := opentracing.ContextWithSpan(ctx, sp)
	Event(ctxWithSpan, "test1")
	ErrEvent(ctxWithSpan, "testerr")
	VEvent(ctxWithSpan, logging.verbosity.get()+1, "test2")
	Info(ctxWithSpan, "log")
	// Events to parent context should still be no-ops.
	Event(ctx, "should-not-show-up")
	sp.Finish()
	expected := events{"s:start", "s:test1", "s:testerr", "s:test2", "s:log", "s:finish"}
	if !compareTraces(expected, ev) {
		t.Errorf("expected events '%s', got '%v'", expected, ev)
	}
}
Developer: BramGruneir, Project: cockroach, Lines: 26, Source: trace_test.go
Example 5: EnsureContext
// EnsureContext checks whether the given context.Context contains a Span. If
// not, it creates one using the provided Tracer and wraps it in the returned
// Span. The returned closure must be called after the request has been fully
// processed.
func EnsureContext(ctx context.Context, tracer opentracing.Tracer) (context.Context, func()) {
	_, _, funcName := caller.Lookup(1)
	if opentracing.SpanFromContext(ctx) == nil {
		sp := tracer.StartSpan(funcName)
		return opentracing.ContextWithSpan(ctx, sp), sp.Finish
	}
	return ctx, func() {}
}
Developer: bogdanbatog, Project: cockroach, Lines: 12, Source: tracer.go
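A typical way to use EnsureContext is to call it at the top of a handler and defer the returned closure, so the function gets a span whether or not the caller provided one. A hedged sketch (handler body elided):

// handleRequest guarantees itself a span. The returned closure is either
// sp.Finish or a no-op, so deferring it is always safe.
func handleRequest(ctx context.Context, tracer opentracing.Tracer) {
	ctx, finish := EnsureContext(ctx, tracer)
	defer finish()
	// ... process the request using ctx ...
}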
Example 6: ChildSpan
// ChildSpan opens a span as a child of the current span in the context (if
// there is one).
//
// Returns the new context and the new span (if any). The span should be
// closed via FinishSpan.
func ChildSpan(ctx context.Context, opName string) (context.Context, opentracing.Span) {
	span := opentracing.SpanFromContext(ctx)
	if span == nil {
		return ctx, nil
	}
	newSpan := span.Tracer().StartSpan(opName, opentracing.ChildOf(span.Context()))
	return opentracing.ContextWithSpan(ctx, newSpan), newSpan
}
Developer: BramGruneir, Project: cockroach, Lines: 13, Source: tracer.go
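ChildSpan pairs naturally with a deferred finish; the sketch below assumes the nil-safe FinishSpan helper referenced in Example 1 (ChildSpan returns a nil span when the context carries none):

// readBlock wraps a sub-operation in a child span.
func readBlock(ctx context.Context, key string) {
	ctx, span := ChildSpan(ctx, "read-block")
	defer FinishSpan(span) // assumed no-op for a nil span
	// ... perform the read using ctx ...
}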
Example 7: heartbeatLoop
// heartbeatLoop periodically sends a HeartbeatTxn RPC to an extant transaction,
// stopping in the event the transaction is aborted or committed after
// attempting to resolve the intents. When the heartbeat stops, the transaction
// is unregistered from the coordinator.
//
// TODO(dan): The Context we use for this is currently the one from the first
// request in a Txn, but the semantics of this aren't good. Each context has its
// own associated lifetime and we're ignoring all but the first. It happens now
// that we pass the same one in every request, but it's brittle to rely on this
// forever.
func (tc *TxnCoordSender) heartbeatLoop(ctx context.Context, txnID uuid.UUID) {
	var tickChan <-chan time.Time
	{
		ticker := time.NewTicker(tc.heartbeatInterval)
		tickChan = ticker.C
		defer ticker.Stop()
	}
	defer func() {
		tc.Lock()
		duration, restarts, status := tc.unregisterTxnLocked(txnID)
		tc.Unlock()
		tc.updateStats(duration, int64(restarts), status, false)
	}()
	var closer <-chan struct{}
	// TODO(tschottdorf): this should join to the trace of the request
	// which starts this goroutine.
	sp := tc.tracer.StartSpan(opHeartbeatLoop)
	defer sp.Finish()
	ctx = opentracing.ContextWithSpan(ctx, sp)
	{
		tc.Lock()
		txnMeta := tc.txns[txnID] // do not leak to outer scope
		closer = txnMeta.txnEnd
		tc.Unlock()
	}
	if closer == nil {
		// Avoid race in which a Txn is cleaned up before the heartbeat
		// goroutine gets a chance to start.
		return
	}
	// Loop with ticker for periodic heartbeats.
	for {
		select {
		case <-tickChan:
			if !tc.heartbeat(ctx, txnID) {
				return
			}
		case <-closer:
			// Transaction finished normally.
			return
		case <-ctx.Done():
			// Note that if ctx is not cancellable, then ctx.Done() returns a nil
			// channel, which blocks forever. In this case, the heartbeat loop is
			// responsible for timing out transactions. If ctx.Done() is not nil,
			// then the heartbeat loop ignores the timeout check and this case is
			// responsible for client timeouts.
			tc.clientHasAbandoned(txnID)
			return
		case <-tc.stopper.ShouldDrain():
			return
		}
	}
}
Developer: petermattis, Project: cockroach, Lines: 65, Source: txn_coord_sender.go
Example 8: executeCmd
// executeCmd interprets the given message as a *roachpb.BatchRequest and sends it
// via the local sender.
func (n *Node) executeCmd(argsI proto.Message) (proto.Message, error) {
	ba := argsI.(*roachpb.BatchRequest)
	var br *roachpb.BatchResponse
	opName := "node " + strconv.Itoa(int(n.Descriptor.NodeID)) // could save allocs here
	fail := func(err error) {
		br = &roachpb.BatchResponse{}
		br.Error = roachpb.NewError(err)
	}
	f := func() {
		sp, err := tracing.JoinOrNew(n.ctx.Tracer, ba.Trace, opName)
		if err != nil {
			fail(err)
			return
		}
		// If this is a snowball span, it gets special treatment: It skips the
		// regular tracing machinery, and we instead send the collected spans
		// back with the response. This is more expensive, but then again,
		// those are individual requests traced by users, so they can be.
		if sp.BaggageItem(tracing.Snowball) != "" {
			if sp, err = tracing.JoinOrNewSnowball(opName, ba.Trace, func(rawSpan basictracer.RawSpan) {
				encSp, err := tracing.EncodeRawSpan(&rawSpan, nil)
				if err != nil {
					log.Warning(err)
				}
				br.CollectedSpans = append(br.CollectedSpans, encSp)
			}); err != nil {
				fail(err)
				return
			}
		}
		defer sp.Finish()
		ctx := opentracing.ContextWithSpan((*Node)(n).context(), sp)
		tStart := time.Now()
		var pErr *roachpb.Error
		br, pErr = n.stores.Send(ctx, *ba)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
			sp.LogEvent(fmt.Sprintf("error: %T", pErr.GetDetail()))
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
		}
		n.metrics.callComplete(time.Now().Sub(tStart), pErr)
		br.Error = pErr
	}
	if !n.stopper.RunTask(f) {
		return nil, util.Errorf("node %d stopped", n.Descriptor.NodeID)
	}
	return br, nil
}
Developer: liugangnhm, Project: cockroach, Lines: 56, Source: node.go
Example 9: Send
func (ts *txnSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Send call through wrapped sender.
	ba.Txn = &ts.Proto
	if ts.UserPriority > 0 {
		ba.UserPriority = ts.UserPriority
	}
	ctx = opentracing.ContextWithSpan(ctx, ts.Trace)
	ba.SetNewRequest()
	br, pErr := ts.wrapped.Send(ctx, ba)
	if br != nil && br.Error != nil {
		panic(roachpb.ErrorUnexpectedlySet(ts.wrapped, br))
	}
	if br != nil {
		for _, encSp := range br.CollectedSpans {
			var newSp basictracer.RawSpan
			if err := tracing.DecodeRawSpan(encSp, &newSp); err != nil {
				return nil, roachpb.NewError(err)
			}
			ts.CollectedSpans = append(ts.CollectedSpans, newSp)
		}
	}
	// Only successful requests can carry an updated Txn in their response
	// header. Any error (e.g. a restart) can have a Txn attached to them as
	// well; those update our local state in the same way for the next attempt.
	// The exception is if our transaction was aborted and needs to restart
	// from scratch, in which case we do just that.
	if pErr == nil {
		ts.Proto.Update(br.Txn)
		return br, nil
	} else if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); ok {
		// On Abort, reset the transaction so we start anew on restart.
		ts.Proto = roachpb.Transaction{
			TxnMeta: roachpb.TxnMeta{
				Isolation: ts.Proto.Isolation,
			},
			Name: ts.Proto.Name,
		}
		// Acts as a minimum priority on restart.
		if pErr.GetTxn() != nil {
			ts.Proto.Priority = pErr.GetTxn().Priority
		}
	} else if pErr.TransactionRestart != roachpb.TransactionRestart_ABORT {
		ts.Proto.Update(pErr.GetTxn())
	}
	return nil, pErr
}
Developer: cuongdo, Project: cockroach, Lines: 49, Source: txn.go
Example 10: heartbeatLoop
// heartbeatLoop periodically sends a HeartbeatTxn RPC to an extant
// transaction, stopping in the event the transaction is aborted or
// committed after attempting to resolve the intents. When the
// heartbeat stops, the transaction is unregistered from the
// coordinator.
func (tc *TxnCoordSender) heartbeatLoop(txnID uuid.UUID) {
	var tickChan <-chan time.Time
	{
		ticker := time.NewTicker(tc.heartbeatInterval)
		tickChan = ticker.C
		defer ticker.Stop()
	}
	defer func() {
		tc.Lock()
		duration, restarts, status := tc.unregisterTxnLocked(txnID)
		tc.Unlock()
		tc.updateStats(duration, int64(restarts), status)
	}()
	var closer <-chan struct{}
	var sp opentracing.Span
	{
		tc.Lock()
		txnMeta := tc.txns[txnID] // do not leak to outer scope
		closer = txnMeta.txnEnd
		// TODO(tschottdorf): this should join to the trace of the request
		// which starts this goroutine.
		sp = tc.tracer.StartSpan(opHeartbeatLoop)
		defer sp.Finish()
		tc.Unlock()
	}
	if closer == nil {
		// Avoid race in which a Txn is cleaned up before the heartbeat
		// goroutine gets a chance to start.
		return
	}
	ctx := opentracing.ContextWithSpan(context.Background(), sp)
	// Loop with ticker for periodic heartbeats.
	for {
		select {
		case <-tickChan:
			if !tc.heartbeat(txnID, sp, ctx) {
				return
			}
		case <-closer:
			// Transaction finished normally.
			return
		case <-tc.stopper.ShouldDrain():
			return
		}
	}
}
Developer: cuongdo, Project: cockroach, Lines: 53, Source: txn_coord_sender.go
Example 11: InitSenderForLocalTestCluster
// InitSenderForLocalTestCluster initializes a TxnCoordSender that can be used
// with LocalTestCluster.
func InitSenderForLocalTestCluster(
	nodeDesc *roachpb.NodeDescriptor,
	tracer opentracing.Tracer,
	clock *hlc.Clock,
	latency time.Duration,
	stores client.Sender,
	stopper *stop.Stopper,
	gossip *gossip.Gossip,
) client.Sender {
	var rpcSend rpcSendFn = func(_ SendOptions, _ ReplicaSlice,
		args roachpb.BatchRequest, _ *rpc.Context) (*roachpb.BatchResponse, error) {
		if latency > 0 {
			time.Sleep(latency)
		}
		sp := tracer.StartSpan("node")
		defer sp.Finish()
		ctx := opentracing.ContextWithSpan(context.Background(), sp)
		log.Trace(ctx, args.String())
		br, pErr := stores.Send(ctx, args)
		if br == nil {
			br = &roachpb.BatchResponse{}
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(stores, br))
		}
		br.Error = pErr
		if pErr != nil {
			log.Trace(ctx, "error: "+pErr.String())
		}
		return br, nil
	}
	retryOpts := GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = stopper.ShouldDrain()
	distSender := NewDistSender(&DistSenderContext{
		Clock:                    clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &retryOpts,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,                    // defined above
		RangeDescriptorDB:        stores.(RangeDescriptorDB), // for descriptor lookup
	}, gossip)
	return NewTxnCoordSender(distSender, clock, false /* !linearizable */, tracer,
		stopper, NewTxnMetrics(metric.NewRegistry()))
}
Developer: GitGoldie, Project: cockroach, Lines: 49, Source: local_test_cluster_util.go
Example 12: AnnotateCtxWithSpan
// AnnotateCtxWithSpan annotates the given context with the information in
// AmbientContext (see AnnotateCtx) and opens a span.
//
// If the given context has a span, the new span is a child of that span.
// Otherwise, the Tracer in AmbientContext is used to create a new root span.
//
// The caller is responsible for closing the span (via Span.Finish).
func (ac *AmbientContext) AnnotateCtxWithSpan(
	ctx context.Context, opName string,
) (context.Context, opentracing.Span) {
	if ac.tags != nil {
		ctx = copyTagChain(ctx, ac.tags)
	}
	var span opentracing.Span
	if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil {
		tracer := parentSpan.Tracer()
		span = tracer.StartSpan(opName, opentracing.ChildOf(parentSpan.Context()))
	} else {
		if ac.Tracer == nil {
			panic("no tracer in AmbientContext for root span")
		}
		span = ac.Tracer.StartSpan(opName)
	}
	return opentracing.ContextWithSpan(ctx, span), span
}
Developer: knz, Project: cockroach, Lines: 26, Source: ambient_context.go
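A sketch of the intended call pattern for AnnotateCtxWithSpan; the server type and its ambient field are hypothetical placeholders, and the deferred Finish satisfies the contract in the doc comment above:

type server struct {
	ambient AmbientContext
}

func (s *server) processBatch(ctx context.Context) {
	ctx, span := s.ambient.AnnotateCtxWithSpan(ctx, "process-batch")
	defer span.Finish() // the caller is responsible for closing the span
	// ... do the work using ctx ...
}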
Example 13: reset
// reset creates a new Txn and initializes it using the session defaults.
func (ts *txnState) reset(ctx context.Context, e *Executor, s *Session) {
	*ts = txnState{}
	ts.txn = client.NewTxn(ctx, *e.ctx.DB)
	ts.txn.Context = s.context
	ts.txn.Proto.Isolation = s.DefaultIsolationLevel
	ts.tr = s.Trace
	// Discard the old schemaChangers, if any.
	ts.schemaChangers = schemaChangerCollection{}
	if traceSQL {
		sp, err := tracing.JoinOrNewSnowball("coordinator", nil, func(sp basictracer.RawSpan) {
			ts.txn.CollectedSpans = append(ts.txn.CollectedSpans, sp)
		})
		if err != nil {
			log.Warningf(ctx, "unable to create snowball tracer: %s", err)
			return
		}
		ts.txn.Context = opentracing.ContextWithSpan(ts.txn.Context, sp)
		ts.sp = sp
	}
}
Developer: yangxuanjia, Project: cockroach, Lines: 22, Source: session.go
Example 14: TestEventLogAndTrace
func TestEventLogAndTrace(t *testing.T) {
	ctx := context.Background()
	// Events to context without a trace should be no-ops.
	Event(ctx, "should-not-show-up")
	el := &testingEventLog{}
	ctxWithEventLog := withEventLogInternal(ctx, el)
	Event(ctxWithEventLog, "test1")
	ErrEvent(ctxWithEventLog, "testerr")
	var traceEv events
	tracer := testingTracer(&traceEv)
	sp := tracer.StartSpan("s")
	ctxWithBoth := opentracing.ContextWithSpan(ctxWithEventLog, sp)
	// Events should only go to the trace.
	Event(ctxWithBoth, "test3")
	ErrEventf(ctxWithBoth, "%s", "test3err")
	// Events to parent context should still go to the event log.
	Event(ctxWithEventLog, "test5")
	sp.Finish()
	el.Finish()
	trExpected := "[s:start s:test3 s:test3err s:finish]"
	if evStr := fmt.Sprint(traceEv); evStr != trExpected {
		t.Errorf("expected events '%s', got '%s'", trExpected, evStr)
	}
	elExpected := "[test1 testerr(err) test5 finish]"
	if evStr := fmt.Sprint(el.ev); evStr != elExpected {
		t.Errorf("expected events '%s', got '%s'", elExpected, evStr)
	}
}
Developer: BramGruneir, Project: cockroach, Lines: 36, Source: trace_test.go
Example 15: TestTraceWithTags
func TestTraceWithTags(t *testing.T) {
	ctx := context.Background()
	ctx = WithLogTagInt(ctx, "tag", 1)
	var ev events
	tracer := testingTracer(&ev)
	sp := tracer.StartSpan("s")
	ctxWithSpan := opentracing.ContextWithSpan(ctx, sp)
	Event(ctxWithSpan, "test1")
	ErrEvent(ctxWithSpan, "testerr")
	VEvent(ctxWithSpan, logging.verbosity.get()+1, "test2")
	Info(ctxWithSpan, "log")
	sp.Finish()
	expected := events{
		"s:start", "s:[tag=1] test1", "s:[tag=1] testerr", "s:[tag=1] test2", "s:[tag=1] log",
		"s:finish",
	}
	if !compareTraces(expected, ev) {
		t.Errorf("expected events '%s', got '%v'", expected, ev)
	}
}
Developer: BramGruneir, Project: cockroach, Lines: 24, Source: trace_test.go
Example 16: Send
// Send implements the batch.Sender interface. If the request is part of a
// transaction, the TxnCoordSender adds the transaction to a map of active
// transactions and begins heartbeating it. Every subsequent request for the
// same transaction updates the lastUpdate timestamp to prevent live
// transactions from being considered abandoned and garbage collected.
// Read/write mutating requests have their key or key range added to the
// transaction's interval tree of key ranges for eventual cleanup via resolved
// write intents; they're tagged to an outgoing EndTransaction request, with
// the receiving replica in charge of resolving them.
func (tc *TxnCoordSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	// Start new or pick up active trace and embed its trace metadata into
	// header for use by RPC recipients. From here on, there's always an active
	// Trace, though its overhead is small unless it's sampled.
	sp, cleanupSp := tracing.SpanFromContext(opTxnCoordSender, tc.tracer, ctx)
	defer cleanupSp()
	// TODO(tschottdorf): To get rid of the spurious alloc below we need to
	// implement the carrier interface on ba.Header or make Span non-nullable,
	// both of which force all of ba on the Heap. It's already there, so may
	// not be a big deal, but ba should live on the stack. Also not easy to use
	// a buffer pool here since anything that goes into the RPC layer could be
	// used by goroutines we didn't wait for.
	if ba.Header.Trace == nil {
		ba.Header.Trace = &tracing.Span{}
	}
	if err := tc.tracer.Inject(sp, basictracer.Delegator, ba.Trace); err != nil {
		return nil, roachpb.NewError(err)
	}
	if err := tc.maybeBeginTxn(&ba); err != nil {
		return nil, roachpb.NewError(err)
	}
	var startNS int64
	ba.SetNewRequest()
	// This is the earliest point at which the request has an ID (if
	// applicable). Begin a Trace which follows this request.
	ctx = opentracing.ContextWithSpan(ctx, sp)
	if ba.Txn != nil {
		// If this request is part of a transaction...
		txnID := *ba.Txn.ID
		// Verify that if this Transaction is not read-only, we have it on
		// file. If not, refuse writes - the client must have issued a write on
		// another coordinator previously.
		if ba.Txn.Writing && ba.IsTransactionWrite() {
			tc.Lock()
			_, ok := tc.txns[txnID]
			tc.Unlock()
			if !ok {
				return nil, roachpb.NewErrorf("transaction must not write on multiple coordinators")
			}
		}
		// Set the timestamp to the original timestamp for read-only
		// commands and to the transaction timestamp for read/write
		// commands.
		if ba.IsReadOnly() {
			ba.Timestamp = ba.Txn.OrigTimestamp
		} else {
			ba.Timestamp = ba.Txn.Timestamp
		}
		if rArgs, ok := ba.GetArg(roachpb.EndTransaction); ok {
			et := rArgs.(*roachpb.EndTransactionRequest)
			if len(et.Key) != 0 {
				return nil, roachpb.NewErrorf("EndTransaction must not have a Key set")
			}
			et.Key = ba.Txn.Key
			// Remember when EndTransaction started in case we want to
			// be linearizable.
			startNS = tc.clock.PhysicalNow()
			if len(et.IntentSpans) > 0 {
				// TODO(tschottdorf): it may be useful to allow this later.
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				return nil, roachpb.NewErrorf("client must not pass intents to EndTransaction")
			}
			tc.Lock()
			txnMeta, metaOK := tc.txns[txnID]
			if metaOK {
				et.IntentSpans = txnMeta.intentSpans()
			}
			tc.Unlock()
			if intentSpans := ba.GetIntentSpans(); len(intentSpans) > 0 {
				// Writes in Batch, so EndTransaction is fine. Should add
				// outstanding intents to EndTransaction, though.
				// TODO(tschottdorf): possible issues when the batch fails,
				// but the intents have been added anyways.
				// TODO(tschottdorf): some of these intents may be covered
				// by others, for example {[a,b), a}). This can lead to
				// some extra requests when those are non-local to the txn
				// record. But it doesn't seem worth optimizing now.
				et.IntentSpans = append(et.IntentSpans, intentSpans...)
			} else if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
//......... part of the code omitted here .........
Developer: liugangnhm, Project: cockroach, Lines: 101, Source: txn_coord_sender.go
Example 17: resetForNewSQLTxn
// resetForNewSQLTxn (re)initializes the txnState for a new transaction.
// It creates a new client.Txn and initializes it using the session defaults.
// txnState.State will be set to Open.
func (ts *txnState) resetForNewSQLTxn(e *Executor, s *Session) {
	if ts.sp != nil {
		panic(fmt.Sprintf("txnState.reset() called on ts with active span. How come "+
			"finishSQLTxn() wasn't called previously? ts: %+v", ts))
	}
	// Reset state vars to defaults.
	ts.retrying = false
	ts.retryIntent = false
	ts.autoRetry = false
	ts.commitSeen = false
	// Discard previously collected spans. We start collecting anew on
	// every fresh SQL txn.
	ts.CollectedSpans = nil
	// Create a context for this transaction. It will include a
	// root span that will contain everything executed as part of the
	// upcoming SQL txn, including (automatic or user-directed) retries.
	// The span is closed by finishSQLTxn().
	// TODO(andrei): figure out how to close these spans on server shutdown?
	ctx := s.context
	var sp opentracing.Span
	if traceSQL {
		var err error
		sp, err = tracing.JoinOrNewSnowball("coordinator", nil, func(sp basictracer.RawSpan) {
			ts.CollectedSpans = append(ts.CollectedSpans, sp)
		})
		if err != nil {
			log.Warningf(ctx, "unable to create snowball tracer: %s", err)
			return
		}
	} else if traceSQLFor7881 {
		var err error
		sp, _, err = tracing.NewTracerAndSpanFor7881(func(sp basictracer.RawSpan) {
			ts.CollectedSpans = append(ts.CollectedSpans, sp)
		})
		if err != nil {
			log.Fatalf(ctx, "couldn't create a tracer for debugging #7881: %s", err)
		}
	} else {
		if parentSp := opentracing.SpanFromContext(ctx); parentSp != nil {
			// Create a child span for this SQL txn.
			tracer := parentSp.Tracer()
			sp = tracer.StartSpan("sql txn", opentracing.ChildOf(parentSp.Context()))
		} else {
			// Create a root span for this SQL txn.
			tracer := e.cfg.AmbientCtx.Tracer
			sp = tracer.StartSpan("sql txn")
		}
	}
	// Put the new span in the context.
	ts.sp = sp
	ctx = opentracing.ContextWithSpan(ctx, sp)
	ts.Ctx = ctx
	ts.mon.Start(ctx, &s.mon, mon.BoundAccount{})
	ts.txn = client.NewTxn(ts.Ctx, *e.cfg.DB)
	ts.txn.Proto.Isolation = s.DefaultIsolationLevel
	ts.State = Open
	// Discard the old schemaChangers, if any.
	ts.schemaChangers = schemaChangerCollection{}
}
Developer: hvaara, Project: cockroach, Lines: 67, Source: session.go
Example 18: Send
// Send implements the batch.Sender interface. If the request is part of a
// transaction, the TxnCoordSender adds the transaction to a map of active
// transactions and begins heartbeating it. Every subsequent request for the
// same transaction updates the lastUpdate timestamp to prevent live
// transactions from being considered abandoned and garbage collected.
// Read/write mutating requests have their key or key range added to the
// transaction's interval tree of key ranges for eventual cleanup via resolved
// write intents; they're tagged to an outgoing EndTransaction request, with
// the receiving replica in charge of resolving them.
func (tc *TxnCoordSender) Send(ctx context.Context, ba roachpb.BatchRequest) (*roachpb.BatchResponse, *roachpb.Error) {
	{
		// Start new or pick up active trace and embed its trace metadata into
		// header for use by RPC recipients. From here on, there's always an active
		// Trace, though its overhead is small unless it's sampled.
		sp := opentracing.SpanFromContext(ctx)
		if sp == nil {
			sp = tc.tracer.StartSpan(opTxnCoordSender)
			defer sp.Finish()
			ctx = opentracing.ContextWithSpan(ctx, sp)
		}
		// TODO(tschottdorf): To get rid of the spurious alloc below we need to
		// implement the carrier interface on ba.Header or make Span non-nullable,
		// both of which force all of ba on the Heap. It's already there, so may
		// not be a big deal, but ba should live on the stack. Also not easy to use
		// a buffer pool here since anything that goes into the RPC layer could be
		// used by goroutines we didn't wait for.
		if ba.Header.Trace == nil {
			ba.Header.Trace = &tracing.Span{}
		}
		if err := tc.tracer.Inject(sp, basictracer.Delegator, ba.Trace); err != nil {
			return nil, roachpb.NewError(err)
		}
	}
	startNS := tc.clock.PhysicalNow()
	if ba.Txn != nil {
		// If this request is part of a transaction...
		if err := tc.maybeBeginTxn(&ba); err != nil {
			return nil, roachpb.NewError(err)
		}
		txnID := *ba.Txn.ID
		// Verify that if this Transaction is not read-only, we have it on file.
		// If not, refuse further operations - the transaction was aborted due
		// to a timeout or the client must have issued a write on another
		// coordinator previously.
		if ba.Txn.Writing {
			tc.Lock()
			_, ok := tc.txns[txnID]
			tc.Unlock()
			if !ok {
				pErr := roachpb.NewErrorf("writing transaction timed out, was aborted, " +
					"or ran on multiple coordinators")
				return nil, pErr
			}
		}
		if rArgs, ok := ba.GetArg(roachpb.EndTransaction); ok {
			et := rArgs.(*roachpb.EndTransactionRequest)
			if len(et.Key) != 0 {
				return nil, roachpb.NewErrorf("EndTransaction must not have a Key set")
			}
			et.Key = ba.Txn.Key
			if len(et.IntentSpans) > 0 {
				// TODO(tschottdorf): it may be useful to allow this later.
				// That would be part of a possible plan to allow txns which
				// write on multiple coordinators.
				return nil, roachpb.NewErrorf("client must not pass intents to EndTransaction")
			}
			tc.Lock()
			txnMeta, metaOK := tc.txns[txnID]
			{
				// Populate et.IntentSpans, taking into account both existing
				// writes (if any) and new writes in this batch, and taking
				// care to perform proper deduplication.
				var keys interval.RangeGroup
				if metaOK {
					keys = txnMeta.keys
				} else {
					keys = interval.NewRangeTree()
				}
				ba.IntentSpanIterate(func(key, endKey roachpb.Key) {
					addKeyRange(keys, key, endKey)
				})
				et.IntentSpans = collectIntentSpans(keys)
			}
			tc.Unlock()
			if len(et.IntentSpans) > 0 {
				// All good, proceed.
			} else if !metaOK {
				// If we don't have the transaction, then this must be a retry
				// by the client. We can no longer reconstruct a correct
				// request so we must fail.
				//
				// TODO(bdarnell): if we had a GetTransactionStatus API then
				// we could lookup the transaction and return either nil or
				// TransactionAbortedError instead of this ambivalent error.
				return nil, roachpb.NewErrorf("transaction is already committed or aborted")
			}
//......... part of the code omitted here .........
Developer: petermattis, Project: cockroach, Lines: 101, Source: txn_coord_sender.go
Example 19: Batch
// Batch implements the roachpb.KVServer interface.
func (n *Node) Batch(ctx context.Context, args *roachpb.BatchRequest) (*roachpb.BatchResponse, error) {
	// TODO(marc): this code is duplicated in kv/db.go, which should be fixed.
	// Also, grpc's authentication model (which gives credential access in the
	// request handler) doesn't really fit with the current design of the
	// security package (which assumes that TLS state is only given at connection
	// time) - that should be fixed.
	if peer, ok := peer.FromContext(ctx); ok {
		if tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo); ok {
			certUser, err := security.GetCertificateUser(&tlsInfo.State)
			if err != nil {
				return nil, err
			}
			if certUser != security.NodeUser {
				return nil, util.Errorf("user %s is not allowed", certUser)
			}
		}
	}
	var br *roachpb.BatchResponse
	opName := "node " + strconv.Itoa(int(n.Descriptor.NodeID)) // could save allocs here
	fail := func(err error) {
		br = &roachpb.BatchResponse{}
		br.Error = roachpb.NewError(err)
	}
	f := func() {
		sp, err := tracing.JoinOrNew(n.ctx.Tracer, args.Trace, opName)
		if err != nil {
			fail(err)
			return
		}
		// If this is a snowball span, it gets special treatment: It skips the
		// regular tracing machinery, and we instead send the collected spans
		// back with the response. This is more expensive, but then again,
		// those are individual requests traced by users, so they can be.
		if sp.BaggageItem(tracing.Snowball) != "" {
			sp.LogEvent("delegating to snowball tracing")
			sp.Finish()
			if sp, err = tracing.JoinOrNewSnowball(opName, args.Trace, func(rawSpan basictracer.RawSpan) {
				encSp, err := tracing.EncodeRawSpan(&rawSpan, nil)
				if err != nil {
					log.Warning(err)
				}
				br.CollectedSpans = append(br.CollectedSpans, encSp)
			}); err != nil {
				fail(err)
				return
			}
		}
		defer sp.Finish()
		traceCtx := opentracing.ContextWithSpan(n.context(ctx), sp)
		tStart := timeutil.Now()
		var pErr *roachpb.Error
		br, pErr = n.stores.Send(traceCtx, *args)
		if pErr != nil {
			br = &roachpb.BatchResponse{}
			log.Trace(traceCtx, fmt.Sprintf("error: %T", pErr.GetDetail()))
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(n.stores, br))
		}
		n.metrics.callComplete(timeutil.Since(tStart), pErr)
		br.Error = pErr
	}
	if !n.stopper.RunTask(f) {
		return nil, util.Errorf("node %d stopped", n.Descriptor.NodeID)
	}
	return br, nil
}
Developer: GitGoldie, Project: cockroach, Lines: 73, Source: node.go
Example 20: Start
// Start starts the test cluster by bootstrapping an in-memory store
// (defaults to maximum of 50M). The server is started, launching the
// node RPC server and all HTTP endpoints. Use the value of
// TestServer.Addr after Start() for client connections. Use Stop()
// to shutdown the server after the test completes.
func (ltc *LocalTestCluster) Start(t util.Tester) {
	nodeID := roachpb.NodeID(1)
	nodeDesc := &roachpb.NodeDescriptor{NodeID: nodeID}
	ltc.tester = t
	ltc.Manual = hlc.NewManualClock(0)
	ltc.Clock = hlc.NewClock(ltc.Manual.UnixNano)
	ltc.Stopper = stop.NewStopper()
	rpcContext := rpc.NewContext(testutils.NewNodeTestBaseContext(), ltc.Clock, ltc.Stopper)
	ltc.Gossip = gossip.New(rpcContext, gossip.TestBootstrap, ltc.Stopper)
	ltc.Eng = engine.NewInMem(roachpb.Attributes{}, 50<<20, ltc.Stopper)
	ltc.stores = storage.NewStores(ltc.Clock)
	tracer := tracing.NewTracer()
	var rpcSend rpcSendFn = func(_ SendOptions, _ ReplicaSlice,
		args roachpb.BatchRequest, _ *rpc.Context) (proto.Message, error) {
		if ltc.Latency > 0 {
			time.Sleep(ltc.Latency)
		}
		sp := tracer.StartSpan("node")
		defer sp.Finish()
		ctx := opentracing.ContextWithSpan(context.Background(), sp)
		sp.LogEvent(args.String())
		br, pErr := ltc.stores.Send(ctx, args)
		if br == nil {
			br = &roachpb.BatchResponse{}
		}
		if br.Error != nil {
			panic(roachpb.ErrorUnexpectedlySet(ltc.stores, br))
		}
		br.Error = pErr
		if pErr != nil {
			sp.LogEvent("error: " + pErr.String())
		}
		return br, nil
	}
	retryOpts := GetDefaultDistSenderRetryOptions()
	retryOpts.Closer = ltc.Stopper.ShouldDrain()
	ltc.distSender = NewDistSender(&DistSenderContext{
		Clock:                    ltc.Clock,
		RangeDescriptorCacheSize: defaultRangeDescriptorCacheSize,
		RangeLookupMaxRanges:     defaultRangeLookupMaxRanges,
		LeaderCacheSize:          defaultLeaderCacheSize,
		RPCRetryOptions:          &retryOpts,
		nodeDescriptor:           nodeDesc,
		RPCSend:                  rpcSend,    // defined above
		RangeDescriptorDB:        ltc.stores, // for descriptor lookup
	}, ltc.Gossip)
	ltc.Sender = NewTxnCoordSender(ltc.distSender, ltc.Clock, false /* !linearizable */, tracer,
		ltc.Stopper, NewTxnMetrics(metric.NewRegistry()))
	ltc.DB = client.NewDB(ltc.Sender)
	transport := storage.NewDummyRaftTransport()
	ctx := storage.TestStoreContext()
	ctx.Clock = ltc.Clock
	ctx.DB = ltc.DB
	ctx.Gossip = ltc.Gossip
	ctx.Transport = transport
	ctx.Tracer = tracer
	ltc.Store = storage.NewStore(ctx, ltc.Eng, nodeDesc)
	if err := ltc.Store.Bootstrap(roachpb.StoreIdent{NodeID: nodeID, StoreID: 1}, ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.stores.AddStore(ltc.Store)
	if err := ltc.Store.BootstrapRange(nil); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	if err := ltc.Store.Start(ltc.Stopper); err != nil {
		t.Fatalf("unable to start local test cluster: %s", err)
	}
	ltc.Gossip.SetNodeID(nodeDesc.NodeID)
	if err := ltc.Gossip.SetNodeDescriptor(nodeDesc); err != nil {
		t.Fatalf("unable to set node descriptor: %s", err)
	}
}
Developer: liugangnhm, Project: cockroach, Lines: 79, Source: local_test_cluster.go
Note: The github.com/opentracing/opentracing-go.ContextWithSpan examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code hosting platforms. The snippets were selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and redistribution or use should follow the corresponding project's License. Do not reproduce without permission.