C++ OwnedPointerVector Class Code Examples


This article compiles typical usage examples of the C++ OwnedPointerVector class. If you have been wondering what OwnedPointerVector is for, how to use it, or simply want to see concrete usage, the selected class code examples below should help.



The following presents 20 code examples of the OwnedPointerVector class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
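Before the examples, here is a minimal, self-contained sketch of the ownership pattern that OwnedPointerVector encapsulates: a vector of raw pointers whose elements are deleted on destruction unless ownership is transferred out. This is an illustrative stand-in (the class OwnedPointerVectorSketch below is hypothetical, written for this article), not MongoDB's actual implementation; only the method names mutableVector(), vector(), push_back(), and release() mirror the calls used in the examples that follow.

#include <iostream>
#include <string>
#include <vector>

// Simplified stand-in for the OwnedPointerVector idea (NOT MongoDB's code):
// owns the pointed-to elements until release() hands them to the caller.
template <typename T>
class OwnedPointerVectorSketch {
public:
    OwnedPointerVectorSketch() = default;
    ~OwnedPointerVectorSketch() {
        for (T* p : _vec) delete p;  // still owned here: clean up
    }

    // Mutable access, e.g. to pass the vector to an out-parameter API.
    std::vector<T*>& mutableVector() { return _vec; }

    // Read-only access without transferring ownership.
    const std::vector<T*>& vector() const { return _vec; }

    void push_back(T* p) { _vec.push_back(p); }

    // Transfer ownership of all elements to the caller.
    std::vector<T*> release() {
        std::vector<T*> out;
        out.swap(_vec);
        return out;
    }

private:
    std::vector<T*> _vec;
};

int main() {
    OwnedPointerVectorSketch<std::string> owned;
    owned.push_back(new std::string("a"));
    owned.push_back(new std::string("b"));

    // Ownership moves to 'released'; 'owned' no longer deletes the strings.
    std::vector<std::string*> released = owned.release();
    for (std::string* s : released) {
        std::cout << *s << "\n";
        delete s;  // the caller is now responsible
    }
    return 0;
}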

Example 1: GetPolygonBorder

bool BigSimplePolygon::Contains(const S2Polyline& line) const {
    //
    // A line is contained within a loop if the result of subtracting the loop from the line is
    // nothing.
    //
    // Also, a line is contained within a loop if the result of clipping the line to the
    // complement of the loop is nothing.
    //
    // If we can't subtract the loop itself using S2, we clip (intersect) to the inverse.  Every
    // point in S2 is contained in exactly one of these loops.
    //
    // TODO: Polygon borders are actually kind of weird, and this is somewhat inconsistent with
    // Intersects().  A point might Intersect() a boundary exactly, but not be Contain()ed
    // within the Polygon.  Think the right thing to do here is custom intersection functions.
    //
    const S2Polygon& polyBorder = GetPolygonBorder();

    OwnedPointerVector<S2Polyline> clippedOwned;
    vector<S2Polyline*>& clipped = clippedOwned.mutableVector();

    if (_isNormalized) {
        // Polygon border is the same as the loop
        polyBorder.SubtractFromPolyline(&line, &clipped);
        return clipped.size() == 0;
    } else {
        // Polygon border is the complement of the loop
        polyBorder.IntersectWithPolyline(&line, &clipped);
        return clipped.size() == 0;
    }
}
Author: AlexOreshkevich, Project: mongo, Lines: 30, Source: big_polygon.cpp


Example 2: clone

    PlanCacheEntry* PlanCacheEntry::clone() const {
        OwnedPointerVector<QuerySolution> solutions;
        for (size_t i = 0; i < plannerData.size(); ++i) {
            QuerySolution* qs = new QuerySolution();
            qs->cacheData.reset(plannerData[i]->clone());
            solutions.mutableVector().push_back(qs);
        }
        PlanCacheEntry* entry = new PlanCacheEntry(solutions.vector(), decision->clone());

        entry->backupSoln = backupSoln;

        // Copy query shape.
        entry->query = query.getOwned();
        entry->sort = sort.getOwned();
        entry->projection = projection.getOwned();

        // Copy performance stats.
        for (size_t i = 0; i < feedback.size(); ++i) {
            PlanCacheEntryFeedback* fb = new PlanCacheEntryFeedback();
            fb->stats.reset(feedback[i]->stats->clone());
            fb->score = feedback[i]->score;
            entry->feedback.push_back(fb);
        }
        entry->averageScore = averageScore;
        entry->stddevScore = stddevScore;
        return entry;
    }
Author: pharrell84, Project: rtree-mongo-mf, Lines: 27, Source: plan_cache.cpp


Example 3: parseShardKeyPattern

/**
 * Currently the allowable shard keys are either
 * i) a hashed single field, e.g. { a : "hashed" }, or
 * ii) a compound list of ascending, potentially-nested field paths, e.g. { a : 1 , b.c : 1 }
 */
static vector<FieldRef*> parseShardKeyPattern(const BSONObj& keyPattern) {
    OwnedPointerVector<FieldRef> parsedPaths;
    static const vector<FieldRef*> empty;

    BSONObjIterator patternIt(keyPattern);
    while (patternIt.more()) {
        BSONElement patternEl = patternIt.next();
        parsedPaths.push_back(new FieldRef(patternEl.fieldNameStringData()));
        const FieldRef& patternPath = *parsedPaths.back();

        // Empty path
        if (patternPath.numParts() == 0)
            return empty;

        // Extra "." in path?
        if (patternPath.dottedField() != patternEl.fieldNameStringData())
            return empty;

        // Empty parts of the path, ".."?
        for (size_t i = 0; i < patternPath.numParts(); ++i) {
            if (patternPath.getPart(i).size() == 0)
                return empty;
        }

        // Numeric and ascending (1.0), or "hashed" and single field
        if (!patternEl.isNumber()) {
            if (keyPattern.nFields() != 1 || !isHashedPatternEl(patternEl))
                return empty;
        } else if (patternEl.numberInt() != 1) {
            return empty;
        }
    }

    return parsedPaths.release();
}
Author: DCEngines, Project: mongo, Lines: 40, Source: shard_key_pattern.cpp


Example 4:

    vector<RecordIterator*> SimpleRecordStoreV1::getManyIterators( OperationContext* txn ) const {
        OwnedPointerVector<RecordIterator> iterators;
        const Extent* ext;
        for (DiskLoc extLoc = details()->firstExtent(txn); !extLoc.isNull(); extLoc = ext->xnext) {
            ext = _getExtent(txn, extLoc);
            if (ext->firstRecord.isNull())
                continue;
            iterators.push_back(
                new RecordStoreV1Base::IntraExtentIterator(txn, ext->firstRecord, this));
        }

        return iterators.release();
    }
Author: BotterLiu, Project: mongo, Lines: 13, Source: record_store_v1_simple.cpp


Example 5:

    vector<PlanStageStats*> MultiPlanStage::generateCandidateStats() {
        OwnedPointerVector<PlanStageStats> candidateStats;

        for (size_t ix = 0; ix < _candidates.size(); ix++) {
            if (ix == (size_t)_bestPlanIdx) { continue; }
            if (ix == (size_t)_backupPlanIdx) { continue; }

            PlanStageStats* stats = _candidates[ix].root->getStats();
            candidateStats.push_back(stats);
        }

        return candidateStats.release();
    }
Author: Amosvista, Project: mongo, Lines: 13, Source: multi_plan.cpp


Example 6: cloned

const S2Polygon& BigSimplePolygon::GetPolygonBorder() const {
    if (_borderPoly)
        return *_borderPoly;

    unique_ptr<S2Loop> cloned(_loop->Clone());

    // Any loop in the polygon should be smaller than a hemisphere (2*Pi).
    cloned->Normalize();

    OwnedPointerVector<S2Loop> loops;
    loops.mutableVector().push_back(cloned.release());
    _borderPoly.reset(new S2Polygon(&loops.mutableVector()));
    return *_borderPoly;
}
Author: AlexOreshkevich, Project: mongo, Lines: 14, Source: big_polygon.cpp


Example 7: dassert

IndexBounds ChunkManager::getIndexBoundsForQuery(const BSONObj& key,
                                                 const CanonicalQuery& canonicalQuery) {
    // $text is not allowed in planning since we don't have text index on mongos.
    //
    // TODO: Treat $text query as a no-op in planning. So with shard key {a: 1},
    //       the query { a: 2, $text: { ... } } will only target to {a: 2}.
    if (QueryPlannerCommon::hasNode(canonicalQuery.root(), MatchExpression::TEXT)) {
        IndexBounds bounds;
        IndexBoundsBuilder::allValuesBounds(key, &bounds);  // [minKey, maxKey]
        return bounds;
    }

    // Consider shard key as an index
    string accessMethod = IndexNames::findPluginName(key);
    dassert(accessMethod == IndexNames::BTREE || accessMethod == IndexNames::HASHED);

    // Use query framework to generate index bounds
    QueryPlannerParams plannerParams;
    // Must use "shard key" index
    plannerParams.options = QueryPlannerParams::NO_TABLE_SCAN;
    IndexEntry indexEntry(key,
                          accessMethod,
                          false /* multiKey */,
                          false /* sparse */,
                          false /* unique */,
                          "shardkey",
                          NULL /* filterExpr */,
                          BSONObj());
    plannerParams.indices.push_back(indexEntry);

    OwnedPointerVector<QuerySolution> solutions;
    Status status = QueryPlanner::plan(canonicalQuery, plannerParams, &solutions.mutableVector());
    uassert(status.code(), status.reason(), status.isOK());

    IndexBounds bounds;

    for (vector<QuerySolution*>::const_iterator it = solutions.begin();
         bounds.size() == 0 && it != solutions.end();
         it++) {
        // Try next solution if we failed to generate index bounds, i.e. bounds.size() == 0
        bounds = collapseQuerySolution((*it)->root.get());
    }

    if (bounds.size() == 0) {
        // We cannot plan the query without collection scan, so target to all shards.
        IndexBoundsBuilder::allValuesBounds(key, &bounds);  // [minKey, maxKey]
    }
    return bounds;
}
Author: Andiry, Project: mongo, Lines: 49, Source: chunk_manager.cpp


Example 8: dassert

void Strategy::writeOp(OperationContext* txn, int op, Request& request) {
    // make sure we have a last error
    dassert(&LastError::get(cc()));

    OwnedPointerVector<BatchedCommandRequest> commandRequestsOwned;
    vector<BatchedCommandRequest*>& commandRequests = commandRequestsOwned.mutableVector();

    msgToBatchRequests(request.m(), &commandRequests);

    for (vector<BatchedCommandRequest*>::iterator it = commandRequests.begin();
         it != commandRequests.end();
         ++it) {
        // Multiple commands registered to last error as multiple requests
        if (it != commandRequests.begin())
            LastError::get(cc()).startRequest();

        BatchedCommandRequest* commandRequest = *it;

        // Adjust namespaces for command
        NamespaceString fullNS(commandRequest->getNS());
        string cmdNS = fullNS.getCommandNS();
        // We only pass in collection name to command
        commandRequest->setNS(fullNS);

        BSONObjBuilder builder;
        BSONObj requestBSON = commandRequest->toBSON();

        {
            // Disable the last error object for the duration of the write cmd
            LastError::Disabled disableLastError(&LastError::get(cc()));
            Command::runAgainstRegistered(txn, cmdNS.c_str(), requestBSON, builder, 0);
        }

        BatchedCommandResponse commandResponse;
        bool parsed = commandResponse.parseBSON(builder.done(), NULL);
        (void)parsed;  // for compile
        dassert(parsed && commandResponse.isValid(NULL));

        // Populate the lastError object based on the write response
        LastError::get(cc()).reset();
        bool hadError =
            batchErrorToLastError(*commandRequest, commandResponse, &LastError::get(cc()));

        // Check if this is an ordered batch and we had an error which should stop processing
        if (commandRequest->getOrdered() && hadError)
            break;
    }
}
Author: CeperaCPP, Project: mongo, Lines: 48, Source: strategy.cpp


Example 9: buildMergeLogEntry

    BSONObj buildMergeLogEntry( const OwnedPointerVector<ChunkType>& chunksToMerge,
                                const ChunkVersion& currShardVersion,
                                const ChunkVersion& newMergedVersion ) {

        BSONObjBuilder logDetailB;

        BSONArrayBuilder mergedB( logDetailB.subarrayStart( "merged" ) );

        for ( OwnedPointerVector<ChunkType>::const_iterator it = chunksToMerge.begin();
                it != chunksToMerge.end(); ++it ) {
            ChunkType* chunkToMerge = *it;
            mergedB.append( chunkToMerge->toBSON() );
        }

        mergedB.done();

        currShardVersion.addToBSON( logDetailB, "prevShardVersion" );
        newMergedVersion.addToBSON( logDetailB, "mergedVersion" );

        return logDetailB.obj();
    }
Author: leonidbl91, Project: mongo, Lines: 21, Source: d_merge.cpp


Example 10: generateSection

    BSONObj generateSection(OperationContext* txn, const BSONElement& configElement) const {
        RangeDeleter* deleter = getDeleter();
        if (!deleter) {
            return BSONObj();
        }

        BSONObjBuilder result;

        OwnedPointerVector<DeleteJobStats> statsList;
        deleter->getStatsHistory(&statsList.mutableVector());
        BSONArrayBuilder oldStatsBuilder;
        for (OwnedPointerVector<DeleteJobStats>::const_iterator it = statsList.begin();
             it != statsList.end();
             ++it) {
            BSONObjBuilder entryBuilder;
            entryBuilder.append("deletedDocs", (*it)->deletedDocCount);

            if ((*it)->queueEndTS > Date_t()) {
                entryBuilder.append("queueStart", (*it)->queueStartTS);
                entryBuilder.append("queueEnd", (*it)->queueEndTS);
            }

            if ((*it)->deleteEndTS > Date_t()) {
                entryBuilder.append("deleteStart", (*it)->deleteStartTS);
                entryBuilder.append("deleteEnd", (*it)->deleteEndTS);

                if ((*it)->waitForReplEndTS > Date_t()) {
                    entryBuilder.append("waitForReplStart", (*it)->waitForReplStartTS);
                    entryBuilder.append("waitForReplEnd", (*it)->waitForReplEndTS);
                }
            }

            oldStatsBuilder.append(entryBuilder.obj());
        }
        result.append("lastDeleteStats", oldStatsBuilder.arr());

        return result.obj();
    }
Author: stevelyall, Project: mongol-db, Lines: 38, Source: range_deleter_server_status.cpp


Example 11: invariant

// static
Status ListFilters::list(const QuerySettings& querySettings, BSONObjBuilder* bob) {
    invariant(bob);

    // Format of BSON result:
    //
    // {
    //     hints: [
    //         {
    //             query: <query>,
    //             sort: <sort>,
    //             projection: <projection>,
    //             indexes: [<index1>, <index2>, <index3>, ...]
    //         }
    //     ]
    // }
    BSONArrayBuilder hintsBuilder(bob->subarrayStart("filters"));
    OwnedPointerVector<AllowedIndexEntry> entries;
    entries.mutableVector() = querySettings.getAllAllowedIndices();
    for (vector<AllowedIndexEntry*>::const_iterator i = entries.begin();
            i != entries.end(); ++i) {
        AllowedIndexEntry* entry = *i;
        invariant(entry);

        BSONObjBuilder hintBob(hintsBuilder.subobjStart());
        hintBob.append("query", entry->query);
        hintBob.append("sort", entry->sort);
        hintBob.append("projection", entry->projection);
        BSONArrayBuilder indexesBuilder(hintBob.subarrayStart("indexes"));
        for (vector<BSONObj>::const_iterator j = entry->indexKeyPatterns.begin();
                j != entry->indexKeyPatterns.end(); ++j) {
            const BSONObj& index = *j;
            indexesBuilder.append(index);
        }
        indexesBuilder.doneFast();
    }
    hintsBuilder.doneFast();
    return Status::OK();
}
Author: jewkesy, Project: mongo, Lines: 38, Source: index_filter_commands.cpp


Example 12:

/*static*/ int MongoFile::_flushAll(bool sync) {
    if (!sync) {
        int num = 0;
        LockMongoFilesShared lk;
        for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++) {
            num++;
            MongoFile* mmf = *i;
            if (!mmf)
                continue;

            mmf->flush(sync);
        }
        return num;
    }

    // want to do it sync

    // get a thread-safe Flushable object for each file first in a single lock
    // so that we can iterate and flush without doing any locking here
    OwnedPointerVector<Flushable> thingsToFlushWrapper;
    vector<Flushable*>& thingsToFlush = thingsToFlushWrapper.mutableVector();
    {
        LockMongoFilesShared lk;
        for (set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++) {
            MongoFile* mmf = *i;
            if (!mmf)
                continue;
            thingsToFlush.push_back(mmf->prepareFlush());
        }
    }

    for (size_t i = 0; i < thingsToFlush.size(); i++) {
        thingsToFlush[i]->flush();
    }

    return thingsToFlush.size();
}
Author: Andiry, Project: mongo, Lines: 37, Source: mmap.cpp


Example 13: buildApplyOpsCmd

    BSONObj buildApplyOpsCmd( const OwnedPointerVector<ChunkType>& chunksToMerge,
                              const ChunkVersion& currShardVersion,
                              const ChunkVersion& newMergedVersion ) {

        BSONObjBuilder applyOpsCmdB;
        BSONArrayBuilder updatesB( applyOpsCmdB.subarrayStart( "applyOps" ) );

        // The chunk we'll be "expanding" is the first chunk
        const ChunkType* chunkToMerge = *chunksToMerge.begin();

        // Fill in details not tracked by metadata
        ChunkType mergedChunk;
        chunkToMerge->cloneTo( &mergedChunk );
        mergedChunk.setName( Chunk::genID( chunkToMerge->getNS(), chunkToMerge->getMin() ) );
        mergedChunk.setMax( ( *chunksToMerge.vector().rbegin() )->getMax() );
        mergedChunk.setVersion( newMergedVersion );

        updatesB.append( buildOpMergeChunk( mergedChunk ) );

        // Don't remove chunk we're expanding
        OwnedPointerVector<ChunkType>::const_iterator it = chunksToMerge.begin();
        for ( ++it; it != chunksToMerge.end(); ++it ) {
            ChunkType* chunkToMerge = *it;
            chunkToMerge->setName( Chunk::genID( chunkToMerge->getNS(), chunkToMerge->getMin() ) );
            updatesB.append( buildOpRemoveChunk( *chunkToMerge ) );
        }

        updatesB.done();

        applyOpsCmdB.append( "preCondition",
                             buildOpPrecond( chunkToMerge->getNS(),
                                             chunkToMerge->getShard(),
                                             currShardVersion ) );

        return applyOpsCmdB.obj();
    }
Author: leonidbl91, Project: mongo, Lines: 36, Source: d_merge.cpp


Example 14: details

    StatusWith<CompactStats> Collection::compact( const CompactOptions* compactOptions ) {

        if ( isCapped() )
            return StatusWith<CompactStats>( ErrorCodes::BadValue,
                                             "cannot compact capped collection" );

        if ( _indexCatalog.numIndexesInProgress() )
            return StatusWith<CompactStats>( ErrorCodes::BadValue,
                                             "cannot compact when indexes in progress" );

        NamespaceDetails* d = details();

        // this is a big job, so might as well make things tidy before we start just to be nice.
        getDur().commitIfNeeded();

        list<DiskLoc> extents;
        for( DiskLoc L = d->firstExtent(); !L.isNull(); L = L.ext()->xnext )
            extents.push_back(L);
        log() << "compact " << extents.size() << " extents" << endl;

        // same data, but might perform a little different after compact?
        _infoCache.reset();

        vector<BSONObj> indexSpecs;
        {
            IndexCatalog::IndexIterator ii( _indexCatalog.getIndexIterator( false ) );
            while ( ii.more() ) {
                IndexDescriptor* descriptor = ii.next();
                indexSpecs.push_back( _compactAdjustIndexSpec( descriptor->infoObj() ) );
            }
        }

        log() << "compact orphan deleted lists" << endl;
        d->orphanDeletedList();

        // Start over from scratch with our extent sizing and growth
        d->setLastExtentSize( 0 );

        // before dropping indexes, at least make sure we can allocate one extent!
        if ( allocateSpaceForANewRecord( _ns.ns().c_str(),
                                         d,
                                         Record::HeaderSize+1,
                                         false).isNull() ) {
            return StatusWith<CompactStats>( ErrorCodes::InternalError,
                                             "compact error no space available to allocate" );
        }

        // note that the drop indexes call also invalidates all clientcursors for the namespace,
        // which is important and wanted here
        log() << "compact dropping indexes" << endl;
        Status status = _indexCatalog.dropAllIndexes( true );
        if ( !status.isOK() ) {
            return StatusWith<CompactStats>( status );
        }

        getDur().commitIfNeeded();

        CompactStats stats;

        OwnedPointerVector<IndexCatalog::IndexBuildBlock> indexBuildBlocks;
        vector<IndexAccessMethod*> indexesToInsertTo;
        vector< std::pair<IndexAccessMethod*,IndexAccessMethod*> > bulkToCommit;
        for ( size_t i = 0; i < indexSpecs.size(); i++ ) {
            killCurrentOp.checkForInterrupt(false);
            BSONObj info = indexSpecs[i];
            info = _compactAdjustIndexSpec( info );
            info = _indexCatalog.fixIndexSpec( info );
            auto_ptr<IndexCatalog::IndexBuildBlock> block( new IndexCatalog::IndexBuildBlock( this,info ) );
            Status status = block->init();
            if ( !status.isOK() )
                return StatusWith<CompactStats>(status);

            IndexAccessMethod* accessMethod = block->getEntry()->accessMethod();
            status = accessMethod->initializeAsEmpty();
            if ( !status.isOK() )
                return StatusWith<CompactStats>(status);

            IndexAccessMethod* bulk = accessMethod->initiateBulk();
            if ( bulk ) {
                indexesToInsertTo.push_back( bulk );
                bulkToCommit.push_back( std::pair<IndexAccessMethod*,IndexAccessMethod*>( accessMethod, bulk ) );
            }
            else {
                indexesToInsertTo.push_back( accessMethod );
            }

            indexBuildBlocks.mutableVector().push_back( block.release() );
        }

        // reset data size and record counts to 0 for this namespace
        // as we're about to tally them up again for each new extent
        d->setStats( 0, 0 );

        ProgressMeterHolder pm(cc().curop()->setMessage("compact extent",
                                                        "Extent Compacting Progress",
                                                        extents.size()));

        int extentNumber = 0;
        for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
            _compactExtent(*i, extentNumber++, indexesToInsertTo, compactOptions, &stats );
//......... part of the code omitted here .........
Author: DanilSerd, Project: mongo, Lines: 101, Source: collection_compact.cpp


Example 15: computeGeoNearDistance

    static StatusWith<double> computeGeoNearDistance(const GeoNearParams& nearParams,
                                                     WorkingSetMember* member) {

        //
        // Generic GeoNear distance computation
        // Distances are computed by projecting the stored geometry into the query CRS, and
        // computing distance in that CRS.
        //

        // Must have an object in order to get geometry out of it.
        invariant(member->hasObj());

        CRS queryCRS = nearParams.nearQuery.centroid.crs;

        // Extract all the geometries out of this document for the near query
        OwnedPointerVector<StoredGeometry> geometriesOwned;
        vector<StoredGeometry*>& geometries = geometriesOwned.mutableVector();
        extractGeometries(member->obj, nearParams.nearQuery.field, &geometries);

        // Compute the minimum distance of all the geometries in the document
        double minDistance = -1;
        BSONObj minDistanceObj;
        for (vector<StoredGeometry*>::iterator it = geometries.begin(); it != geometries.end();
            ++it) {

            StoredGeometry& stored = **it;

            // NOTE: A stored document with STRICT_SPHERE CRS is treated as a malformed document
            // and ignored. Since GeoNear requires an index, there's no stored STRICT_SPHERE shape.
            // So we don't check it here.

            // NOTE: For now, we're sure that if we get this far in the query we'll have an
            // appropriate index which validates the type of geometry we're pulling back here.
            // TODO: It may make sense to change our semantics and, by default, only return
            // shapes in the same CRS from $geoNear.
            if (!stored.geometry.supportsProject(queryCRS))
                continue;
            stored.geometry.projectInto(queryCRS);

            double nextDistance = stored.geometry.minDistance(nearParams.nearQuery.centroid);

            if (minDistance < 0 || nextDistance < minDistance) {
                minDistance = nextDistance;
                minDistanceObj = stored.element.Obj();
            }
        }

        if (minDistance < 0) {
            // No distance to report
            return StatusWith<double>(-1);
        }

        if (nearParams.addDistMeta) {
            if (nearParams.nearQuery.unitsAreRadians) {
                // Hack for nearSphere
                // TODO: Remove nearSphere?
                invariant(SPHERE == queryCRS);
                member->addComputed(new GeoDistanceComputedData(minDistance
                                                                / kRadiusOfEarthInMeters));
            }
            else {
                member->addComputed(new GeoDistanceComputedData(minDistance));
            }
        }

        if (nearParams.addPointMeta) {
            member->addComputed(new GeoNearPointComputedData(minDistanceObj));
        }

        return StatusWith<double>(minDistance);
    }
Author: Mickael-van-der-Beek, Project: mongo, Lines: 71, Source: geo_near.cpp


Example 16: targetBatch

Status BatchWriteOp::targetBatch(OperationContext* txn,
                                 const NSTargeter& targeter,
                                 bool recordTargetErrors,
                                 vector<TargetedWriteBatch*>* targetedBatches) {
    //
    // Targeting of unordered batches is fairly simple - each remaining write op is targeted,
    // and each of those targeted writes are grouped into a batch for a particular shard
    // endpoint.
    //
    // Targeting of ordered batches is a bit more complex - to respect the ordering of the
    // batch, we can only send:
    // A) a single targeted batch to one shard endpoint
    // B) multiple targeted batches, but only containing targeted writes for a single write op
    //
    // This means that any multi-shard write operation must be targeted and sent one-by-one.
    // Subsequent single-shard write operations can be batched together if they go to the same
    // place.
    //
    // Ex: ShardA : { skey : a->k }, ShardB : { skey : k->z }
    //
    // Ordered insert batch of: [{ skey : a }, { skey : b }, { skey : x }]
    // broken into:
    //  [{ skey : a }, { skey : b }],
    //  [{ skey : x }]
    //
    // Ordered update Batch of :
    //  [{ skey : a }{ $push },
    //   { skey : b }{ $push },
    //   { skey : [c, x] }{ $push },
    //   { skey : y }{ $push },
    //   { skey : z }{ $push }]
    // broken into:
    //  [{ skey : a }, { skey : b }],
    //  [{ skey : [c,x] }],
    //  [{ skey : y }, { skey : z }]
    //

    const bool ordered = _clientRequest->getOrdered();

    TargetedBatchMap batchMap;
    TargetedBatchSizeMap batchSizes;

    int numTargetErrors = 0;

    size_t numWriteOps = _clientRequest->sizeWriteOps();
    for (size_t i = 0; i < numWriteOps; ++i) {
        WriteOp& writeOp = _writeOps[i];

        // Only target _Ready ops
        if (writeOp.getWriteState() != WriteOpState_Ready)
            continue;

        //
        // Get TargetedWrites from the targeter for the write operation
        //

        // TargetedWrites need to be owned once returned
        OwnedPointerVector<TargetedWrite> writesOwned;
        vector<TargetedWrite*>& writes = writesOwned.mutableVector();

        Status targetStatus = writeOp.targetWrites(txn, targeter, &writes);

        if (!targetStatus.isOK()) {
            WriteErrorDetail targetError;
            buildTargetError(targetStatus, &targetError);

            if (!recordTargetErrors) {
                // Cancel current batch state with an error

                cancelBatches(targetError, _writeOps, &batchMap);
                dassert(batchMap.empty());
                return targetStatus;
            } else if (!ordered || batchMap.empty()) {
                // Record an error for this batch

                writeOp.setOpError(targetError);
                ++numTargetErrors;

                if (ordered)
                    return Status::OK();

                continue;
            } else {
                dassert(ordered && !batchMap.empty());

                // Send out what we have, but don't record an error yet, since there may be an
                // error in the writes before this point.

                writeOp.cancelWrites(&targetError);
                break;
            }
        }

        //
        // If ordered and we have a previous endpoint, make sure we don't need to send these
        // targeted writes to any other endpoints.
        //

        if (ordered && !batchMap.empty()) {
            dassert(batchMap.size() == 1u);
//......... part of the code omitted here .........
Author: qihsh, Project: mongo, Lines: 101, Source: batch_write_op.cpp


Example 17: run

        virtual bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int options,
                          string& errmsg, BSONObjBuilder& result,
                          bool fromRepl = false ) {

            NamespaceString ns( dbname, cmdObj[name].String() );

            AutoGetCollectionForRead ctx(txn, ns.ns());

            Collection* collection = ctx.getCollection();
            if ( !collection )
                return appendCommandStatus( result,
                                            Status( ErrorCodes::NamespaceNotFound,
                                                    str::stream() <<
                                                    "ns does not exist: " << ns.ns() ) );

            size_t numCursors = static_cast<size_t>( cmdObj["numCursors"].numberInt() );

            if ( numCursors == 0 || numCursors > 10000 )
                return appendCommandStatus( result,
                                            Status( ErrorCodes::BadValue,
                                                    str::stream() <<
                                                    "numCursors has to be between 1 and 10000" <<
                                                    " was: " << numCursors ) );

            OwnedPointerVector<RecordIterator> iterators(collection->getManyIterators(txn));

            if (iterators.size() < numCursors) {
                numCursors = iterators.size();
            }

            OwnedPointerVector<PlanExecutor> execs;
            for ( size_t i = 0; i < numCursors; i++ ) {
                WorkingSet* ws = new WorkingSet();
                MultiIteratorStage* mis = new MultiIteratorStage(txn, ws, collection);

                PlanExecutor* rawExec;
                // Takes ownership of 'ws' and 'mis'.
                Status execStatus = PlanExecutor::make(txn, ws, mis, collection,
                                                       PlanExecutor::YIELD_AUTO, &rawExec);
                invariant(execStatus.isOK());
                auto_ptr<PlanExecutor> curExec(rawExec);

                // The PlanExecutor was registered on construction due to the YIELD_AUTO policy.
                // We have to deregister it, as it will be registered with ClientCursor.
                curExec->deregisterExec();

                // Need to save state while yielding locks between now and getMore().
                curExec->saveState();

                execs.push_back(curExec.release());
            }

            // transfer iterators to executors using a round-robin distribution.
            // TODO consider using a common work queue once invalidation issues go away.
            for (size_t i = 0; i < iterators.size(); i++) {
                PlanExecutor* theExec = execs[i % execs.size()];
                MultiIteratorStage* mis = static_cast<MultiIteratorStage*>(theExec->getRootStage());

                // This wasn't called above as they weren't assigned yet
                iterators[i]->saveState();

                mis->addIterator(iterators.releaseAt(i));
            }

            {
                BSONArrayBuilder bucketsBuilder;
                for (size_t i = 0; i < execs.size(); i++) {
                    // transfer ownership of an executor to the ClientCursor (which manages its own
                    // lifetime).
                    ClientCursor* cc = new ClientCursor( collection->getCursorManager(),
                                                         execs.releaseAt(i),
                                                         ns.ns() );

                    BSONObjBuilder threadResult;
                    appendCursorResponseObject( cc->cursorid(),
                                                ns.ns(),
                                                BSONArray(),
                                                &threadResult );
                    threadResult.appendBool( "ok", 1 );

                    bucketsBuilder.append( threadResult.obj() );
                }
                result.appendArray( "cursors", bucketsBuilder.obj() );
            }

            return true;

        }
Author: ForNowForever, Project: mongo, Lines: 88, Source: parallel_collection_scan.cpp


Example 18: run

        virtual bool run(OperationContext* txn, const string& dbname, BSONObj& cmdObj, int options,
                          string& errmsg, BSONObjBuilder& result,
                          bool fromRepl = false ) {

            NamespaceString ns( dbname, cmdObj[name].String() );

            AutoGetCollectionForRead ctx(txn, ns.ns());

            Collection* collection = ctx.getCollection();
            if ( !collection )
                return appendCommandStatus( result,
                                            Status( ErrorCodes::NamespaceNotFound,
                                                    str::stream() <<
                                                    "ns does not exist: " << ns.ns() ) );

            size_t numCursors = static_cast<size_t>( cmdObj["numCursors"].numberInt() );

            if ( numCursors == 0 || numCursors > 10000 )
                return appendCommandStatus( result,
                                            Status( ErrorCodes::BadValue,
                                                    str::stream() <<
                                                    "numCursors has to be between 1 and 10000" <<
                                                    " was: " << numCursors ) );

            OwnedPointerVector<RecordIterator> iterators(collection->getManyIterators(txn));

            if (iterators.size() < numCursors) {
                numCursors = iterators.size();
            }

            OwnedPointerVector<PlanExecutor> execs;
            for ( size_t i = 0; i < numCursors; i++ ) {
                WorkingSet* ws = new WorkingSet();
                MultiIteratorStage* mis = new MultiIteratorStage(txn, ws, collection);
                // Takes ownership of 'ws' and 'mis'.
                auto_ptr<PlanExecutor> curExec(new PlanExecutor(txn, ws, mis, collection));

                // Each of the plan executors should yield automatically. We pass "false" to
                // indicate that 'curExec' should not register itself, as it will get registered
                // by ClientCursor instead.
                curExec->setYieldPolicy(PlanExecutor::YIELD_AUTO, false);

                // Need to save state while yielding locks between now and newGetMore.
                curExec->saveState();

                execs.push_back(curExec.release());
            }

            // transfer iterators to executors using a round-robin distribution.
            // TODO consider using a common work queue once invalidation issues go away.
            for (size_t i = 0; i < iterators.size(); i++) {
                PlanExecutor* theExec = execs[i % execs.size()];
                MultiIteratorStage* mis = static_cast<MultiIteratorStage*>(theExec->getRootStage());
                mis->addIterator(iterators.releaseAt(i));
            }

            {
                BSONArrayBuilder bucketsBuilder;
                for (size_t i = 0; i < execs.size(); i++) {
                    // transfer ownership of an executor to the ClientCursor (which manages its own
                    // lifetime).
                    ClientCursor* cc = new ClientCursor( collection, execs.releaseAt(i) );

                    // we are mimicking the aggregation cursor output here
                    // that is why there are ns, ok and empty firstBatch
                    BSONObjBuilder threadResult;
                    {
                        BSONObjBuilder cursor;
                        cursor.appendArray( "firstBatch", BSONObj() );
                        cursor.append( "ns", ns );
                        cursor.append( "id", cc->cursorid() );
                        threadResult.append( "cursor", cursor.obj() );
                    }
                    threadResult.appendBool( "ok", 1 );

                    bucketsBuilder.append( threadResult.obj() );
                }
                result.appendArray( "cursors", bucketsBuilder.obj() );
            }

            return true;

        }
Author: Aaron20141021, Project: mongo, Lines: 83, Source: parallel_collection_scan.cpp


Example 19: getMultiPlanStage

    // static
    void Explain::explainStages(PlanExecutor* exec,
                                ExplainCommon::Verbosity verbosity,
                                BSONObjBuilder* out) {
        //
        // Step 1: run the stages as required by the verbosity level.
        //

        // Inspect the tree to see if there is a MultiPlanStage.
        MultiPlanStage* mps = getMultiPlanStage(exec->getRootStage());

        // Get stats of the winning plan from the trial period, if the verbosity level
        // is high enough and there was a runoff between multiple plans.
        auto_ptr<PlanStageStats> winningStatsTrial;
        if (verbosity >= ExplainCommon::EXEC_ALL_PLANS && NULL != mps) {
            winningStatsTrial.reset(exec->getStats());
            invariant(winningStatsTrial.get());
        }

        // If we need execution stats, then run the plan in order to gather the stats.
        Status executePlanStatus = Status::OK();
        if (verbosity >= ExplainCommon::EXEC_STATS) {
            executePlanStatus = exec->executePlan();
        }

        //
        // Step 2: collect plan stats (which also give the structure of the plan tree).
        //

        // Get stats for the winning plan.
        scoped_ptr<PlanStageStats> winningStats(exec->getStats());

        // Get stats for the rejected plans, if more than one plan was considered.
        OwnedPointerVector<PlanStageStats> allPlansStats;
        if (NULL != mps) {
            allPlansStats = mps->generateCandidateStats();
        }

        //
        // Step 3: use the stats trees to produce explain BSON.
        //

        CanonicalQuery* query = exec->getCanonicalQuery();
        if (verbosity >= ExplainCommon::QUERY_PLANNER) {
//......... remainder of this example omitted .........
