This article collects typical usage examples of the C++ END_CRIT_SECTION function. If you have been wondering how exactly END_CRIT_SECTION is used in C++, how to call it, or where to find examples of it in use, the curated code examples here may help.
A total of 20 code examples of the END_CRIT_SECTION function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better C++ code examples.
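Before the examples, here is a minimal sketch of the pattern most of them follow. In PostgreSQL, START_CRIT_SECTION() and END_CRIT_SECTION() bracket the span in which a buffer is modified, marked dirty, and WAL-logged; while the section is open, any ereport(ERROR) is escalated to a PANIC, so a half-applied, unlogged page change can never survive. The sketch below is illustrative only and is not taken from the examples: apply_page_change() and log_page_change() are hypothetical placeholders for the actual page modification and WAL-record construction, while the remaining calls are the standard buffer-manager and WAL APIs seen throughout the examples.
/*
 * Illustrative sketch only -- not from the PostgreSQL sources.
 * apply_page_change() and log_page_change() are hypothetical helpers.
 */
static void
update_page_example(Relation rel, Buffer buf)
{
	Page		page = BufferGetPage(buf);

	/* No ereport(ERROR) from here until the change is logged */
	START_CRIT_SECTION();

	apply_page_change(page);	/* hypothetical in-place page modification */
	MarkBufferDirty(buf);

	if (RelationNeedsWAL(rel))
	{
		XLogRecPtr	recptr;

		/* hypothetical helper: builds and inserts the WAL record */
		recptr = log_page_change(rel, buf);
		PageSetLSN(page, recptr);
	}

	END_CRIT_SECTION();
}
Examples 1, 4, 11 and 12 below are concrete instances of exactly this shape; the replication-slot examples (2 and 5) instead use the critical section so that an fsync failure while persisting slot state results in a PANIC rather than a silently inconsistent on-disk slot.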
Example 1: _bt_delitems
/*
* Delete item(s) from a btree page.
*
* This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
* This routine assumes that the caller has pinned and locked the buffer.
* Also, the given itemnos *must* appear in increasing order in the array.
*/
void
_bt_delitems(Relation rel, Buffer buf,
OffsetNumber *itemnos, int nitems,
bool inVacuum)
{
Page page;
BTPageOpaque opaque;
MIRROREDLOCK_BUFMGR_MUST_ALREADY_BE_HELD;
page = BufferGetPage(buf);
// Fetch gp_persistent_relation_node information that will be added to XLOG record.
RelationFetchGpRelationNodeForXLog(rel);
/* No ereport(ERROR) until changes are logged */
START_CRIT_SECTION();
/* Fix the page */
PageIndexMultiDelete(page, itemnos, nitems);
/*
* If this is within VACUUM, we can clear the vacuum cycleID since this
* page has certainly been processed by the current vacuum scan.
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (inVacuum)
opaque->btpo_cycleid = 0;
/*
* Mark the page as not containing any LP_DELETE items. This is not
* certainly true (there might be some that have recently been marked, but
* weren't included in our target-item list), but it will almost always be
* true and it doesn't seem worth an additional page scan to check it.
* Remember that BTP_HAS_GARBAGE is only a hint anyway.
*/
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
MarkBufferDirty(buf);
/* XLOG stuff */
if (!rel->rd_istemp)
{
xl_btree_delete xlrec;
XLogRecPtr recptr;
XLogRecData rdata[2];
xl_btreenode_set(&(xlrec.btreenode), rel);
xlrec.block = BufferGetBlockNumber(buf);
rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeDelete;
rdata[0].buffer = InvalidBuffer;
rdata[0].next = &(rdata[1]);
/*
* The target-offsets array is not in the buffer, but pretend that it
* is. When XLogInsert stores the whole buffer, the offsets array
* need not be stored too.
*/
if (nitems > 0)
{
rdata[1].data = (char *) itemnos;
rdata[1].len = nitems * sizeof(OffsetNumber);
}
else
{
rdata[1].data = NULL;
rdata[1].len = 0;
}
rdata[1].buffer = buf;
rdata[1].buffer_std = true;
rdata[1].next = NULL;
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE, rdata);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
END_CRIT_SECTION();
}
Developer ID: ricky-wu, Project: gpdb, Lines of code: 92, Source file: nbtpage.c
Example 2: SaveSlotToPath
//......... part of the code omitted here .........
LWLockAcquire(slot->io_in_progress_lock, LW_EXCLUSIVE);
/* silence valgrind :( */
memset(&cp, 0, sizeof(ReplicationSlotOnDisk));
sprintf(tmppath, "%s/state.tmp", dir);
sprintf(path, "%s/state", dir);
fd = OpenTransientFile(tmppath,
O_CREAT | O_EXCL | O_WRONLY | PG_BINARY,
S_IRUSR | S_IWUSR);
if (fd < 0)
{
ereport(elevel,
(errcode_for_file_access(),
errmsg("could not create file \"%s\": %m",
tmppath)));
return;
}
cp.magic = SLOT_MAGIC;
INIT_CRC32C(cp.checksum);
cp.version = SLOT_VERSION;
cp.length = ReplicationSlotOnDiskV2Size;
SpinLockAcquire(&slot->mutex);
memcpy(&cp.slotdata, &slot->data, sizeof(ReplicationSlotPersistentData));
SpinLockRelease(&slot->mutex);
COMP_CRC32C(cp.checksum,
(char *) (&cp) + SnapBuildOnDiskNotChecksummedSize,
SnapBuildOnDiskChecksummedSize);
FIN_CRC32C(cp.checksum);
if ((write(fd, &cp, sizeof(cp))) != sizeof(cp))
{
int save_errno = errno;
CloseTransientFile(fd);
errno = save_errno;
ereport(elevel,
(errcode_for_file_access(),
errmsg("could not write to file \"%s\": %m",
tmppath)));
return;
}
/* fsync the temporary file */
if (pg_fsync(fd) != 0)
{
int save_errno = errno;
CloseTransientFile(fd);
errno = save_errno;
ereport(elevel,
(errcode_for_file_access(),
errmsg("could not fsync file \"%s\": %m",
tmppath)));
return;
}
CloseTransientFile(fd);
/* rename to permanent file, fsync file and directory */
if (rename(tmppath, path) != 0)
{
ereport(elevel,
(errcode_for_file_access(),
errmsg("could not rename file \"%s\" to \"%s\": %m",
tmppath, path)));
return;
}
/* Check CreateSlot() for the reasoning of using a crit. section. */
START_CRIT_SECTION();
fsync_fname(path, false);
fsync_fname((char *) dir, true);
fsync_fname("pg_replslot", true);
END_CRIT_SECTION();
/*
* Successfully wrote, unset dirty bit, unless somebody dirtied again
* already.
*/
{
volatile ReplicationSlot *vslot = slot;
SpinLockAcquire(&vslot->mutex);
if (!vslot->just_dirtied)
vslot->dirty = false;
SpinLockRelease(&vslot->mutex);
}
LWLockRelease(slot->io_in_progress_lock);
}
Developer ID: EccentricLoggers, Project: peloton, Lines of code: 101, Source file: slot.cpp
Example 3: SlruPhysicalWritePage
/*
* Physical write of a page from a buffer slot
*
* On failure, we cannot just ereport(ERROR) since caller has put state in
* shared memory that must be undone. So, we return FALSE and save enough
* info in static variables to let SlruReportIOError make the report.
*
* For now, assume it's not worth keeping a file pointer open across
* independent read/write operations. We do batch operations during
* SimpleLruFlush, though.
*
* fdata is NULL for a standalone write, pointer to open-file info during
* SimpleLruFlush.
*/
static bool
SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata)
{
SlruShared shared = ctl->shared;
int segno = pageno / SLRU_PAGES_PER_SEGMENT;
int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
int offset = rpageno * BLCKSZ;
char path[MAXPGPATH];
int fd = -1;
/*
* Honor the write-WAL-before-data rule, if appropriate, so that we do not
* write out data before associated WAL records. This is the same action
* performed during FlushBuffer() in the main buffer manager.
*/
if (shared->group_lsn != NULL)
{
/*
* We must determine the largest async-commit LSN for the page. This
* is a bit tedious, but since this entire function is a slow path
* anyway, it seems better to do this here than to maintain a per-page
* LSN variable (which'd need an extra comparison in the
* transaction-commit path).
*/
XLogRecPtr max_lsn;
int lsnindex,
lsnoff;
lsnindex = slotno * shared->lsn_groups_per_page;
max_lsn = shared->group_lsn[lsnindex++];
for (lsnoff = 1; lsnoff < shared->lsn_groups_per_page; lsnoff++)
{
XLogRecPtr this_lsn = shared->group_lsn[lsnindex++];
if (max_lsn < this_lsn)
max_lsn = this_lsn;
}
if (!XLogRecPtrIsInvalid(max_lsn))
{
/*
* As noted above, elog(ERROR) is not acceptable here, so if
* XLogFlush were to fail, we must PANIC. This isn't much of a
* restriction because XLogFlush is just about all critical
* section anyway, but let's make sure.
*/
START_CRIT_SECTION();
XLogFlush(max_lsn);
END_CRIT_SECTION();
}
}
/*
* During a Flush, we may already have the desired file open.
*/
if (fdata)
{
int i;
for (i = 0; i < fdata->num_files; i++)
{
if (fdata->segno[i] == segno)
{
fd = fdata->fd[i];
break;
}
}
}
if (fd < 0)
{
/*
* If the file doesn't already exist, we should create it. It is
* possible for this to need to happen when writing a page that's not
* first in its segment; we assume the OS can cope with that. (Note:
* it might seem that it'd be okay to create files only when
* SimpleLruZeroPage is called for the first page of a segment.
* However, if after a crash and restart the REDO logic elects to
* replay the log from a checkpoint before the latest one, then it's
* possible that we will get commands to set transaction status of
* transactions that have already been truncated from the commit log.
* Easiest way to deal with that is to accept references to
* nonexistent files here and in SlruPhysicalReadPage.)
*
* Note: it is possible for more than one backend to be executing this
* code simultaneously for different pages of the same file. Hence,
//......... part of the code omitted here .........
Developer ID: 5A68656E67, Project: postgres, Lines of code: 101, Source file: slru.c
Example 4: writeListPage
/*
* Build a pending-list page from the given array of tuples, and write it out.
*
* Returns amount of free space left on the page.
*/
static int32
writeListPage(Relation index, Buffer buffer,
IndexTuple *tuples, int32 ntuples, BlockNumber rightlink)
{
Page page = BufferGetPage(buffer);
int32 i,
freesize,
size = 0;
OffsetNumber l,
off;
char *workspace;
char *ptr;
/* workspace could be a local array; we use palloc for alignment */
workspace = palloc(BLCKSZ);
START_CRIT_SECTION();
GinInitBuffer(buffer, GIN_LIST);
off = FirstOffsetNumber;
ptr = workspace;
for (i = 0; i < ntuples; i++)
{
int this_size = IndexTupleSize(tuples[i]);
memcpy(ptr, tuples[i], this_size);
ptr += this_size;
size += this_size;
l = PageAddItem(page, (Item) tuples[i], this_size, off, false, false);
if (l == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in \"%s\"",
RelationGetRelationName(index));
off++;
}
Assert(size <= BLCKSZ); /* else we overran workspace */
GinPageGetOpaque(page)->rightlink = rightlink;
/*
* tail page may contain only whole row(s) or final part of row placed on
* previous pages (a "row" here meaning all the index tuples generated for
* one heap tuple)
*/
if (rightlink == InvalidBlockNumber)
{
GinPageSetFullRow(page);
GinPageGetOpaque(page)->maxoff = 1;
}
else
{
GinPageGetOpaque(page)->maxoff = 0;
}
MarkBufferDirty(buffer);
if (RelationNeedsWAL(index))
{
ginxlogInsertListPage data;
XLogRecPtr recptr;
data.rightlink = rightlink;
data.ntuples = ntuples;
XLogBeginInsert();
XLogRegisterData((char *) &data, sizeof(ginxlogInsertListPage));
XLogRegisterBuffer(0, buffer, REGBUF_WILL_INIT);
XLogRegisterBufData(0, workspace, size);
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_INSERT_LISTPAGE);
PageSetLSN(page, recptr);
}
/* get free space before releasing buffer */
freesize = PageGetExactFreeSpace(page);
UnlockReleaseBuffer(buffer);
END_CRIT_SECTION();
pfree(workspace);
return freesize;
}
Developer ID: dividedmind, Project: postgres, Lines of code: 95, Source file: ginfast.c
Example 5: RestoreSlotFromDisk
/*
* Load a single slot from disk into memory.
*/
static void
RestoreSlotFromDisk(const char *name)
{
ReplicationSlotOnDisk cp;
int i;
char path[MAXPGPATH];
int fd;
bool restored = false;
int readBytes;
pg_crc32c checksum;
/* no need to lock here, no concurrent access allowed yet */
/* delete temp file if it exists */
sprintf(path, "pg_replslot/%s/state.tmp", name);
if (unlink(path) < 0 && errno != ENOENT)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("could not remove file \"%s\": %m", path)));
sprintf(path, "pg_replslot/%s/state", name);
elog(DEBUG1, "restoring replication slot from \"%s\"", path);
fd = OpenTransientFile(path, O_RDWR | PG_BINARY, 0);
/*
* We do not need to handle this as we are rename()ing the directory into
* place only after we fsync()ed the state file.
*/
if (fd < 0)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("could not open file \"%s\": %m", path)));
/*
* Sync state file before we're reading from it. We might have crashed
* while it wasn't synced yet and we shouldn't continue on that basis.
*/
if (pg_fsync(fd) != 0)
{
CloseTransientFile(fd);
ereport(PANIC,
(errcode_for_file_access(),
errmsg("could not fsync file \"%s\": %m",
path)));
}
/* Also sync the parent directory */
START_CRIT_SECTION();
fsync_fname(path, true);
END_CRIT_SECTION();
/* read part of statefile that's guaranteed to be version independent */
readBytes = read(fd, &cp, ReplicationSlotOnDiskConstantSize);
if (readBytes != ReplicationSlotOnDiskConstantSize)
{
int saved_errno = errno;
CloseTransientFile(fd);
errno = saved_errno;
ereport(PANIC,
(errcode_for_file_access(),
errmsg("could not read file \"%s\", read %d of %u: %m",
path, readBytes,
(uint32) ReplicationSlotOnDiskConstantSize)));
}
/* verify magic */
if (cp.magic != SLOT_MAGIC)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("replication slot file \"%s\" has wrong magic %u instead of %u",
path, cp.magic, SLOT_MAGIC)));
/* verify version */
if (cp.version != SLOT_VERSION)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("replication slot file \"%s\" has unsupported version %u",
path, cp.version)));
/* boundary check on length */
if (cp.length != ReplicationSlotOnDiskV2Size)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("replication slot file \"%s\" has corrupted length %u",
path, cp.length)));
/* Now that we know the size, read the entire file */
readBytes = read(fd,
(char *) &cp + ReplicationSlotOnDiskConstantSize,
cp.length);
if (readBytes != cp.length)
{
int saved_errno = errno;
//......... part of the code omitted here .........
Developer ID: EccentricLoggers, Project: peloton, Lines of code: 101, Source file: slot.cpp
Example 6: ginbuild
IndexBuildResult *
ginbuild(Relation heap, Relation index, IndexInfo *indexInfo)
{
IndexBuildResult *result;
double reltuples;
GinBuildState buildstate;
Buffer RootBuffer,
MetaBuffer;
ItemPointerData *list;
Datum key;
GinNullCategory category;
uint32 nlist;
MemoryContext oldCtx;
OffsetNumber attnum;
if (RelationGetNumberOfBlocks(index) != 0)
elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
initGinState(&buildstate.ginstate, index);
buildstate.indtuples = 0;
memset(&buildstate.buildStats, 0, sizeof(GinStatsData));
/* initialize the meta page */
MetaBuffer = GinNewBuffer(index);
/* initialize the root page */
RootBuffer = GinNewBuffer(index);
START_CRIT_SECTION();
GinInitMetabuffer(MetaBuffer);
MarkBufferDirty(MetaBuffer);
GinInitBuffer(RootBuffer, GIN_LEAF);
MarkBufferDirty(RootBuffer);
if (RelationNeedsWAL(index))
{
XLogRecPtr recptr;
Page page;
XLogBeginInsert();
XLogRegisterBuffer(0, MetaBuffer, REGBUF_WILL_INIT | REGBUF_STANDARD);
XLogRegisterBuffer(1, RootBuffer, REGBUF_WILL_INIT);
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_CREATE_INDEX);
page = BufferGetPage(RootBuffer);
PageSetLSN(page, recptr);
page = BufferGetPage(MetaBuffer);
PageSetLSN(page, recptr);
}
UnlockReleaseBuffer(MetaBuffer);
UnlockReleaseBuffer(RootBuffer);
END_CRIT_SECTION();
/* count the root as first entry page */
buildstate.buildStats.nEntryPages++;
/*
* create a temporary memory context that is used to hold data not yet
* dumped out to the index
*/
buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
"Gin build temporary context",
ALLOCSET_DEFAULT_SIZES);
/*
* create a temporary memory context that is used for calling
* ginExtractEntries(), and can be reset after each tuple
*/
buildstate.funcCtx = AllocSetContextCreate(CurrentMemoryContext,
"Gin build temporary context for user-defined function",
ALLOCSET_DEFAULT_SIZES);
buildstate.accum.ginstate = &buildstate.ginstate;
ginInitBA(&buildstate.accum);
/*
* Do the heap scan. We disallow sync scan here because dataPlaceToPage
* prefers to receive tuples in TID order.
*/
reltuples = IndexBuildHeapScan(heap, index, indexInfo, false,
ginBuildCallback, (void *) &buildstate);
/* dump remaining entries to the index */
oldCtx = MemoryContextSwitchTo(buildstate.tmpCtx);
ginBeginBAScan(&buildstate.accum);
while ((list = ginGetBAEntry(&buildstate.accum,
&attnum, &key, &category, &nlist)) != NULL)
{
/* there could be many entries, so be willing to abort here */
CHECK_FOR_INTERRUPTS();
ginEntryInsert(&buildstate.ginstate, attnum, key, category,
list, nlist, &buildstate.buildStats);
}
MemoryContextSwitchTo(oldCtx);
MemoryContextDelete(buildstate.funcCtx);
//......... part of the code omitted here .........
Developer ID: paullmc, Project: postgres, Lines of code: 101, Source file: gininsert.c
Example 7: ginHeapTupleFastInsert
//......... part of the code omitted here .........
char *ptr;
char *collectordata;
buffer = ReadBuffer(index, metadata->tail);
LockBuffer(buffer, GIN_EXCLUSIVE);
page = BufferGetPage(buffer);
off = (PageIsEmpty(page)) ? FirstOffsetNumber :
OffsetNumberNext(PageGetMaxOffsetNumber(page));
collectordata = ptr = (char *) palloc(collector->sumsize);
data.ntuples = collector->ntuples;
if (needWal)
XLogBeginInsert();
START_CRIT_SECTION();
/*
* Increase counter of heap tuples
*/
Assert(GinPageGetOpaque(page)->maxoff <= metadata->nPendingHeapTuples);
GinPageGetOpaque(page)->maxoff++;
metadata->nPendingHeapTuples++;
for (i = 0; i < collector->ntuples; i++)
{
tupsize = IndexTupleSize(collector->tuples[i]);
l = PageAddItem(page, (Item) collector->tuples[i], tupsize, off, false, false);
if (l == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in \"%s\"",
RelationGetRelationName(index));
memcpy(ptr, collector->tuples[i], tupsize);
ptr += tupsize;
off++;
}
Assert((ptr - collectordata) <= collector->sumsize);
if (needWal)
{
XLogRegisterBuffer(1, buffer, REGBUF_STANDARD);
XLogRegisterBufData(1, collectordata, collector->sumsize);
}
metadata->tailFreeSize = PageGetExactFreeSpace(page);
MarkBufferDirty(buffer);
}
/*
* Write metabuffer, make xlog entry
*/
MarkBufferDirty(metabuffer);
if (needWal)
{
XLogRecPtr recptr;
memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));
XLogRegisterBuffer(0, metabuffer, REGBUF_WILL_INIT);
XLogRegisterData((char *) &data, sizeof(ginxlogUpdateMeta));
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_UPDATE_META_PAGE);
PageSetLSN(metapage, recptr);
if (buffer != InvalidBuffer)
{
PageSetLSN(page, recptr);
}
}
if (buffer != InvalidBuffer)
UnlockReleaseBuffer(buffer);
/*
* Force pending list cleanup when it becomes too long. And,
* ginInsertCleanup could take significant amount of time, so we prefer to
* call it when it can do all the work in a single collection cycle. In
* non-vacuum mode, it shouldn't require maintenance_work_mem, so fire it
* while pending list is still small enough to fit into
* gin_pending_list_limit.
*
* ginInsertCleanup() should not be called inside our CRIT_SECTION.
*/
cleanupSize = GinGetPendingListCleanupSize(index);
if (metadata->nPendingPages * GIN_PAGE_FREESIZE > cleanupSize * 1024L)
needCleanup = true;
UnlockReleaseBuffer(metabuffer);
END_CRIT_SECTION();
if (needCleanup)
ginInsertCleanup(ginstate, true, NULL);
}
Developer ID: dividedmind, Project: postgres, Lines of code: 101, Source file: ginfast.c
Example 8: _bt_pagedel
//......... part of the code omitted here .........
nextrdata->data = NULL;
nextrdata->len = 0;
nextrdata->next = nextrdata + 1;
nextrdata->buffer = pbuf;
nextrdata->buffer_std = true;
nextrdata++;
nextrdata->data = NULL;
nextrdata->len = 0;
nextrdata->buffer = rbuf;
nextrdata->buffer_std = true;
nextrdata->next = NULL;
if (BufferIsValid(lbuf))
{
nextrdata->next = nextrdata + 1;
nextrdata++;
nextrdata->data = NULL;
nextrdata->len = 0;
nextrdata->buffer = lbuf;
nextrdata->buffer_std = true;
nextrdata->next = NULL;
}
recptr = XLogInsert(RM_BTREE_ID, xlinfo, rdata);
if (BufferIsValid(metabuf))
{
PageSetLSN(metapg, recptr);
PageSetTLI(metapg, ThisTimeLineID);
}
page = BufferGetPage(pbuf);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
page = BufferGetPage(rbuf);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
page = BufferGetPage(buf);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
if (BufferIsValid(lbuf))
{
page = BufferGetPage(lbuf);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
}
END_CRIT_SECTION();
/* release metapage; send out relcache inval if metapage changed */
if (BufferIsValid(metabuf))
{
CacheInvalidateRelcache(rel);
_bt_relbuf(rel, metabuf);
}
/* can always release leftsib immediately */
if (BufferIsValid(lbuf))
_bt_relbuf(rel, lbuf);
/*
* If parent became half dead, recurse to delete it. Otherwise, if right
* sibling is empty and is now the last child of the parent, recurse to
* try to delete it. (These cases cannot apply at the same time, though
* the second case might itself recurse to the first.)
*
* When recursing to parent, we hold the lock on the target page until
* done. This delays any insertions into the keyspace that was just
* effectively reassigned to the parent's right sibling. If we allowed
* that, and there were enough such insertions before we finish deleting
* the parent, page splits within that keyspace could lead to inserting
* out-of-order keys into the grandparent level. It is thought that that
* wouldn't have any serious consequences, but it still seems like a
* pretty bad idea.
*/
if (parent_half_dead)
{
/* recursive call will release pbuf */
_bt_relbuf(rel, rbuf);
result = _bt_pagedel(rel, pbuf, stack->bts_parent) + 1;
_bt_relbuf(rel, buf);
}
else if (parent_one_child && rightsib_empty)
{
_bt_relbuf(rel, pbuf);
_bt_relbuf(rel, buf);
/* recursive call will release rbuf */
result = _bt_pagedel(rel, rbuf, stack) + 1;
}
else
{
_bt_relbuf(rel, pbuf);
_bt_relbuf(rel, buf);
_bt_relbuf(rel, rbuf);
result = 1;
}
return result;
}
Developer ID: Joe-xXx, Project: postgres-old-soon-decommissioned, Lines of code: 101, Source file: nbtpage.c
Example 9: createPostingTree
/*
* Creates new posting tree containing the given TIDs. Returns the page
* number of the root of the new posting tree.
*
* items[] must be in sorted order with no duplicates.
*/
BlockNumber
createPostingTree(Relation index, ItemPointerData *items, uint32 nitems,
GinStatsData *buildStats)
{
BlockNumber blkno;
Buffer buffer;
Page page;
int nrootitems;
/* Calculate how many TIDs will fit on first page. */
nrootitems = Min(nitems, GinMaxLeafDataItems);
/*
* Create the root page.
*/
buffer = GinNewBuffer(index);
page = BufferGetPage(buffer);
blkno = BufferGetBlockNumber(buffer);
START_CRIT_SECTION();
GinInitBuffer(buffer, GIN_DATA | GIN_LEAF);
memcpy(GinDataPageGetData(page), items, sizeof(ItemPointerData) * nrootitems);
GinPageGetOpaque(page)->maxoff = nrootitems;
MarkBufferDirty(buffer);
if (RelationNeedsWAL(index))
{
XLogRecPtr recptr;
XLogRecData rdata[2];
ginxlogCreatePostingTree data;
data.node = index->rd_node;
data.blkno = blkno;
data.nitem = nrootitems;
rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char *) &data;
rdata[0].len = sizeof(ginxlogCreatePostingTree);
rdata[0].next = &rdata[1];
rdata[1].buffer = InvalidBuffer;
rdata[1].data = (char *) items;
rdata[1].len = sizeof(ItemPointerData) * nrootitems;
rdata[1].next = NULL;
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_CREATE_PTREE, rdata);
PageSetLSN(page, recptr);
}
UnlockReleaseBuffer(buffer);
END_CRIT_SECTION();
/* During index build, count the newly-added data page */
if (buildStats)
buildStats->nDataPages++;
/*
* Add any remaining TIDs to the newly-created posting tree.
*/
if (nitems > nrootitems)
{
ginInsertItemPointers(index, blkno,
items + nrootitems,
nitems - nrootitems,
buildStats);
}
return blkno;
}
Developer ID: 42penguins, Project: postgres, Lines of code: 78, Source file: gindatapage.c
Example 10: _bt_getroot
//......... part of the code omitted here .........
metad->btm_root = rootblkno;
metad->btm_level = 0;
metad->btm_fastroot = rootblkno;
metad->btm_fastlevel = 0;
MarkBufferDirty(rootbuf);
MarkBufferDirty(metabuf);
/* XLOG stuff */
if (!rel->rd_istemp)
{
xl_btree_newroot xlrec;
XLogRecPtr recptr;
XLogRecData rdata;
xlrec.node = rel->rd_node;
xlrec.rootblk = rootblkno;
xlrec.level = 0;
rdata.data = (char *) &xlrec;
rdata.len = SizeOfBtreeNewroot;
rdata.buffer = InvalidBuffer;
rdata.next = NULL;
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT, &rdata);
PageSetLSN(rootpage, recptr);
PageSetTLI(rootpage, ThisTimeLineID);
PageSetLSN(metapg, recptr);
PageSetTLI(metapg, ThisTimeLineID);
}
END_CRIT_SECTION();
/*
* Send out relcache inval for metapage change (probably unnecessary
* here, but let's be safe).
*/
CacheInvalidateRelcache(rel);
/*
* swap root write lock for read lock. There is no danger of anyone
* else accessing the new root page while it's unlocked, since no one
* else knows where it is yet.
*/
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(rootbuf, BT_READ);
/* okay, metadata is correct, release lock on it */
_bt_relbuf(rel, metabuf);
}
else
{
rootblkno = metad->btm_fastroot;
Assert(rootblkno != P_NONE);
rootlevel = metad->btm_fastlevel;
/*
* Cache the metapage data for next time
*/
rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
sizeof(BTMetaPageData));
memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));
//......... part of the code omitted here .........
Developer ID: Joe-xXx, Project: postgres-old-soon-decommissioned, Lines of code: 67, Source file: nbtpage.c
Example 11: _bt_delitems_delete
void
_bt_delitems_delete(Relation rel, Buffer buf,
OffsetNumber *itemnos, int nitems, Relation heapRel)
{
Page page = BufferGetPage(buf);
BTPageOpaque opaque;
Assert(nitems > 0);
/* No ereport(ERROR) until changes are logged */
START_CRIT_SECTION();
/* Fix the page */
PageIndexMultiDelete(page, itemnos, nitems);
/*
* We can clear the vacuum cycle ID since this page has certainly been
* processed by the current vacuum scan.
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
opaque->btpo_cycleid = 0;
/*
* Mark the page as not containing any LP_DEAD items. This is not
* certainly true (there might be some that have recently been marked, but
* weren't included in our target-item list), but it will almost always be
* true and it doesn't seem worth an additional page scan to check it.
* Remember that BTP_HAS_GARBAGE is only a hint anyway.
*/
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
MarkBufferDirty(buf);
/* XLOG stuff */
if (!rel->rd_istemp)
{
XLogRecPtr recptr;
XLogRecData rdata[3];
xl_btree_delete xlrec_delete;
xlrec_delete.node = rel->rd_node;
xlrec_delete.hnode = heapRel->rd_node;
xlrec_delete.block = BufferGetBlockNumber(buf);
xlrec_delete.nitems = nitems;
rdata[0].data = (char *) &xlrec_delete;
rdata[0].len = SizeOfBtreeDelete;
rdata[0].buffer = InvalidBuffer;
rdata[0].next = &(rdata[1]);
/*
* We need the target-offsets array whether or not we store the whole
* buffer, to allow us to find the latestRemovedXid on a standby server.
*/
rdata[1].data = (char *) itemnos;
rdata[1].len = nitems * sizeof(OffsetNumber);
rdata[1].buffer = InvalidBuffer;
rdata[1].next = &(rdata[2]);
rdata[2].data = NULL;
rdata[2].len = 0;
rdata[2].buffer = buf;
rdata[2].buffer_std = true;
rdata[2].next = NULL;
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE, rdata);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
END_CRIT_SECTION();
}
Developer ID: Joe-xXx, Project: postgres-old-soon-decommissioned, Lines of code: 74, Source file: nbtpage.c
Example 12: _bt_delitems_vacuum
/*
* Delete item(s) from a btree page.
*
* This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
* This routine assumes that the caller has pinned and locked the buffer.
* Also, the given itemnos *must* appear in increasing order in the array.
*
* We record VACUUMs and b-tree deletes differently in WAL. InHotStandby
* we need to be able to pin all of the blocks in the btree in physical
* order when replaying the effects of a VACUUM, just as we do for the
* original VACUUM itself. lastBlockVacuumed allows us to tell whether an
* intermediate range of blocks has had no changes at all by VACUUM,
* and so must be scanned anyway during replay. We always write a WAL record
* for the last block in the index, whether or not it contained any items
* to be removed. This allows us to scan right up to end of index to
* ensure correct locking.
*/
void
_bt_delitems_vacuum(Relation rel, Buffer buf,
OffsetNumber *itemnos, int nitems, BlockNumber lastBlockVacuumed)
{
Page page = BufferGetPage(buf);
BTPageOpaque opaque;
/* No ereport(ERROR) until changes are logged */
START_CRIT_SECTION();
/* Fix the page */
if (nitems > 0)
PageIndexMultiDelete(page, itemnos, nitems);
/*
* We can clear the vacuum cycle ID since this page has certainly been
* processed by the current vacuum scan.
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
opaque->btpo_cycleid = 0;
/*
* Mark the page as not containing any LP_DEAD items. This is not
* certainly true (there might be some that have recently been marked, but
* weren't included in our target-item list), but it will almost always be
* true and it doesn't seem worth an additional page scan to check it.
* Remember that BTP_HAS_GARBAGE is only a hint anyway.
*/
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
MarkBufferDirty(buf);
/* XLOG stuff */
if (!rel->rd_istemp)
{
XLogRecPtr recptr;
XLogRecData rdata[2];
xl_btree_vacuum xlrec_vacuum;
xlrec_vacuum.node = rel->rd_node;
xlrec_vacuum.block = BufferGetBlockNumber(buf);
xlrec_vacuum.lastBlockVacuumed = lastBlockVacuumed;
rdata[0].data = (char *) &xlrec_vacuum;
rdata[0].len = SizeOfBtreeVacuum;
rdata[0].buffer = InvalidBuffer;
rdata[0].next = &(rdata[1]);
/*
* The target-offsets array is not in the buffer, but pretend that it
* is. When XLogInsert stores the whole buffer, the offsets array
* need not be stored too.
*/
if (nitems > 0)
{
rdata[1].data = (char *) itemnos;
rdata[1].len = nitems * sizeof(OffsetNumber);
}
else
{
rdata[1].data = NULL;
rdata[1].len = 0;
}
rdata[1].buffer = buf;
rdata[1].buffer_std = true;
rdata[1].next = NULL;
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_VACUUM, rdata);
PageSetLSN(page, recptr);
PageSetTLI(page, ThisTimeLineID);
}
END_CRIT_SECTION();
}
Developer ID: Joe-xXx, Project: postgres-old-soon-decommissioned, Lines of code: 96, Source file: nbtpage.c
Example 13: ginDeletePage
/*
* Delete a posting tree page.
*/
static void
ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno,
BlockNumber parentBlkno, OffsetNumber myoff, bool isParentRoot)
{
Buffer dBuffer;
Buffer lBuffer;
Buffer pBuffer;
Page page,
parentPage;
BlockNumber rightlink;
/*
* Lock the pages in the same order as an insertion would, to avoid
* deadlocks: left, then right, then parent.
*/
lBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, leftBlkno,
RBM_NORMAL, gvs->strategy);
dBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, deleteBlkno,
RBM_NORMAL, gvs->strategy);
pBuffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, parentBlkno,
RBM_NORMAL, gvs->strategy);
LockBuffer(lBuffer, GIN_EXCLUSIVE);
LockBuffer(dBuffer, GIN_EXCLUSIVE);
if (!isParentRoot) /* parent is already locked by
* LockBufferForCleanup() */
LockBuffer(pBuffer, GIN_EXCLUSIVE);
START_CRIT_SECTION();
/* Unlink the page by changing left sibling's rightlink */
page = BufferGetPage(dBuffer);
rightlink = GinPageGetOpaque(page)->rightlink;
page = BufferGetPage(lBuffer);
GinPageGetOpaque(page)->rightlink = rightlink;
/* Delete downlink from parent */
parentPage = BufferGetPage(pBuffer);
#ifdef USE_ASSERT_CHECKING
do
{
PostingItem *tod = GinDataPageGetPostingItem(parentPage, myoff);
Assert(PostingItemGetBlockNumber(tod) == deleteBlkno);
} while (0);
#endif
GinPageDeletePostingItem(parentPage, myoff);
page = BufferGetPage(dBuffer);
/*
* we shouldn't change rightlink field to save workability of running
* search scan
*/
GinPageGetOpaque(page)->flags = GIN_DELETED;
MarkBufferDirty(pBuffer);
MarkBufferDirty(lBuffer);
MarkBufferDirty(dBuffer);
if (RelationNeedsWAL(gvs->index))
{
XLogRecPtr recptr;
ginxlogDeletePage data;
/*
* We can't pass REGBUF_STANDARD for the deleted page, because we
* didn't set pd_lower on pre-9.4 versions. The page might've been
* binary-upgraded from an older version, and hence not have pd_lower
* set correctly. Ditto for the left page, but removing the item from
* the parent updated its pd_lower, so we know that's OK at this
* point.
*/
XLogBeginInsert();
XLogRegisterBuffer(0, dBuffer, 0);
XLogRegisterBuffer(1, pBuffer, REGBUF_STANDARD);
XLogRegisterBuffer(2, lBuffer, 0);
data.parentOffset = myoff;
data.rightLink = GinPageGetOpaque(page)->rightlink;
XLogRegisterData((char *) &data, sizeof(ginxlogDeletePage));
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_DELETE_PAGE);
PageSetLSN(page, recptr);
PageSetLSN(parentPage, recptr);
PageSetLSN(BufferGetPage(lBuffer), recptr);
}
if (!isParentRoot)
LockBuffer(pBuffer, GIN_UNLOCK);
ReleaseBuffer(pBuffer);
UnlockReleaseBuffer(lBuffer);
UnlockReleaseBuffer(dBuffer);
END_CRIT_SECTION();
//......... part of the code omitted here .........
Developer ID: Gordiychuk, Project: postgres, Lines of code: 101, Source file: ginvacuum.c
Example 14: ginbulkdelete
IndexBulkDeleteResult *
ginbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
IndexBulkDeleteCallback callback, void *callback_state)
{
Relation index = info->index;
BlockNumber blkno = GIN_ROOT_BLKNO;
GinVacuumState gvs;
Buffer buffer;
BlockNumber rootOfPostingTree[BLCKSZ / (sizeof(IndexTupleData) + sizeof(ItemId))];
uint32 nRoot;
gvs.tmpCxt = AllocSetContextCreate(CurrentMemoryContext,
"Gin vacuum temporary context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
gvs.index = index;
gvs.callback = callback;
gvs.callback_state = callback_state;
gvs.strategy = info->strategy;
initGinState(&gvs.ginstate, index);
/* first time through? */
if (stats == NULL)
{
/* Yes, so initialize stats to zeroes */
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
/* and cleanup any pending inserts */
ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(),
false, stats);
}
/* we'll re-count the tuples each time */
stats->num_index_tuples = 0;
gvs.result = stats;
buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
RBM_NORMAL, info->strategy);
/* find leaf page */
for (;;)
{
Page page = BufferGetPage(buffer);
IndexTuple itup;
LockBuffer(buffer, GIN_SHARE);
Assert(!GinPageIsData(page));
if (GinPageIsLeaf(page))
{
LockBuffer(buffer, GIN_UNLOCK);
LockBuffer(buffer, GIN_EXCLUSIVE);
if (blkno == GIN_ROOT_BLKNO && !GinPageIsLeaf(page))
{
LockBuffer(buffer, GIN_UNLOCK);
continue; /* check it one more */
}
break;
}
Assert(PageGetMaxOffsetNumber(page) >= FirstOffsetNumber);
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, FirstOffsetNumber));
blkno = GinGetDownlink(itup);
Assert(blkno != InvalidBlockNumber);
UnlockReleaseBuffer(buffer);
buffer = ReadBufferExtended(index, MAIN_FORKNUM, blkno,
RBM_NORMAL, info->strategy);
}
/* right now we found leftmost page in entry's BTree */
for (;;)
{
Page page = BufferGetPage(buffer);
Page resPage;
uint32 i;
Assert(!GinPageIsData(page));
resPage = ginVacuumEntryPage(&gvs, buffer, rootOfPostingTree, &nRoot);
blkno = GinPageGetOpaque(page)->rightlink;
if (resPage)
{
START_CRIT_SECTION();
PageRestoreTempPage(resPage, page);
MarkBufferDirty(buffer);
xlogVacuumPage(gvs.index, buffer);
UnlockReleaseBuffer(buffer);
END_CRIT_SECTION();
}
else
{
UnlockReleaseBuffer(buffer);
//......... part of the code omitted here .........
Developer ID: Gordiychuk, Project: postgres, Lines of code: 101, Source file: ginvacuum.c
Example 15: shiftList
//......... part of the code omitted here .........
Page metapage;
GinMetaPageData *metadata;
BlockNumber blknoToDelete;
metapage = BufferGetPage(metabuffer);
metadata = GinPageGetMeta(metapage);
blknoToDelete = metadata->head;
do
{
Page page;
int i;
int64 nDeletedHeapTuples = 0;
ginxlogDeleteListPages data;
XLogRecData rdata[1];
Buffer buffers[GIN_NDELETE_AT_ONCE];
data.node = index->rd_node;
rdata[0].buffer = InvalidBuffer;
rdata[0].data = (char *) &data;
rdata[0].len = sizeof(ginxlogDeleteListPages);
rdata[0].next = NULL;
data.ndeleted = 0;
while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
{
data.toDelete[data.ndeleted] = blknoToDelete;
buffers[data.ndeleted] = ReadBuffer(index, blknoToDelete);
LockBuffer(buffers[data.ndeleted], GIN_EXCLUSIVE);
page = BufferGetPage(buffers[data.ndeleted]);
data.ndeleted++;
if (GinPageIsDeleted(page))
{
/* concurrent cleanup process is detected */
for (i = 0; i < data.ndeleted; i++)
UnlockReleaseBuffer(buffers[i]);
return true;
}
nDeletedHeapTuples += GinPageGetOpaque(page)->maxoff;
blknoToDelete = GinPageGetOpaque(page)->rightlink;
}
if (stats)
stats->pages_deleted += data.ndeleted;
START_CRIT_SECTION();
metadata->head = blknoToDelete;
Assert(metadata->nPendingPages >= data.ndeleted);
metadata->nPendingPages -= data.ndeleted;
Assert(metadata->nPendingHeapTuples >= nDeletedHeapTuples);
metadata->nPendingHeapTuples -= nDeletedHeapTuples;
if (blknoToDelete == InvalidBlockNumber)
{
metadata->tail = InvalidBlockNumber;
metadata->tailFreeSize = 0;
metadata->nPendingPages = 0;
metadata->nPendingHeapTuples = 0;
}
MarkBufferDirty(metabuffer);
for (i = 0; i < data.ndeleted; i++)
{
page = BufferGetPage(buffers[i]);
GinPageGetOpaque(page)->flags = GIN_DELETED;
MarkBufferDirty(buffers[i]);
}
if (RelationNeedsWAL(index))
{
XLogRecPtr recptr;
memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));
recptr = XLogInsert(RM_GIN_ID, XLOG_GIN_DELETE_LISTPAGE, rdata);
PageSetLSN(metapage, recptr);
for (i = 0; i < data.ndeleted; i++)
{
page = BufferGetPage(buffers[i]);
PageSetLSN(page, recptr);
}
}
for (i = 0; i < data.ndeleted; i++)
UnlockReleaseBuffer(buffers[i]);
END_CRIT_SECTION();
} while (blknoToDelete != newHead);
return false;
}
Developer ID: 42penguins, Project: postgres, Lines of code: 101, Source file: ginfast.c
Example 16: visibilitymap_set
/*
* visibilitymap_set - set a bit on a previously pinned page
*
* recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
* or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
* one provided; in normal running, we generate a new XLOG record and set the
* page LSN to that value. cutoff_xid is the largest xmin on the page being
* marked all-visible; it is needed for Hot Standby, and can be
* InvalidTransactionId if the page contains no tuples.
*
* Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
* this function. Except in recovery, caller should also pass the heap
* buffer. When checksums are enabled and we're not in recovery, we must add
* the heap buffer to the WAL chain to protect it from being torn.
*
* You must pass a buffer containing the correct map page to this function.
* Call visibilitymap_pin first to pin the right one. This function doesn't do
* any I/O.
*/
void
visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid)
{
BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
uint8 mapBit = HEAPBLK_TO_MAPBIT(heapBlk);
Page page;
char *map;
#ifdef TRACE_VISIBILITYMAP
elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
#endif
Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
Assert(InRecovery || BufferIsValid(heapBuf));
/* Check that we have the right heap page pinned, if present */
if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
elog(ERROR, "wrong heap buffer passed to visibilitymap_set");
/* Check that we have the right VM page pinned */
if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
page = BufferGetPage(vmBuf);
map = PageGetContents(page);
LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);
if (!(map[mapByte] & (1 << mapBit)))
{
START_CRIT_SECTION();
map[mapByte] |= (1 << mapBit);
MarkBufferDirty(vmBuf);
if (RelationNeedsWAL(rel))
{
if (XLogRecPtrIsInvalid(recptr))
{
Assert(!InRecovery);
recptr = log_heap_visible(rel->rd_node, heapBuf, vmBuf,
cutoff_xid);
/*
* If data checksums are enabled, we need to protect the heap
* page from being torn.
*/
if (DataChecksumsEnabled())
{
Page heapPage = BufferGetPage(heapBuf);
/* caller is expected to set PD_ALL_VISIBLE first */
Assert(PageIsAllVisible(heapPage));
PageSetLSN(heapPage, recptr);
}
}
PageSetLSN(page, recptr);
}
END_CRIT_SECTION();
}
LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
}
Developer ID: EMARQUIS, Project: postgres, Lines of code: 84, Source file: visibilitymap.c