/*
* Associate a BackgroundWorkerHandle with a shm_mq_handle just as if it had
* been passed to shm_mq_attach.
*/
void
shm_mq_set_handle(shm_mq_handle *mqh, BackgroundWorkerHandle *handle)
{
Assert(mqh->mqh_handle == NULL);
mqh->mqh_handle = handle;
}
Developer ID: kasoku, Project: jpug-doc, Lines of code: 10, Source file: shm_mq.c
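A hedged usage sketch, following the pattern in PostgreSQL's test_shm_mq module: after registering a dynamic background worker, its handle is attached to the queue so that later waits can detect if the worker dies. Variable names here are illustrative, not from the original snippet.

BackgroundWorker        worker;
BackgroundWorkerHandle *handle;
shm_mq_handle          *outqh;   /* obtained earlier via shm_mq_attach(mq, seg, NULL) */

/* ... fill in the BackgroundWorker struct ... */
if (!RegisterDynamicBackgroundWorker(&worker, &handle))
    ereport(ERROR,
            (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
             errmsg("could not register background worker")));

/* Associate the worker handle with the queue, as if passed to shm_mq_attach. */
shm_mq_set_handle(outqh, handle);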
Example 5: shm_mq_receive
/*
* Receive a message from a shared message queue.
*
* We set *nbytesp to the message length and *datap to point to the message
* payload. If the entire message exists in the queue as a single,
* contiguous chunk, *datap will point directly into shared memory; otherwise,
* it will point to a temporary buffer. This mostly avoids data copying in
* the hoped-for case where messages are short compared to the buffer size,
* while still allowing longer messages. In either case, the return value
* remains valid until the next receive operation is performed on the queue.
*
* When nowait = false, we'll wait on our process latch when the ring buffer
* is empty and we have not yet received a full message. The sender will
* set our process latch after more data has been written, and we'll resume
* processing. Each call will therefore return a complete message
* (unless the sender detaches the queue).
*
* When nowait = true, we do not manipulate the state of the process latch;
* instead, whenever the buffer is empty and we need to read from it, we
* return SHM_MQ_WOULD_BLOCK. In this case, the caller should call this
* function again after the process latch has been set.
*/
shm_mq_result
shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
{
shm_mq *mq = mqh->mqh_queue;
shm_mq_result res;
Size rb = 0;
Size nbytes;
void *rawdata;
Assert(mq->mq_receiver == MyProc);
/* We can't receive data until the sender has attached. */
if (!mqh->mqh_counterparty_attached)
{
if (nowait)
{
if (shm_mq_get_sender(mq) == NULL)
return SHM_MQ_WOULD_BLOCK;
}
else if (!shm_mq_wait_internal(mq, &mq->mq_sender, mqh->mqh_handle)
&& shm_mq_get_sender(mq) == NULL)
{
mq->mq_detached = true;
return SHM_MQ_DETACHED;
}
mqh->mqh_counterparty_attached = true;
}
/* Consume any zero-copy data from previous receive operation. */
if (mqh->mqh_consume_pending > 0)
{
shm_mq_inc_bytes_read(mq, mqh->mqh_consume_pending);
mqh->mqh_consume_pending = 0;
}
/* Try to read, or finish reading, the length word from the buffer. */
while (!mqh->mqh_length_word_complete)
{
/* Try to receive the message length word. */
Assert(mqh->mqh_partial_bytes < sizeof(Size));
res = shm_mq_receive_bytes(mq, sizeof(Size) - mqh->mqh_partial_bytes,
nowait, &rb, &rawdata);
if (res != SHM_MQ_SUCCESS)
return res;
/*
* Hopefully, we'll receive the entire message length word at once.
* But if sizeof(Size) > MAXIMUM_ALIGNOF, then it might be split over
* multiple reads.
*/
if (mqh->mqh_partial_bytes == 0 && rb >= sizeof(Size))
{
Size needed;
nbytes = *(Size *) rawdata;
/* If we've already got the whole message, we're done. */
needed = MAXALIGN(sizeof(Size)) + MAXALIGN(nbytes);
if (rb >= needed)
{
/*
* Technically, we could consume the message length
* information at this point, but the extra write to shared
* memory wouldn't be free and in most cases we would reap no
* benefit.
*/
mqh->mqh_consume_pending = needed;
*nbytesp = nbytes;
*datap = ((char *) rawdata) + MAXALIGN(sizeof(Size));
return SHM_MQ_SUCCESS;
}
/*
* We don't have the whole message, but we at least have the whole
* length word.
*/
mqh->mqh_expected_bytes = nbytes;
mqh->mqh_length_word_complete = true;
//......... (part of the code omitted here) .........
Developer ID: kasoku, Project: jpug-doc, Lines of code: 101, Source file: shm_mq.c
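A minimal receiver-loop sketch for the nowait = true case described above, assuming "mqh" is an attached shm_mq_handle and "process_message" is a hypothetical consumer; the exact WaitLatch argument list varies across PostgreSQL versions.

for (;;)
{
    Size          nbytes;
    void         *data;
    shm_mq_result res;

    res = shm_mq_receive(mqh, &nbytes, &data, true);
    if (res == SHM_MQ_SUCCESS)
    {
        process_message(data, nbytes);   /* valid until the next receive */
        continue;
    }
    if (res == SHM_MQ_DETACHED)
        break;                           /* sender is gone */

    /* SHM_MQ_WOULD_BLOCK: sleep until the sender sets our latch. */
    (void) WaitLatch(MyLatch, WL_LATCH_SET | WL_POSTMASTER_DEATH, 0,
                     PG_WAIT_EXTENSION);
    ResetLatch(MyLatch);
    CHECK_FOR_INTERRUPTS();
}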
Example 6: get_object_address
/*
* Translate an object name and arguments (as passed by the parser) to an
* ObjectAddress.
*
* The returned object will be locked using the specified lockmode. If a
* sub-object is looked up, the parent object will be locked instead.
*
* If the object is a relation or a child object of a relation (e.g. an
* attribute or constraint), the relation is also opened and *relp receives
* the open relcache entry pointer; otherwise, *relp is set to NULL. This
* is a bit grotty but it makes life simpler, since the caller will
* typically need the relcache entry too. Caller must close the relcache
* entry when done with it. The relation is locked with the specified lockmode
* if the target object is the relation itself or an attribute, but for other
* child objects, only AccessShareLock is acquired on the relation.
*
* We don't currently provide a function to release the locks acquired here;
* typically, the lock must be held until commit to guard against a concurrent
* drop operation.
*/
ObjectAddress
get_object_address(ObjectType objtype, List *objname, List *objargs,
Relation *relp, LOCKMODE lockmode)
{
ObjectAddress address;
Relation relation = NULL;
/* Some kind of lock must be taken. */
Assert(lockmode != NoLock);
switch (objtype)
{
case OBJECT_INDEX:
case OBJECT_SEQUENCE:
case OBJECT_TABLE:
case OBJECT_VIEW:
relation =
get_relation_by_qualified_name(objtype, objname, lockmode);
address.classId = RelationRelationId;
address.objectId = RelationGetRelid(relation);
address.objectSubId = 0;
break;
case OBJECT_COLUMN:
address =
get_object_address_attribute(objtype, objname, &relation,
lockmode);
break;
case OBJECT_RULE:
case OBJECT_TRIGGER:
case OBJECT_CONSTRAINT:
address = get_object_address_relobject(objtype, objname, &relation);
break;
case OBJECT_DATABASE:
case OBJECT_EXTENSION:
case OBJECT_TABLESPACE:
case OBJECT_ROLE:
case OBJECT_SCHEMA:
case OBJECT_LANGUAGE:
address = get_object_address_unqualified(objtype, objname);
break;
case OBJECT_TYPE:
case OBJECT_DOMAIN:
address.classId = TypeRelationId;
address.objectId =
typenameTypeId(NULL, makeTypeNameFromNameList(objname), NULL);
address.objectSubId = 0;
break;
case OBJECT_AGGREGATE:
address.classId = ProcedureRelationId;
address.objectId = LookupAggNameTypeNames(objname, objargs, false);
address.objectSubId = 0;
break;
case OBJECT_FUNCTION:
address.classId = ProcedureRelationId;
address.objectId = LookupFuncNameTypeNames(objname, objargs, false);
address.objectSubId = 0;
break;
case OBJECT_OPERATOR:
Assert(list_length(objargs) == 2);
address.classId = OperatorRelationId;
address.objectId =
LookupOperNameTypeNames(NULL, objname,
(TypeName *) linitial(objargs),
(TypeName *) lsecond(objargs),
false, -1);
address.objectSubId = 0;
break;
case OBJECT_OPCLASS:
case OBJECT_OPFAMILY:
address = get_object_address_opcf(objtype, objname, objargs);
break;
case OBJECT_CAST:
{
TypeName *sourcetype = (TypeName *) linitial(objname);
TypeName *targettype = (TypeName *) linitial(objargs);
Oid sourcetypeid = typenameTypeId(NULL, sourcetype, NULL);
Oid targettypeid = typenameTypeId(NULL, targettype, NULL);
address.classId = CastRelationId;
address.objectId =
//......... (part of the code omitted here) .........
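A hedged calling sketch for get_object_address, matching the signature shown above (older PostgreSQL releases; newer ones take a Node * and a missing_ok flag). The name list is built by hand here only for illustration; real callers get it from the parser.

List          *names = list_make2(makeString(pstrdup("public")),
                                  makeString(pstrdup("foo")));
Relation       rel;
ObjectAddress  addr;

addr = get_object_address(OBJECT_TABLE, names, NIL, &rel, AccessShareLock);
/* ... use addr.classId / addr.objectId ... */
if (rel != NULL)
    relation_close(rel, NoLock);   /* keep the lock until commit */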
/*
* get_relation_info -
* Retrieves catalog information for a given relation.
*
* Given the Oid of the relation, return the following info into fields
* of the RelOptInfo struct:
*
* min_attr lowest valid AttrNumber
* max_attr highest valid AttrNumber
* indexlist list of IndexOptInfos for relation's indexes
* pages number of pages
* tuples number of tuples
*
* Also, initialize the attr_needed[] and attr_widths[] arrays. In most
* cases these are left as zeroes, but sometimes we need to compute attr
* widths here, and we may as well cache the results for costsize.c.
*
* If inhparent is true, all we need to do is set up the attr arrays:
* the RelOptInfo actually represents the appendrel formed by an inheritance
* tree, and so the parent rel's physical size and index information isn't
* important for it.
*/
void
get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
RelOptInfo *rel)
{
Index varno = rel->relid;
Relation relation;
bool hasindex;
List *indexinfos = NIL;
bool needs_longlock;
/*
* We need not lock the relation since it was already locked, either by
* the rewriter or when expand_inherited_rtentry() added it to the query's
* rangetable.
*/
relation = heap_open(relationObjectId, NoLock);
needs_longlock = rel_needs_long_lock(relationObjectId);
rel->min_attr = FirstLowInvalidHeapAttributeNumber + 1;
rel->max_attr = RelationGetNumberOfAttributes(relation);
Assert(rel->max_attr >= rel->min_attr);
rel->attr_needed = (Relids *)
palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids));
rel->attr_widths = (int32 *)
palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));
/*
* CDB: Get partitioning key info for distributed relation.
*/
rel->cdbpolicy = RelationGetPartitioningKey(relation);
/*
* Estimate relation size --- unless it's an inheritance parent, in which
* case the size will be computed later in set_append_rel_pathlist, and we
* must leave it zero for now to avoid bollixing the total_table_pages
* calculation.
*/
if (!inhparent)
{
cdb_estimate_rel_size
(
rel,
relation,
relation,
rel->attr_widths - rel->min_attr,
&rel->pages,
&rel->tuples,
&rel->cdb_default_stats_used
);
}
/*
* Make list of indexes. Ignore indexes on system catalogs if told to.
* Don't bother with indexes for an inheritance parent, either.
*/
if (inhparent ||
(IgnoreSystemIndexes && IsSystemClass(relation->rd_rel)))
hasindex = false;
else
hasindex = relation->rd_rel->relhasindex;
if (hasindex)
{
List *indexoidlist;
ListCell *l;
LOCKMODE lmode;
/* Warn if indexed table needs ANALYZE. */
if (rel->cdb_default_stats_used)
cdb_default_stats_warning_for_table(relation->rd_id);
indexoidlist = RelationGetIndexList(relation);
/*
* For each index, we get the same type of lock that the executor will
* need, and do not release it. This saves a couple of trips to the
* shared lock manager while not creating any real loss of
//......... (part of the code omitted here) .........
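A short sketch of the offset-indexing idiom behind the "rel->attr_widths - rel->min_attr" argument above: the arrays cover attribute numbers min_attr..max_attr, so shifting the base pointer lets the callee index them directly by attribute number. (This is the planner's own idiom; the shifted pointer must only be indexed within that range.)

int32      *widths = rel->attr_widths - rel->min_attr;  /* shifted base */
AttrNumber  attno  = 1;                                 /* first user column */

widths[attno] = 4;   /* same element as rel->attr_widths[attno - rel->min_attr] */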
//......... (part of the code omitted here) .........
nargs = list ? PySequence_Length(list) : 0;
plan->nargs = nargs;
plan->types = nargs ? palloc(sizeof(Oid) * nargs) : NULL;
plan->values = nargs ? palloc(sizeof(Datum) * nargs) : NULL;
plan->args = nargs ? palloc(sizeof(PLyTypeInfo) * nargs) : NULL;
MemoryContextSwitchTo(oldcontext);
oldcontext = CurrentMemoryContext;
oldowner = CurrentResourceOwner;
PLy_spi_subtransaction_begin(oldcontext, oldowner);
PG_TRY();
{
int i;
PLyExecutionContext *exec_ctx = PLy_current_execution_context();
/*
* the other loop might throw an exception; if a PLyTypeInfo member
* isn't properly initialized, the Py_DECREF(plan) will go boom
*/
for (i = 0; i < nargs; i++)
{
PLy_typeinfo_init(&plan->args[i], plan->mcxt);
plan->values[i] = PointerGetDatum(NULL);
}
for (i = 0; i < nargs; i++)
{
char *sptr;
HeapTuple typeTup;
Oid typeId;
int32 typmod;
optr = PySequence_GetItem(list, i);
if (PyString_Check(optr))
sptr = PyString_AsString(optr);
else if (PyUnicode_Check(optr))
sptr = PLyUnicode_AsString(optr);
else
{
ereport(ERROR,
(errmsg("plpy.prepare: type name at ordinal position %d is not a string", i)));
sptr = NULL; /* keep compiler quiet */
}
/********************************************************
* Resolve argument type names and then look them up by
* oid in the system cache, and remember the required
* information for input conversion.
********************************************************/
parseTypeString(sptr, &typeId, &typmod, false);
typeTup = SearchSysCache1(TYPEOID,
ObjectIdGetDatum(typeId));
if (!HeapTupleIsValid(typeTup))
elog(ERROR, "cache lookup failed for type %u", typeId);
Py_DECREF(optr);
/*
* set optr to NULL, so we won't try to unref it again in case of
* an error
*/
optr = NULL;
plan->types[i] = typeId;
PLy_output_datum_func(&plan->args[i], typeTup, exec_ctx->curr_proc->langid, exec_ctx->curr_proc->trftypes);
ReleaseSysCache(typeTup);
}
pg_verifymbstr(query, strlen(query), false);
plan->plan = SPI_prepare(query, plan->nargs, plan->types);
if (plan->plan == NULL)
elog(ERROR, "SPI_prepare failed: %s",
SPI_result_code_string(SPI_result));
/* transfer plan from procCxt to topCxt */
if (SPI_keepplan(plan->plan))
elog(ERROR, "SPI_keepplan failed");
PLy_spi_subtransaction_commit(oldcontext, oldowner);
}
PG_CATCH();
{
Py_DECREF(plan);
Py_XDECREF(optr);
PLy_spi_subtransaction_abort(oldcontext, oldowner);
return NULL;
}
PG_END_TRY();
Assert(plan->plan != NULL);
return (PyObject *) plan;
}
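The SPI half of the pattern above, as a standalone hedged sketch (assumes SPI_connect() has already succeeded): prepare a plan and move it out of the SPI procedure context so it survives SPI_finish().

Oid        argtypes[1] = {INT4OID};
SPIPlanPtr plan;

plan = SPI_prepare("SELECT $1::int4 + 1", 1, argtypes);
if (plan == NULL)
    elog(ERROR, "SPI_prepare failed: %s",
         SPI_result_code_string(SPI_result));
if (SPI_keepplan(plan))            /* returns 0 on success */
    elog(ERROR, "SPI_keepplan failed");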
/**
* Destroys a PDM thread.
*
* This will wake up the thread, tell it to terminate, and wait for it to terminate.
*
* @returns VBox status code.
* This reflects the success of destroying the thread, not the exit code
* of the thread; that is stored in *pRcThread.
* @param pThread The thread to destroy.
* @param pRcThread Where to store the thread exit code. Optional.
* @thread The emulation thread (EMT).
*/
VMMR3DECL(int) PDMR3ThreadDestroy(PPDMTHREAD pThread, int *pRcThread)
{
/*
* Assert sanity.
*/
AssertPtrReturn(pThread, VERR_INVALID_POINTER);
AssertReturn(pThread->u32Version == PDMTHREAD_VERSION, VERR_INVALID_MAGIC);
Assert(pThread->Thread != RTThreadSelf());
AssertPtrNullReturn(pRcThread, VERR_INVALID_POINTER);
PVM pVM = pThread->Internal.s.pVM;
VM_ASSERT_EMT(pVM);
PUVM pUVM = pVM->pUVM;
/*
* Advance the thread to the terminating state.
*/
int rc = VINF_SUCCESS;
if (pThread->enmState <= PDMTHREADSTATE_TERMINATING)
{
for (;;)
{
PDMTHREADSTATE enmState = pThread->enmState;
switch (enmState)
{
case PDMTHREADSTATE_RUNNING:
if (!pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
continue;
rc = pdmR3ThreadWakeUp(pThread);
break;
case PDMTHREADSTATE_SUSPENDED:
case PDMTHREADSTATE_SUSPENDING:
case PDMTHREADSTATE_RESUMING:
case PDMTHREADSTATE_INITIALIZING:
if (!pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
continue;
break;
case PDMTHREADSTATE_TERMINATING:
case PDMTHREADSTATE_TERMINATED:
break;
default:
AssertMsgFailed(("enmState=%d\n", enmState));
rc = VERR_PDM_THREAD_IPE_2;
break;
}
break;
}
}
int rc2 = RTSemEventMultiSignal(pThread->Internal.s.BlockEvent);
AssertRC(rc2);
/*
* Wait for it to terminate and then do cleanups.
*/
rc2 = RTThreadWait(pThread->Thread, RT_SUCCESS(rc) ? 60*1000 : 150, pRcThread);
if (RT_SUCCESS(rc2))
{
/* make it invalid. */
pThread->u32Version = 0xffffffff;
pThread->enmState = PDMTHREADSTATE_INVALID;
pThread->Thread = NIL_RTTHREAD;
/* unlink */
RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
if (pUVM->pdm.s.pThreads == pThread)
{
pUVM->pdm.s.pThreads = pThread->Internal.s.pNext;
if (!pThread->Internal.s.pNext)
pUVM->pdm.s.pThreadsTail = NULL;
}
else
{
PPDMTHREAD pPrev = pUVM->pdm.s.pThreads;
while (pPrev && pPrev->Internal.s.pNext != pThread)
pPrev = pPrev->Internal.s.pNext;
Assert(pPrev);
if (pPrev)
pPrev->Internal.s.pNext = pThread->Internal.s.pNext;
if (!pThread->Internal.s.pNext)
pUVM->pdm.s.pThreadsTail = pPrev;
}
pThread->Internal.s.pNext = NULL;
RTCritSectLeave(&pUVM->pdm.s.ListCritSect);
/* free the resources */
RTSemEventMultiDestroy(pThread->Internal.s.BlockEvent);
//......... (part of the code omitted here) .........
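A hedged usage sketch for PDMR3ThreadDestroy, called on the EMT ("pThread" is assumed to come from an earlier PDMR3ThreadCreate):

int rcThread;
int rc = PDMR3ThreadDestroy(pThread, &rcThread);
if (RT_SUCCESS(rc))
    Log(("worker thread exited with %Rrc\n", rcThread));
else
    AssertMsgFailed(("PDMR3ThreadDestroy -> %Rrc\n", rc));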
//......... (part of the code omitted here) .........
LPPLLOCALVARS lpplLocalVars; // Ptr to re-entrant vars
LPUBOOL lpbCtrlBreak; // Ptr to Ctrl-Break flag
LPVARARRAY_HEADER lpMemHdrRht; // Ptr to right arg header
// Get the thread's ptr to local vars
lpplLocalVars = TlsGetValue (dwTlsPlLocalVars);
// Get the ptr to the Ctrl-Break flag
lpbCtrlBreak = &lpplLocalVars->bCtrlBreak;
//***************************************************************
// This function is not sensitive to the axis operator,
// so signal a syntax error if present
//***************************************************************
if (lptkAxis NE NULL)
goto AXIS_SYNTAX_EXIT;
// Get the attributes (Type, NELM, and Rank) of the left & right args
AttrsOfToken (lptkLftArg, &aplTypeLft, &aplNELMLft, &aplRankLft, NULL);
AttrsOfToken (lptkRhtArg, &aplTypeRht, &aplNELMRht, &aplRankRht, NULL);
// Get left and right arg's global ptrs
GetGlbPtrs_LOCK (lptkLftArg, &hGlbLft, &lpMemLft);
GetGlbPtrs_LOCK (lptkRhtArg, &hGlbRht, &lpMemRht);
// Check for RANK ERROR
if (IsMultiRank (aplRankLft))
goto RANK_EXIT;
// Check for LENGTH ERROR
if (aplNELMLft NE aplRankRht)
goto LENGTH_EXIT;
// Treat the left arg as an axis
if (!CheckAxis_EM (lptkLftArg, // The "axis" token
aplRankRht, // All values less than this
FALSE, // TRUE iff scalar or one-element vector only
FALSE, // TRUE iff want sorted axes
TRUE, // TRUE iff axes must be contiguous
TRUE, // TRUE iff duplicate axes are allowed
NULL, // TRUE iff fractional values allowed
&aplRankRes, // Return last axis value
NULL, // Return # elements in axis vector
&hGlbAxis)) // Return HGLOBAL with APLUINT axis values
goto DOMAIN_EXIT;
// Map APA right arg to INT result
if (IsSimpleAPA (aplTypeRht))
aplTypeRes = ARRAY_INT;
else
aplTypeRes = aplTypeRht;
// Strip out the simple scalar right argument case
if (IsScalar (aplRankRht) && IsSimpleNH (aplTypeRes))
{
// Allocate a new YYRes
lpYYRes = YYAlloc ();
// Split cases based upon the right arg's token type
switch (lptkRhtArg->tkFlags.TknType)
{
case TKT_VARNAMED:
// tkData is an LPSYMENTRY
Assert (GetPtrTypeDir (lptkRhtArg->tkData.tkVoid) EQ PTRTYPE_STCONST);
// If it's not immediate, we must look inside the array
if (!lptkRhtArg->tkData.tkSym->stFlags.Imm)
{
// stData is a valid HGLOBAL variable array
Assert (IsGlbTypeVarDir_PTB (lptkRhtArg->tkData.tkSym->stData.stGlbData));
#ifdef DEBUG
// If we ever get here, we must have missed a type demotion
DbgStop (); // #ifdef DEBUG
#endif
} // End IF
// Handle the immediate case
// Fill in the result token
lpYYRes->tkToken.tkFlags.TknType = TKT_VARIMMED;
lpYYRes->tkToken.tkFlags.ImmType = lptkRhtArg->tkData.tkSym->stFlags.ImmType;
////////////////lpYYRes->tkToken.tkFlags.NoDisplay = FALSE; // Already zero from YYAlloc
lpYYRes->tkToken.tkData.tkLongest = lptkRhtArg->tkData.tkSym->stData.stLongest;
lpYYRes->tkToken.tkCharIndex = lptkFunc->tkCharIndex;
break;
case TKT_VARIMMED:
// Fill in the result token
lpYYRes->tkToken = *lptkRhtArg;
break;
defstop
break;
} // End SWITCH
goto NORMAL_EXIT;
} // End IF
/**
* The PDM thread function.
*
* @returns return from pfnThread.
*
* @param Thread The thread handle.
* @param pvUser Pointer to the PDMTHREAD structure.
*/
static DECLCALLBACK(int) pdmR3ThreadMain(RTTHREAD Thread, void *pvUser)
{
PPDMTHREAD pThread = (PPDMTHREAD)pvUser;
Log(("PDMThread: Initializing thread %RTthrd / %p / '%s'...\n", Thread, pThread, RTThreadGetName(Thread)));
pThread->Thread = Thread;
PUVM pUVM = pThread->Internal.s.pVM->pUVM;
if ( pUVM->pVmm2UserMethods
&& pUVM->pVmm2UserMethods->pfnNotifyPdmtInit)
pUVM->pVmm2UserMethods->pfnNotifyPdmtInit(pUVM->pVmm2UserMethods, pUVM);
/*
* The run loop.
*
* It handles simple thread functions which return when they see a suspending
* request and leave the PDMR3ThreadIAmSuspending and PDMR3ThreadIAmRunning
* parts to us.
*/
int rc;
for (;;)
{
switch (pThread->Internal.s.enmType)
{
case PDMTHREADTYPE_DEVICE:
rc = pThread->u.Dev.pfnThread(pThread->u.Dev.pDevIns, pThread);
break;
case PDMTHREADTYPE_USB:
rc = pThread->u.Usb.pfnThread(pThread->u.Usb.pUsbIns, pThread);
break;
case PDMTHREADTYPE_DRIVER:
rc = pThread->u.Drv.pfnThread(pThread->u.Drv.pDrvIns, pThread);
break;
case PDMTHREADTYPE_INTERNAL:
rc = pThread->u.Int.pfnThread(pThread->Internal.s.pVM, pThread);
break;
case PDMTHREADTYPE_EXTERNAL:
rc = pThread->u.Ext.pfnThread(pThread);
break;
default:
AssertMsgFailed(("%d\n", pThread->Internal.s.enmType));
rc = VERR_PDM_THREAD_IPE_1;
break;
}
if (RT_FAILURE(rc))
break;
/*
* If this is a simple thread function, the state will be suspending
* or initializing now. If it isn't, we're supposed to terminate.
*/
if ( pThread->enmState != PDMTHREADSTATE_SUSPENDING
&& pThread->enmState != PDMTHREADSTATE_INITIALIZING)
{
Assert(pThread->enmState == PDMTHREADSTATE_TERMINATING);
break;
}
rc = PDMR3ThreadIAmSuspending(pThread);
if (RT_FAILURE(rc))
break;
if (pThread->enmState != PDMTHREADSTATE_RESUMING)
{
Assert(pThread->enmState == PDMTHREADSTATE_TERMINATING);
break;
}
rc = PDMR3ThreadIAmRunning(pThread);
if (RT_FAILURE(rc))
break;
}
if (RT_FAILURE(rc))
LogRel(("PDMThread: Thread '%s' (%RTthrd) quit unexpectedly with rc=%Rrc.\n", RTThreadGetName(Thread), Thread, rc));
/*
* Advance the state to terminating and then on to terminated.
*/
for (;;)
{
PDMTHREADSTATE enmState = pThread->enmState;
if ( enmState == PDMTHREADSTATE_TERMINATING
|| pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
break;
}
ASMAtomicXchgSize(&pThread->enmState, PDMTHREADSTATE_TERMINATED);
int rc2 = RTThreadUserSignal(Thread); AssertRC(rc2);
//......... (part of the code omitted here) .........
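A hedged sketch of the kind of "simple" thread function this run loop drives: it returns whenever the state leaves RUNNING, leaving the PDMR3ThreadIAmSuspending / PDMR3ThreadIAmRunning handshakes to pdmR3ThreadMain. Names are illustrative.

static DECLCALLBACK(int) myDevThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
{
    if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
        return VINF_SUCCESS;                 /* nothing to set up here */

    while (pThread->enmState == PDMTHREADSTATE_RUNNING)
    {
        /* ... perform one unit of device work, block on an event, ... */
    }
    return VINF_SUCCESS;                     /* suspending or terminating */
}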