// -----------------------------------------------------------------------
// Given a column list providing identifiers for columns of this table,
// this method returns a list of VEG expressions and/or base columns that
// show the equivalence of base columns with index columns.
// -----------------------------------------------------------------------
void TableDesc::getEquivVEGCols (const ValueIdList& columnList,
ValueIdList &VEGColumnList) const
{
for (CollIndex i=0; i < columnList.entries(); i++)
{
ItemExpr *ie = columnList[i].getItemExpr();
BaseColumn *bc = NULL;
switch (ie->getOperatorType())
{
case ITM_BASECOLUMN:
bc = (BaseColumn *) ie;
break;
case ITM_INDEXCOLUMN:
bc = (BaseColumn *) ((IndexColumn *) ie)->getDefinition().
getItemExpr();
CMPASSERT(bc->getOperatorType() == ITM_BASECOLUMN);
break;
default:
ABORT("Invalid argument to TableDesc::getEquivVEGCols()\n");
}
CMPASSERT(bc->getTableDesc() == this);
VEGColumnList.insert(getColumnVEGList()[bc->getColNumber()]);
}
}
// -----------------------------------------------------------------------
// IndexDesc::isUniqueIndex()
//
// Returns TRUE iff this index was created as a unique index.
//
// NOTE(review): the original body contained an alternative (and
// unreachable) implementation after the unconditional return below,
// which derived uniqueness from the presence of non-key columns via
// getNonKeyColumnList(). That dead code (and its stale #pragma
// nowarn/warn(269) wrappers) has been removed; observable behavior is
// unchanged because only the first return statement ever executed.
// -----------------------------------------------------------------------
NABoolean IndexDesc::isUniqueIndex() const
{
  // The NAFileSet records whether the index is unique.
  return getNAFileSet()->uniqueIndex();
}
// [scrape metadata] Contributor: (unknown) | Project: (unknown) | Lines: 19 | Source: (unknown)
// Example 5: getIndexColumns
// Collect the columns of this index that are not part of the index key.
// The output list is cleared first, then populated.
void
IndexDesc::getNonKeyColumnList(ValueIdList& nonKeyColumnList) const
{
  const ValueIdList &indexColumns = getIndexColumns();
  const ValueIdList &keyColumns   = getIndexKey();

  // Start from a clean output list.
  nonKeyColumnList.clear();

  // Begin with every column of the index ...
  for (CollIndex ix = 0; ix < indexColumns.entries(); ix++)
    nonKeyColumnList.insert(indexColumns[ix]);

  // ... then take out each key column.
  for (CollIndex ix = 0; ix < keyColumns.entries(); ix++)
    {
      nonKeyColumnList.remove(keyColumns[ix]);

      // For a secondary index, the base column on which the index
      // column is defined may also be present in the list; remove it.
      const ItemExpr *keyItem = keyColumns[ix].getItemExpr();
      if (keyItem->getOperatorType() == ITM_INDEXCOLUMN)
        {
          const ValueId &baseColId =
            ((IndexColumn *)(keyItem))->getDefinition();
          nonKeyColumnList.remove(baseColId);
        }
    }
} // IndexDesc::getNonKeyColumnList(ValueIdList& nonKeyColumnList) const
// [scrape metadata] Contributor: (unknown) | Project: (unknown) | Lines: 40 | Source: (unknown)
// Example 6: getIdentityColumn
// -----------------------------------------------------------------------
// TableDesc::getIdentityColumn()
//
// Appends the table's IDENTITY column (if any) to columnList. A table
// can have at most one identity column, so the scan stops at the first
// match; if none exists, columnList is left untouched.
// -----------------------------------------------------------------------
void TableDesc::getIdentityColumn(ValueIdList &columnList) const
{
  for (CollIndex ix = 0; ix < colList_.entries(); ix++)
    {
      ValueId vid = colList_[ix];
      NAColumn *naCol = vid.getNAColumn();
      if (naCol->isIdentityColumn())
        {
          columnList.insert(vid);
          // Only one identity column per table is possible, so stop.
          break;
        }
    }
}
short
PhysUnPackRows::codeGen(Generator *generator)
{
// Get handles on expression generator, map table, and heap allocator
//
ExpGenerator *expGen = generator->getExpGenerator();
Space *space = generator->getSpace();
// Allocate a new map table for this operation
//
MapTable *localMapTable = generator->appendAtEnd();
// Generate the child and capture the task definition block and a description
// of the reply composite row layout and the explain information.
//
child(0)->codeGen(generator);
ComTdb *childTdb = (ComTdb*)(generator->getGenObj());
ex_cri_desc *childCriDesc = generator->getCriDesc(Generator::UP);
ExplainTuple *childExplainTuple = generator->getExplainTuple();
// Make all of my child's outputs map to ATP 1. Since they are
// not needed above, they will not be in the work ATP (0).
// (Later, they will be removed from the map table)
//
localMapTable->setAllAtp(1);
// Generate the given and returned composite row descriptors.
// unPackRows adds a tupp (for the generated outputs) to the
// row given by the parent. The workAtp will have the 2 more
// tupps (1 for the generated outputs and another for the
// indexValue) than the given.
//
ex_cri_desc *givenCriDesc = generator->getCriDesc(Generator::DOWN);
ex_cri_desc *returnedCriDesc =
#pragma nowarn(1506) // warning elimination
new(space) ex_cri_desc(givenCriDesc->noTuples() + 1, space);
#pragma warn(1506) // warning elimination
ex_cri_desc *workCriDesc =
#pragma nowarn(1506) // warning elimination
new(space) ex_cri_desc(givenCriDesc->noTuples() + 2, space);
#pragma warn(1506) // warning elimination
// unPackCols is the next to the last Tp in Atp 0, the work ATP.
// and the last Tp in the returned ATP.
//
const Int32 unPackColsAtpIndex = workCriDesc->noTuples() - 2;
const Int32 unPackColsAtp = 0;
// The length of the new tuple which will contain the columns
// generated by unPackRows
//
ULng32 unPackColsTupleLen;
// The Tuple Desc describing the tuple containing the new unPacked columns
// It is generated when the expression is generated.
//
ExpTupleDesc *unPackColsTupleDesc = 0;
// indexValue is the last Tp in Atp 0, the work ATP.
//
const Int32 indexValueAtpIndex = workCriDesc->noTuples() - 1;
const Int32 indexValueAtp = 0;
// The length of the work tuple which will contain the value
// of the index. This should always be sizeof(int).
//
ULng32 indexValueTupleLen = 0;
// The Tuple Desc describing the tuple containing the new unPacked columns
// It is generated when the expression is generated.
//
ExpTupleDesc *indexValueTupleDesc = 0;
ValueIdList indexValueList;
if (indexValue() != NULL_VALUE_ID)
{
indexValueList.insert(indexValue());
expGen->processValIdList(indexValueList,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
indexValueTupleLen,
indexValueAtp,
indexValueAtpIndex,
&indexValueTupleDesc,
ExpTupleDesc::SHORT_FORMAT);
GenAssert(indexValueTupleLen == sizeof(Int32),
"UnPackRows::codeGen: Internal Error");
}
// If a packingFactor exists, generate a move expression for this.
// It is assumed that the packingFactor expression evaluates to a
// 4 byte integer.
//......... part of the code is omitted here .........
//......... part of the code is omitted here .........
// errorsCanOccur() determines if errors can occur converting the class
// datatype to the target datatype. The object on whose behalf the
// member function is called is expected to be a NAType.
NABoolean generateNarrow =
keyValue->getValueId().getType().errorsCanOccur(*mdamHelper.getTargetType());
if ((generateNarrow) &&
(getenv("NO_NARROWS"))) // for testing -- allows turning off Narrows
generateNarrow = FALSE;
if (generateNarrow)
{
vnode = new(generator->wHeap())
Narrow(keyValue,
mdamHelper.getDataConversionErrorFlag(),
mdamHelper.getTargetType()->newCopy());
}
else
{
vnode = new(generator->wHeap())
Cast(keyValue,mdamHelper.getTargetType()->newCopy());
}
#pragma nowarn(1506) // warning elimination
vnode = new CompEncode(vnode,mdamHelper.isDescending());
#pragma warn(1506) // warning elimination
vnode->bindNode(generator->getBindWA());
// add CASE
// WHEN child(2)
// CAST(round up/round down)
// ELSE
// no-op
ItemExpr * hnode = 0;
if (predType == MdamPred::MDAM_LT)
hnode = new ConstValue(2 /* ex_conv_clause::CONV_RESULT_ROUNDED_UP_TO_MIN */);
else
hnode = new ConstValue(-2 /* ex_conv_clause::CONV_RESULT_ROUNDED_DOWN_TO_MAX */);
hnode = generator->getExpGenerator()->
createExprTree("CASE WHEN @B1 THEN @A2 ELSE @A3 END",
0,
3, // number of subtree parameters
child(2), // @B1
hnode, // @A2
0); // @A3 -- results in no operation
hnode->bindNode(generator->getBindWA());
// Assign attributes for result value
ValueId vnodeId = vnode->getValueId();
ValueId hnodeId = hnode->getValueId();
ULng32 tupleLength = 0;
ValueIdList vnodeList;
vnodeList.insert(vnode->getValueId());
generator->getExpGenerator()->processValIdList(
vnodeList,
mdamHelper.getTupleDataFormat(),
tupleLength, // out
mdamHelper.getAtp(),
mdamHelper.getAtpIndex());
// Assign attributes for modifying data conversion error flag
// Note that all we do is copy the already-assigned attributes
ItemExpr * dataCEF = mdamHelper.getDataConversionErrorFlag();
ValueId dataCEFId = dataCEF->getValueId();
Attributes * dataCEFAttr =
(generator->getMapInfo(dataCEFId))->getAttr();
generator->addMapInfoToThis(generator->getLastMapTable(), hnodeId,dataCEFAttr);
// finally generate the expression and hang it off an MdamPred
ex_expr *vexpr = 0;
vnodeList.insert(hnode->getValueId()); // put hnode in there too
rc = generator->getExpGenerator()->generateListExpr(
vnodeList,
ex_expr::exp_ARITH_EXPR,
&vexpr);
#pragma nowarn(1506) // warning elimination
*head = *tail = new(generator->getSpace())
MdamPred(mdamHelper.getDisjunctNumber(),
predType,
vexpr);
#pragma warn(1506) // warning elimination
return rc;
}
// BiRelat for which the following is called will be a predicate for one of the
// endpoints of an MDAM_BETWEEN. Determines the MDAM predicate type for this
// comparison and generates the expression (*vexpr) that converts and encodes
// the key value into the MDAM key buffer.
void BiRelat::getMdamPredDetails(Generator* generator,
                                 MdamCodeGenHelper& mdamHelper,
                                 MdamPred::MdamPredType& predType,
                                 ex_expr** vexpr)
{
  // Map the comparison operator to an MDAM predicate type. Inequality preds
  // are not inverted for descending keys here; instead, the endpoints of the
  // MDAM_BETWEEN interval are switched during creation of the mdam network
  // in the executor.
  switch (getOperatorType())
    {
    case ITM_LESS:       predType = MdamPred::MDAM_LT; break;
    case ITM_LESS_EQ:    predType = MdamPred::MDAM_LE; break;
    case ITM_GREATER:    predType = MdamPred::MDAM_GT; break;
    case ITM_GREATER_EQ: predType = MdamPred::MDAM_GE; break;
    default:
      GenAssert(0, "mdamPredGen: invalid comparison for subrange.");
      break;
    }

  ItemExpr* leftOperand  = child(0);
  ItemExpr* rightOperand = child(1);
  ValueId keyColumn = mdamHelper.getKeyColumn();

  // Canonical form used by rangespec is <key> <compare> <value>.
  ItemExpr* keyValue = rightOperand;
  GenAssert(leftOperand->getValueId() == keyColumn,
            "mdamPredGen: unexpected form for key predicate.");

  // Build an expression to convert the key value to the type of the
  // key column (in its key buffer) and encode it.
  ItemExpr* convNode = NULL;

  // errorsCanOccur() determines if errors can occur converting the class
  // datatype to the target datatype. The object on whose behalf the
  // member function is called is expected to be a NAType.
  NABoolean needNarrow =
    keyValue->getValueId().getType().errorsCanOccur(*mdamHelper.getTargetType());
#ifdef _DEBUG
  // for testing -- allows turning off Narrows via the environment
  if (needNarrow && getenv("NO_NARROWS"))
    needNarrow = FALSE;
#endif

  if (needNarrow)
    convNode = new(generator->wHeap())
      Narrow(keyValue,
             mdamHelper.getDataConversionErrorFlag(),
             mdamHelper.getTargetType()->newCopy(generator->wHeap()));
  else
    convNode = new(generator->wHeap())
      Cast(keyValue,
           mdamHelper.getTargetType()->newCopy(generator->wHeap()));

  convNode->bindNode(generator->getBindWA());
  convNode->preCodeGen(generator);
#pragma nowarn(1506)   // warning elimination
  convNode = new(generator->wHeap()) CompEncode(convNode, mdamHelper.isDescending());
#pragma warn(1506)  // warning elimination
  convNode->bindNode(generator->getBindWA());

  // Materialize the encoded key value with a contiguous move expression.
  ValueIdList encodeList;
  encodeList.insert(convNode->getValueId());
  ULng32 ignoredLen = 0;
  short rc =
    generator->getExpGenerator()
      ->generateContiguousMoveExpr(encodeList,
                                   0,  // don't add convert nodes
                                   mdamHelper.getAtp(),
                                   mdamHelper.getAtpIndex(),
                                   mdamHelper.getTupleDataFormat(),
                                   ignoredLen,  // out
                                   vexpr);
  GenAssert(rc == 0, "generateContiguousMoveExpr() returned error when called "
            "from BiRelat::getMdamPredDetails().");
}
void ItemExprList::insertTree(ItemExpr *tree,
OperatorTypeEnum backBoneType,
NABoolean flattenSBQ, NABoolean flattenUDF)
{
if (tree->getOperatorType() == backBoneType)
{
for (Int32 i = 0; i < tree->getArity(); i++)
{
// Check for NULL list for right linear trees. That is, arity may be
// two, but second child is NULL.
ItemExpr *child = tree->child(i);
if (child)
insertTree(tree->child(i), backBoneType, flattenSBQ, flattenUDF);
}
}
else if (tree->getOperatorType() == ITM_ONE_ROW)
{
Aggregate *agr = (Aggregate *)tree;
if (agr->isOneRowTransformed_)
{
for (Int32 i = 0; i < tree->getArity(); i++)
insertTree(tree->child(i), backBoneType, flattenSBQ, flattenUDF);
}
else
{
// do nothing, postpone this processing until OneRow transformation
// is done
}
}
else if ((flattenSBQ AND tree->isASubquery()) OR
(flattenUDF AND
(tree->getOperatorType() == ITM_USER_DEF_FUNCTION)) AND
(NOT tree->nodeIsTransformed()))
// Added the extra check for transformation above to avoid any issues
// where we might flatten a subquery/MVF a second time around while
// we deal with ValueIdProxies.
// The ValueIdProxy->needToTransformChild()
// flag should be sufficient, but it never hurts to be safe.
{
ValueIdList cols;
NABoolean haveRDesc(FALSE);
if (tree->isASubquery())
{
// flatten the subquery select list
RETDesc *retDesc = ((Subquery*)tree)->getSubquery()->getRETDesc();
if (retDesc)
{
retDesc->getColumnList()->getValueIdList(cols);
if (cols.entries() > 1)
{
haveRDesc = TRUE;
}
}
}
else if (tree->getOperatorType() == ITM_USER_DEF_FUNCTION)
{
// flatten the UDF by adding the additional outputs to the tree
const RoutineDesc *rDesc = ((UDFunction *)tree)->getRoutineDesc();
if (rDesc && rDesc->getOutputColumnList().entries() > 1)
{
cols = rDesc->getOutputColumnList();
haveRDesc = TRUE;
}
}
if (haveRDesc == TRUE)
{
for (CollIndex i = 0; i < cols.entries(); i++)
{
ValueId proxyId;
proxyId = cols[i];
// We create a ValueIdProxy for each element in the subquery's
// select list or for each output parameter of a MVF. The first
// one of these will be marked to be transformed. This allows
// us to get the correct degree of statements containing MVFs or
// subquery with degree > 1 at bind time.
ValueIdProxy *proxyOutput =
new (CmpCommon::statementHeap())
ValueIdProxy( tree->getValueId(),
proxyId,
i);
proxyOutput->synthTypeAndValueId();
// Make sure we transform the subquery or MVF
if (i == 0 ) proxyOutput->setTransformChild(TRUE);
insert(proxyOutput);
}
}
else
insert(tree); // we are processing a valueId of a UDFunction
//......... part of the code is omitted here .........
void RangePartitioningFunction::generatePivLayout(
Generator *generator,
Lng32 &partitionInputDataLength,
Lng32 atp,
Lng32 atpIndex,
Attributes ***pivAttrs)
{
// Make a layout of the partition input data record such that
// begin and end key are aligned in the same way.
// (layout = ((beg. key) (filler1) (end key) (filler2) (exclusion flag)))
ExpGenerator *expGen = generator->getExpGenerator();
CollIndex numPartInputs = getPartitionInputValuesLayout().entries();
CollIndex numPartKeyCols = (numPartInputs - 1) / 2;
// the number of partition input variables must be odd
GenAssert(2*numPartKeyCols+1 == numPartInputs,
"NOT 2*numPartKeyCols+1 == numPartInputs");
// ---------------------------------------------------------------------
// Start by processing the begin key PIVs
// ---------------------------------------------------------------------
ValueIdList partialPivs;
Attributes **returnedAttrs = NULL;
Attributes **localPartialAttrs;
Lng32 maxAlignment = 1;
Lng32 alignedPartKeyLen;
if (pivAttrs)
{
returnedAttrs = new(generator->wHeap()) Attributes *[numPartInputs];
}
CollIndex i = 0;
for (i = 0; i < numPartKeyCols; i++)
partialPivs.insert(getPartitionInputValuesLayout()[i]);
expGen->processValIdList(
partialPivs,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
(ULng32 &) partitionInputDataLength,
atp,
atpIndex,
NULL,
ExpTupleDesc::SHORT_FORMAT,
0,
&localPartialAttrs);
if (returnedAttrs)
for (i = 0; i < numPartKeyCols; i++)
returnedAttrs[i] = localPartialAttrs[i];
// ---------------------------------------------------------------------
// Now find out the max. alignment that is needed in the begin key,
// make sure that the end key starts on an offset that is a
// multiple of the max. alignment in the partition input values
// ---------------------------------------------------------------------
for (i = 0; i < numPartKeyCols; i++)
{
if (localPartialAttrs[i]->getDataAlignmentSize() > maxAlignment)
maxAlignment = localPartialAttrs[i]->getDataAlignmentSize();
if (localPartialAttrs[i]->getVCIndicatorLength() > maxAlignment)
maxAlignment = localPartialAttrs[i]->getVCIndicatorLength();
if (localPartialAttrs[i]->getNullIndicatorLength() > maxAlignment)
maxAlignment = localPartialAttrs[i]->getNullIndicatorLength();
}
alignedPartKeyLen = partitionInputDataLength;
while (alignedPartKeyLen % maxAlignment != 0)
alignedPartKeyLen++;
// ---------------------------------------------------------------------
// Now that we are starting on a good offset, process the end key
// ---------------------------------------------------------------------
partialPivs.clear();
for (i = numPartKeyCols; i < numPartInputs-1; i++)
partialPivs.insert(getPartitionInputValuesLayout()[i]);
expGen->processValIdList(
partialPivs,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
(ULng32 &) partitionInputDataLength,
atp,
atpIndex,
NULL,
ExpTupleDesc::SHORT_FORMAT,
alignedPartKeyLen,
&localPartialAttrs);
if (returnedAttrs)
for (i = numPartKeyCols; i < numPartInputs-1; i++)
returnedAttrs[i] = localPartialAttrs[i-numPartKeyCols];
// ---------------------------------------------------------------------
// Process the exclusion flag at offset 2*alignedPartKeyLen
// ---------------------------------------------------------------------
partialPivs.clear();
partialPivs.insert(getPartitionInputValuesLayout()[numPartInputs-1]);
expGen->processValIdList(
//......... part of the code is omitted here .........
short RangePartitioningFunction::codeGen(Generator *generator,
Lng32 partInputDataLength)
{
ExpGenerator * exp_gen = generator->getExpGenerator();
Lng32 myOwnPartInputDataLength;
const Int32 pivMoveAtp = 0; // only one atp is used for this expr
const Int32 pivMoveAtpIndex = 2; // 0: consts, 1: temps, 2: result
const ExpTupleDesc::TupleDataFormat pivFormat = // format of PIVs
ExpTupleDesc::SQLARK_EXPLODED_FORMAT;
ex_cri_desc *partInputCriDesc = new(generator->getSpace())
ex_cri_desc(pivMoveAtpIndex+1,
generator->getSpace());
ExpTupleDesc *partInputTupleDesc;
ExRangePartInputData *generatedObject = NULL;
// get the list of partition input variables
ValueIdList piv(getPartitionInputValuesLayout());
CollIndex numPartInputs = piv.entries();
CollIndex numPartKeyCols = (numPartInputs - 1) / 2;
// the number of partition input variables must be odd
GenAssert(2*numPartKeyCols+1 == numPartInputs,
"NOT 2*numPartKeyCols+1 == numPartInputs");
Attributes **begEndAttrs;
Int32 alignedPartKeyLen;
// make a layout of the partition input data record
generatePivLayout(
generator,
myOwnPartInputDataLength,
pivMoveAtp,
pivMoveAtpIndex,
&begEndAttrs);
// the aligned part key length is where the end key values start
alignedPartKeyLen = (Int32) begEndAttrs[numPartKeyCols]->getOffset();
if (begEndAttrs[numPartKeyCols]->getNullIndicatorLength() > 0)
alignedPartKeyLen = MINOF(
alignedPartKeyLen,
(Int32)begEndAttrs[numPartKeyCols]->getNullIndOffset());
if (begEndAttrs[numPartKeyCols]->getVCIndicatorLength() > 0)
alignedPartKeyLen = MINOF(
alignedPartKeyLen,
begEndAttrs[numPartKeyCols]->getVCLenIndOffset());
// generate a tuple desc for the whole PIV record and a cri desc
partInputTupleDesc = new(generator->getSpace()) ExpTupleDesc(
numPartInputs,
begEndAttrs,
myOwnPartInputDataLength,
pivFormat,
ExpTupleDesc::LONG_FORMAT,
generator->getSpace());
partInputCriDesc->setTupleDescriptor(pivMoveAtpIndex,partInputTupleDesc);
// make sure we fulfill the assertions we made
// optimizer and generator should agree on the part input data length
GenAssert(partInputDataLength == (Lng32) myOwnPartInputDataLength,
"NOT partInputDataLength == myOwnPartInputDataLength");
// the length of the begin key and the end key must be the same
// (compare offsets of their last fields)
// Commented out because this check does not work. The check needs
// to compute the LENGTH of each key field, by subtracting the current
// offset from the next offset, taking into account varchar length
// and null indicator fields (which are not part of the length but
// increase the offset).
//GenAssert(begEndAttrs[numPartKeyCols-1]->getOffset() + alignedPartKeyLen ==
// begEndAttrs[2*numPartKeyCols-1]->getOffset(),
// "begin/end piv keys have different layouts");
#pragma nowarn(1506) // warning elimination
generatedObject = new(generator->getSpace()) ExRangePartInputData(
partInputCriDesc,
partInputDataLength,
alignedPartKeyLen, //len of one part key + filler
begEndAttrs[numPartInputs-1]->getOffset(),//offset of last field
getCountOfPartitions(),
generator->getSpace(),
TRUE); // uses expressions to calculate ranges in the executor
generatedObject->setPartitionExprAtp(pivMoveAtp);
generatedObject->setPartitionExprAtpIndex(pivMoveAtpIndex);
#pragma warn(1506) // warning elimination
// now fill in the individual partition boundaries
// (NOTE: there is one more than there are partitions)
ULng32 boundaryDataLength = 0;
for (Lng32 i = 0; i <= getCountOfPartitions(); i++)
{
const ItemExprList *iel = partitionBoundaries_->getBoundaryValues(i);
ex_expr * generatedExpr = NULL;
ValueIdList boundaryColValues;
ULng32 checkedBoundaryLength;
// convert the ItemExpressionList iel into a ValueIdList
//......... part of the code is omitted here .........
short RelInternalSP::codeGen(Generator * generator)
{
Space * space = generator->getSpace();
ExpGenerator * exp_gen = generator->getExpGenerator();
MapTable * last_map_table = generator->getLastMapTable();
ex_expr * input_expr = NULL;
ex_expr * output_expr = NULL;
////////////////////////////////////////////////////////////////////////////
//
// Returned atp layout:
//
// |--------------------------------|
// | input data | stored proc row |
// | ( I tupps ) | ( 1 tupp ) |
// |--------------------------------|
// <-- returned row to parent ---->
//
// input data: the atp input to this node by its parent.
// stored proc row: tupp where the row read from SP is moved.
//
////////////////////////////////////////////////////////////////////////////
ex_cri_desc * given_desc
= generator->getCriDesc(Generator::DOWN);
ex_cri_desc * returned_desc
= new(space) ex_cri_desc(given_desc->noTuples() + 1, space);
// cri descriptor for work atp has 3 entries:
// -- the first two entries for consts and temps.
// -- Entry 3(index #2) is where the input and output rows will be created.
ex_cri_desc * work_cri_desc = new(space) ex_cri_desc(3, space);
const Int32 work_atp = 1;
const Int32 work_atp_index = 2;
ExpTupleDesc * input_tuple_desc = NULL;
ExpTupleDesc * output_tuple_desc = NULL;
// Generate expression to create the input row that will be
// given to the stored proc.
// The input value is in sp->getProcAllParams()
// and has to be converted to sp->procType().
// Generate Cast node to convert procParam to ProcType.
// If procType is a varchar, explode it. This is done
// so that values could be extracted correctly.
ValueIdList procVIDList;
for (CollIndex i = 0; i < procTypes().entries(); i++)
{
Cast * cn;
if ((procTypes())[i].getType().getVarLenHdrSize() > 0)
{
// 5/9/98: add support for VARNCHAR
const CharType& char_type =
(CharType&)((procTypes())[i].getType());
// Explode varchars by moving them to a fixed field
// whose length is equal to the max length of varchar.
cn = new(generator->wHeap())
Cast ((getProcAllParamsVids())[i].getItemExpr(),
(new(generator->wHeap())
SQLChar(generator->wHeap(),
CharLenInfo(char_type.getStrCharLimit(), char_type.getDataStorageSize()),
char_type.supportsSQLnull(),
FALSE, FALSE, FALSE,
char_type.getCharSet(),
char_type.getCollation(),
char_type.getCoercibility()
/*
(procTypes())[i].getType().getNominalSize(),
(procTypes())[i].getType().supportsSQLnull()
*/
)
)
);
// Move the exploded field to a varchar field since
// procType is varchar.
// Can optimize by adding an option to convert node to
// blankpad. TBD.
//
cn = new(generator->wHeap())
Cast(cn, &((procTypes())[i].getType()));
}
else
cn = new(generator->wHeap()) Cast((getProcAllParamsVids())[i].getItemExpr(),
&((procTypes())[i].getType()));
cn->bindNode(generator->getBindWA());
procVIDList.insert(cn->getValueId());
}
ULng32 inputRowlen_ = 0;
exp_gen->generateContiguousMoveExpr(procVIDList, -1, /*add conv nodes*/
work_atp, work_atp_index,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
inputRowlen_,
//......... part of the code is omitted here .........
short ProbeCache::codeGen(Generator *generator)
{
ExpGenerator * exp_gen = generator->getExpGenerator();
Space * space = generator->getSpace();
MapTable * last_map_table = generator->getLastMapTable();
ex_cri_desc * given_desc
= generator->getCriDesc(Generator::DOWN);
ex_cri_desc * returned_desc
= new(space) ex_cri_desc(given_desc->noTuples() + 1, space);
// cri descriptor for work atp has 5 entries:
// entry #0 for const
// entry #1 for temp
// entry #2 for hash value of probe input data in Probe Cache Manager
// entry #3 for encoded probe input data in Probe Cache Manager
// enrry #4 for inner table row data in this operator's cache buffer
Int32 work_atp = 1;
ex_cri_desc * work_cri_desc = new(space) ex_cri_desc(5, space);
unsigned short hashValIdx = 2;
unsigned short encodedProbeDataIdx = 3;
unsigned short innerRowDataIdx = 4;
// generate code for child tree, and get its tdb and explain tuple.
child(0)->codeGen(generator);
ComTdb * child_tdb = (ComTdb *)(generator->getGenObj());
ExplainTuple *childExplainTuple = generator->getExplainTuple();
//////////////////////////////////////////////////////
// Generate up to 4 runtime expressions.
//////////////////////////////////////////////////////
// Will use child's char. inputs (+ execution count) for the next
// two runtime expressions.
ValueIdList inputsToUse = child(0).getGroupAttr()->getCharacteristicInputs();
inputsToUse.insert(generator->getOrAddStatementExecutionCount());
// Expression #1 gets the hash value of the probe input data
ValueIdList hvAsList;
// Executor has hard-coded assumption that the result is long,
// so add a Cast node to convert result to a long.
ItemExpr *probeHashAsIe = new (generator->wHeap())
HashDistPartHash(inputsToUse.rebuildExprTree(ITM_ITEM_LIST));
probeHashAsIe->bindNode(generator->getBindWA());
NumericType &nTyp = (NumericType &)probeHashAsIe->getValueId().getType();
GenAssert(nTyp.isSigned() == FALSE,
"Unexpected signed HashDistPartHash.");
GenAssert(probeHashAsIe->getValueId().getType().supportsSQLnullLogical()
== FALSE, "Unexpected nullable HashDistPartHash.");
ItemExpr *hvAsIe = new (generator->wHeap()) Cast(
probeHashAsIe,
new (generator->wHeap())
SQLInt(FALSE, // false == unsigned.
FALSE // false == not nullable.
));
hvAsIe->bindNode(generator->getBindWA());
hvAsList.insert(hvAsIe->getValueId());
ex_expr *hvExpr = NULL;
ULng32 hvLength;
exp_gen->generateContiguousMoveExpr(
hvAsList,
0, // don't add convert node
work_atp,
hashValIdx,
ExpTupleDesc::SQLARK_EXPLODED_FORMAT,
hvLength,
&hvExpr);
GenAssert(hvLength == sizeof(Lng32),
"Unexpected length of result of hash function.");
// Expression #2 encodes the probe input data for storage in
// the ProbeCacheManager.
ValueIdList encodeInputAsList;
CollIndex inputListIndex;
for (inputListIndex = 0;
inputListIndex < inputsToUse.entries();
inputListIndex++) {
ItemExpr *inputIe =
(inputsToUse[inputListIndex].getValueDesc())->getItemExpr();
if (inputIe->getValueId().getType().getVarLenHdrSize() > 0)
{
// This logic copied from Sort::codeGen().
//......... part of the code is omitted here .........
// [scrape footer] "Please leave a comment" (site boilerplate, not code)