--- /dev/null
+/*\r
+\r
+ Derby - Class org.apache.derby.impl.sql.compile.HashJoinStrategy\r
+\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements. See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to you under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License. You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+\r
+ */\r
+\r
+package org.apache.derby.impl.sql.compile;\r
+\r
+import org.apache.derby.iapi.sql.compile.CostEstimate;\r
+import org.apache.derby.iapi.sql.compile.ExpressionClassBuilderInterface;\r
+import org.apache.derby.iapi.sql.compile.JoinStrategy;\r
+import org.apache.derby.iapi.sql.compile.Optimizable;\r
+import org.apache.derby.iapi.sql.compile.Optimizer;\r
+import org.apache.derby.iapi.sql.compile.OptimizablePredicate;\r
+import org.apache.derby.iapi.sql.compile.OptimizablePredicateList;\r
+\r
+import org.apache.derby.iapi.sql.dictionary.DataDictionary;\r
+import org.apache.derby.iapi.sql.dictionary.ConglomerateDescriptor;\r
+\r
+import org.apache.derby.iapi.store.access.StoreCostController;\r
+import org.apache.derby.iapi.store.access.TransactionController;\r
+\r
+import org.apache.derby.iapi.services.compiler.MethodBuilder;\r
+\r
+import org.apache.derby.impl.sql.compile.ExpressionClassBuilder;\r
+import org.apache.derby.impl.sql.compile.ProjectRestrictNode;\r
+import org.apache.derby.impl.sql.compile.Predicate;\r
+\r
+import org.apache.derby.iapi.error.StandardException;\r
+\r
+import org.apache.derby.iapi.reference.SQLState;\r
+\r
+import org.apache.derby.iapi.services.cache.ClassSize;\r
+\r
+import org.apache.derby.iapi.services.sanity.SanityManager;\r
+\r
+import org.apache.derby.iapi.services.io.FormatableArrayHolder;\r
+import org.apache.derby.iapi.services.io.FormatableIntHolder;\r
+\r
+import org.apache.derby.iapi.util.JBitSet;\r
+\r
+import java.util.Vector;\r
+\r
+/**
+ * Hash join strategy: the inner table is materialized into an in-memory
+ * hash table keyed on equijoin columns, and outer rows probe that table.
+ * Feasibility requires a materializable inner table and at least one
+ * equijoin predicate on a column of the chosen conglomerate (see
+ * {@link #feasible} and {@link #findHashKeyColumns}).
+ */
+public class HashJoinStrategy extends BaseJoinStrategy {
+ public HashJoinStrategy() {
+ }
+\r
+ /**
+ * Determine whether a hash join on the inner table is feasible:
+ * the inner table must be materializable, must not be the target
+ * table of an update/delete, its join predicates must not directly
+ * reference base tables beneath this node, and at least one
+ * equijoin hash key column must be found in the predicate list.
+ *
+ * @see JoinStrategy#feasible
+ *
+ * @exception StandardException Thrown on error
+ */
+ public boolean feasible(Optimizable innerTable,
+ OptimizablePredicateList predList,
+ Optimizer optimizer
+ )
+ throws StandardException 
+ {
+ int[] hashKeyColumns = null;
+
+ // Only set for base tables; null means "not a scan" downstream.
+ ConglomerateDescriptor cd = null;
+
+ /* If the innerTable is a VTI, then we
+ * must check to see if there are any
+ * join columns in the VTI's parameters.
+ * If so, then hash join is not feasible.
+ */
+ if (! innerTable.isMaterializable())
+ {
+
+ optimizer.trace(Optimizer.HJ_SKIP_NOT_MATERIALIZABLE, 0, 0, 0.0,
+ null);
+ return false;
+ }
+
+ /* Don't consider hash join on the target table of an update/delete.
+ * RESOLVE - this is a temporary restriction. Problem is that we
+ * do not put RIDs into the row in the hash table when scanning
+ * the heap and we need them for a target table.
+ */
+ if (innerTable.isTargetTable())
+ {
+ return false;
+ }
+
+ /* If the predicate given by the user _directly_ references
+ * any of the base tables _beneath_ this node, then we
+ * cannot safely use the predicate for a hash because the
+ * predicate correlates two nodes at different nesting levels. 
+ * If we did a hash join in this case, materialization of
+ * innerTable could lead to incorrect results--and in particular,
+ * results that are missing rows. We can check for this by
+ * looking at the predicates' reference maps, which are set based
+ * on the initial query (as part of pre-processing). Note that
+ * by the time we get here, it's possible that a predicate's
+ * reference map holds table numbers that do not agree with the
+ * table numbers of the column references used by the predicate.
+ * That's okay--this occurs as a result of "remapping" predicates
+ * that have been pushed down the query tree. And in fact
+ * it's a good thing because, by looking at the column reference's
+ * own table numbers instead of the predicate's referenced map,
+ * we are more readily able to find equijoin predicates that
+ * we otherwise would not have found.
+ *
+ * Note: do not perform this check if innerTable is a FromBaseTable
+ * because a base table does not have a "subtree" to speak of.
+ */
+ if ((predList != null) && (predList.size() > 0) &&
+ !(innerTable instanceof FromBaseTable))
+ {
+ FromTable ft = (FromTable)innerTable;
+
+ // First get a list of all of the base tables in the subtree
+ // below innerTable.
+ JBitSet tNums = new JBitSet(ft.getReferencedTableMap().size());
+ BaseTableNumbersVisitor btnVis = new BaseTableNumbersVisitor(tNums);
+ ft.accept(btnVis);
+
+ // Now get a list of all table numbers referenced by the
+ // join predicates that we'll be searching.
+ JBitSet pNums = new JBitSet(tNums.size());
+ Predicate pred = null;
+ for (int i = 0; i < predList.size(); i++)
+ {
+ pred = (Predicate)predList.getOptPredicate(i);
+ if (pred.isJoinPredicate())
+ pNums.or(pred.getReferencedSet());
+ }
+
+ // If tNums and pNums have anything in common, then at
+ // least one predicate in the list refers directly to
+ // a base table beneath this node (as opposed to referring
+ // just to this node), which means it's not safe to do a
+ // hash join.
+ tNums.and(pNums);
+ if (tNums.getFirstSetBit() != -1)
+ return false;
+ }
+
+ if (innerTable.isBaseTable())
+ {
+ /* Must have an equijoin on a column in the conglomerate */
+ cd = innerTable.getCurrentAccessPath().getConglomerateDescriptor();
+ }
+ 
+ /* Look for equijoins in the predicate list */
+ hashKeyColumns = findHashKeyColumns(
+ innerTable,
+ cd,
+ predList);
+
+ if (SanityManager.DEBUG)
+ {
+ if (hashKeyColumns == null)
+ {
+ optimizer.trace(Optimizer.HJ_SKIP_NO_JOIN_COLUMNS, 0, 0, 0.0, null);
+ }
+ else
+ {
+ optimizer.trace(Optimizer.HJ_HASH_KEY_COLUMNS, 0, 0, 0.0, hashKeyColumns);
+ }
+ }
+
+ // No equijoin on any usable column ==> hash join is not feasible.
+ if (hashKeyColumns == null)
+ {
+ return false;
+ }
+
+ return true;
+ }
+\r
+ /** @see JoinStrategy#ignoreBulkFetch */
+ public boolean ignoreBulkFetch() {
+ // This strategy materializes the inner table into a hash table,
+ // so bulk fetch does not apply.
+ return true;
+ }
+
+ /** @see JoinStrategy#multiplyBaseCostByOuterRows */
+ public boolean multiplyBaseCostByOuterRows() {
+ // The hash table is built once, not re-scanned per outer row
+ // (see the comment in estimateCost).
+ return false;
+ }
+\r
+ /**
+ * Move every predicate from predList whose referenced-table map is
+ * covered by the inner table into basePredicates, then classify the
+ * collected predicates against the inner table's current conglomerate.
+ *
+ * @see JoinStrategy#getBasePredicates
+ *
+ * @exception StandardException Thrown on error
+ */
+ public OptimizablePredicateList getBasePredicates(
+ OptimizablePredicateList predList,
+ OptimizablePredicateList basePredicates,
+ Optimizable innerTable)
+ throws StandardException {
+ if (SanityManager.DEBUG) {
+ SanityManager.ASSERT(basePredicates.size() == 0,
+ "The base predicate list should be empty.");
+ }
+
+ // Walk backwards so removing by index doesn't skip elements.
+ for (int i = predList.size() - 1; i >= 0; i--) {
+ OptimizablePredicate pred = predList.getOptPredicate(i);
+
+ if (innerTable.getReferencedTableMap().contains(pred.getReferencedMap()))
+ {
+ basePredicates.addOptPredicate(pred);
+ predList.removeOptPredicate(i);
+ }
+ }
+
+ basePredicates.classify(\r
+ innerTable,
+ innerTable.getCurrentAccessPath().getConglomerateDescriptor());
+
+ return basePredicates;
+ }
+\r
+ /**
+ * Combined selectivity of the non-base predicates: the product of the
+ * individual predicate selectivities, skipping redundant join
+ * predicates. Returns 1.0 when predList is null or empty.
+ *
+ * @see JoinStrategy#nonBasePredicateSelectivity
+ */
+ public double nonBasePredicateSelectivity(
+ Optimizable innerTable,
+ OptimizablePredicateList predList) 
+ throws StandardException {
+ double retval = 1.0;
+
+ if (predList != null) {
+ for (int i = 0; i < predList.size(); i++) {
+ // Don't include redundant join predicates in selectivity calculations
+ if (predList.isRedundantPredicate(i))
+ {
+ continue;
+ }
+
+ retval *= predList.getOptPredicate(i).selectivity(innerTable);
+ }
+ }
+
+ return retval;
+ }
+ \r
+ /**
+ * Move all predicates back from basePredicates into predList,
+ * emptying basePredicates. This is the inverse of getBasePredicates.
+ *
+ * @see JoinStrategy#putBasePredicates
+ *
+ * @exception StandardException Thrown on error
+ */
+ public void putBasePredicates(OptimizablePredicateList predList,
+ OptimizablePredicateList basePredicates)
+ throws StandardException {
+ // Walk backwards so removing by index doesn't skip elements.
+ for (int i = basePredicates.size() - 1; i >= 0; i--) {
+ OptimizablePredicate pred = basePredicates.getOptPredicate(i);
+
+ predList.addOptPredicate(pred);
+ basePredicates.removeOptPredicate(i);
+ }
+ }
+\r
+ /** @see JoinStrategy#estimateCost */
+ public void estimateCost(Optimizable innerTable,
+ OptimizablePredicateList predList,
+ ConglomerateDescriptor cd,
+ CostEstimate outerCost,
+ Optimizer optimizer,
+ CostEstimate costEstimate) {
+ /*
+ ** The cost of a hash join is the cost of building the hash table.
+ ** There is no extra cost per outer row, so don't do anything here.
+ */
+ }
+\r
+ /**
+ * Maximum number of rows the hash table may hold: the user-specified
+ * capacity when one was given (>= 0), otherwise the per-table memory
+ * budget divided by the estimated per-row footprint (row usage plus
+ * hash-entry overhead).
+ *
+ * @see JoinStrategy#maxCapacity
+ */
+ public int maxCapacity( int userSpecifiedCapacity,
+ int maxMemoryPerTable,
+ double perRowUsage) {
+ if( userSpecifiedCapacity >= 0)
+ return userSpecifiedCapacity;
+ perRowUsage += ClassSize.estimateHashEntrySize();
+ // Guard against division blowing up for tiny per-row estimates.
+ if( perRowUsage <= 1)
+ return maxMemoryPerTable;
+ return (int)(maxMemoryPerTable/perRowUsage);
+ }
+
+ /** @see JoinStrategy#getName */
+ public String getName() {
+ return "HASH";
+ }
+
+ /** @see JoinStrategy#scanCostType */
+ public int scanCostType() {
+ return StoreCostController.STORECOST_SCAN_SET;
+ }
+
+ /** @see JoinStrategy#resultSetMethodName */
+ public String resultSetMethodName(boolean bulkFetch, boolean multiprobe) {
+ // Both flags are ignored: hash scans support neither bulk fetch
+ // nor IN-list multi-probing (see getScanArgs).
+ return "getHashScanResultSet";
+ }
+
+ /** @see JoinStrategy#joinResultSetMethodName */
+ public String joinResultSetMethodName() {
+ return "getHashJoinResultSet";
+ }
+
+ /** @see JoinStrategy#halfOuterJoinResultSetMethodName */
+ public String halfOuterJoinResultSetMethodName() {
+ return "getHashLeftOuterJoinResultSet";
+ }
+\r
+ /**
+ * Generate the arguments for the hash scan result set: the common
+ * scan arguments, the qualifiers for probing the hash table, the
+ * hash table sizing parameters, and the hash key column positions
+ * (saved as a FormatableArrayHolder).
+ *
+ * @see JoinStrategy#getScanArgs
+ *
+ * @exception StandardException Thrown on error
+ */
+ public int getScanArgs(
+ TransactionController tc,
+ MethodBuilder mb,
+ Optimizable innerTable,
+ OptimizablePredicateList storeRestrictionList,
+ OptimizablePredicateList nonStoreRestrictionList,
+ ExpressionClassBuilderInterface acbi,
+ int bulkFetch,
+ MethodBuilder resultRowAllocator,
+ int colRefItem,
+ int indexColItem,
+ int lockMode,
+ boolean tableLocked,
+ int isolationLevel,
+ int maxMemoryPerTable,
+ boolean genInListVals
+ )
+ throws StandardException
+ {
+ /* We do not currently support IN-list "multi-probing" for hash scans
+ * (though we could do so in the future). So if we're doing a hash
+ * join then we shouldn't have any IN-list probe predicates in the
+ * store restriction list at this point. The reason is that, in the
+ * absence of proper multi-probing logic, such predicates would act
+ * as restrictions on the rows read from disk. That would be wrong
+ * because a probe predicate is of the form "col = <val>" where <val>
+ * is the first value in the IN-list. Enforcement of that restriction
+ * would lead to incorrect results--we need to return all rows having
+ * any value that appears in the IN-list, not just those rows matching
+ * the first value. Checks elsewhere in the code should ensure that
+ * no probe predicates have made it this far, but if we're running in
+ * SANE mode it doesn't hurt to verify.
+ */
+ if (SanityManager.DEBUG)
+ {
+ Predicate pred = null;
+ for (int i = storeRestrictionList.size() - 1; i >= 0; i--)
+ {
+ pred = (Predicate)storeRestrictionList.getOptPredicate(i);
+ if (pred.isInListProbePredicate())
+ {
+ SanityManager.THROWASSERT("Found IN-list probing " +
+ "(" + pred.binaryRelOpColRefsToString() +
+ ") while generating HASH join, which should " +
+ "not happen.");
+ }
+ }
+ }
+
+ ExpressionClassBuilder acb = (ExpressionClassBuilder) acbi;
+
+ fillInScanArgs1(tc,
+ mb,
+ innerTable,
+ storeRestrictionList,
+ acb,
+ resultRowAllocator);
+
+ nonStoreRestrictionList.generateQualifiers(acb, mb, innerTable, true);
+ mb.push(innerTable.initialCapacity());
+ mb.push(innerTable.loadFactor());
+ mb.push(innerTable.maxCapacity( (JoinStrategy) this, maxMemoryPerTable));
+ /* Get the hash key columns and wrap them in a formattable */
+ int[] hashKeyColumns = innerTable.hashKeyColumns();
+ FormatableIntHolder[] fihArray = 
+ FormatableIntHolder.getFormatableIntHolders(hashKeyColumns); 
+ FormatableArrayHolder hashKeyHolder = new FormatableArrayHolder(fihArray);
+ int hashKeyItem = acb.addItem(hashKeyHolder);
+ mb.push(hashKeyItem);
+
+ fillInScanArgs2(mb,
+ innerTable,
+ bulkFetch,
+ colRefItem,
+ indexColItem,
+ lockMode,
+ tableLocked,
+ isolationLevel);
+
+ // NOTE(review): presumably the total argument count pushed for
+ // getHashScanResultSet (fillInScanArgs1/2 plus the pushes above) --
+ // confirm against the result set factory signature if it changes.
+ return 28;
+ }
+\r
+ /**
+ * Split the original restriction list into the lists needed for a
+ * hash join: requalification predicates (all of them, for non-covering
+ * indexes), store predicates applied while building the hash table,
+ * and non-store predicates applied while probing it. Also determines
+ * and records the hash key columns on the inner table, ordering the
+ * equality predicates on those columns first in the probe list.
+ *
+ * @see JoinStrategy#divideUpPredicateLists
+ *
+ * @exception StandardException Thrown on error
+ */
+ public void divideUpPredicateLists(
+ Optimizable innerTable,
+ OptimizablePredicateList originalRestrictionList,
+ OptimizablePredicateList storeRestrictionList,
+ OptimizablePredicateList nonStoreRestrictionList,
+ OptimizablePredicateList requalificationRestrictionList,
+ DataDictionary dd
+ ) throws StandardException
+ {
+ /*
+ ** If we are walking a non-covering index, then all predicates that
+ ** get evaluated in the HashScanResultSet, whether during the building
+ ** or probing of the hash table, need to be evaluated at both the
+ ** IndexRowToBaseRowResultSet and the HashScanResultSet to ensure
+ ** that the rows materialized into the hash table still qualify when
+ ** we go to read the row from the heap. This also includes predicates
+ ** that are not qualifier/start/stop keys (hence not in store/non-store
+ ** list).
+ */
+ originalRestrictionList.copyPredicatesToOtherList(
+ requalificationRestrictionList);
+
+ ConglomerateDescriptor cd =
+ innerTable.getTrulyTheBestAccessPath().getConglomerateDescriptor();
+
+ /* For the inner table of a hash join, then divide up the predicates:
+ *
+ * o restrictionList - predicates that get applied when creating 
+ * the hash table (single table clauses)
+ *
+ * o nonBaseTableRestrictionList
+ * - those that get applied when probing into the 
+ * hash table (equijoin clauses on key columns,
+ * ordered by key column position first, followed
+ * by any other join predicates. (All predicates
+ * in this list are qualifiers which can be 
+ * evaluated in the store).
+ *
+ * o baseTableRL - Only applicable if this is not a covering 
+ * index. In that case, we will need to 
+ * requalify the data page. Thus, this list 
+ * will include all predicates.
+ */
+
+ // Build the list to be applied when creating the hash table
+ originalRestrictionList.transferPredicates(
+ storeRestrictionList,
+ innerTable.getReferencedTableMap(),
+ innerTable);
+
+ /* 
+ * Eliminate any non-qualifiers that may have been pushed, but
+ * are redundant and not useful for hash join.
+ * 
+ * For instance "in" (or other non-qualifier) was pushed down for 
+ * start/stop key, * but for hash join, it may no longer be because 
+ * previous key column may have been disqualified (eg., correlation). 
+ * We simply remove 
+ * such non-qualifier ("in") because we left it as residual predicate 
+ * anyway. It's easier/safer to filter it out here than detect it 
+ * ealier (and not push it down). Beetle 4316.
+ *
+ * Can't filter out OR list, as it is not a residual predicate, 
+ */
+ // Walk backwards so removing by index doesn't skip elements.
+ for (int i = storeRestrictionList.size() - 1; i >= 0; i--)
+ {
+ Predicate p1 = (Predicate) storeRestrictionList.getOptPredicate(i);
+
+ 
+ if (!p1.isStoreQualifier() && !p1.isStartKey() && !p1.isStopKey())
+ {
+ storeRestrictionList.removeOptPredicate(i);
+ }
+ }
+
+ for (int i = originalRestrictionList.size() - 1; i >= 0; i--)
+ {
+ Predicate p1 = 
+ (Predicate) originalRestrictionList.getOptPredicate(i);
+
+ if (!p1.isStoreQualifier())
+ originalRestrictionList.removeOptPredicate(i);
+ }
+
+ /* Copy the rest of the predicates to the non-store list */
+ originalRestrictionList.copyPredicatesToOtherList(
+ nonStoreRestrictionList);
+
+ /* If innerTable is ProjectRestrictNode, we need to use its child
+ * to find hash key columns, this is because ProjectRestrictNode may
+ * not have underlying node's every result column as result column,
+ * and the predicate's column reference was bound to the underlying
+ * node's column position. Also we have to pass in the 
+ * ProjectRestrictNode rather than the underlying node to this method
+ * because a predicate's referencedTableMap references the table number
+ * of the ProjectRestrictiveNode. And we need this info to see if
+ * a predicate is in storeRestrictionList that can be pushed down.
+ * Beetle 3458.
+ */
+ Optimizable hashTableFor = innerTable;
+ if (innerTable instanceof ProjectRestrictNode)
+ {
+ ProjectRestrictNode prn = (ProjectRestrictNode) innerTable;
+ if (prn.getChildResult() instanceof Optimizable)
+ hashTableFor = (Optimizable) (prn.getChildResult());
+ }
+ int[] hashKeyColumns = findHashKeyColumns(hashTableFor,
+ cd,
+ nonStoreRestrictionList);
+ if (hashKeyColumns != null)
+ {
+ innerTable.setHashKeyColumns(hashKeyColumns);
+ }
+ else
+ {
+ // feasible() should have guaranteed a hash key; if none is
+ // found now, report the conglomerate (index name or base
+ // table name) that lacks an equijoin.
+ String name;
+ if (cd != null && cd.isIndex())
+ {
+ name = cd.getConglomerateName();
+ }
+ else
+ {
+ name = innerTable.getBaseTableName();
+ }
+
+ throw StandardException.newException(SQLState.LANG_HASH_NO_EQUIJOIN_FOUND, 
+ name,
+ innerTable.getBaseTableName());
+ }
+
+ // Mark all of the predicates in the probe list as qualifiers
+ nonStoreRestrictionList.markAllPredicatesQualifiers();
+
+ // Translate the zero-based positions from findHashKeyColumns()
+ // into one-based column numbers in the base conglomerate.
+ int[] conglomColumn = new int[hashKeyColumns.length];
+ if (cd != null && cd.isIndex())
+ {
+ /*
+ ** If the conglomerate is an index, get the column numbers of the
+ ** hash keys in the base heap.
+ */
+ for (int index = 0; index < hashKeyColumns.length; index++)
+ {
+ conglomColumn[index] =
+ cd.getIndexDescriptor().baseColumnPositions()[hashKeyColumns[index]];
+ }
+ }
+ else
+ {
+ /*
+ ** If the conglomerate is a heap, the column numbers of the hash
+ ** key are the column numbers returned by findHashKeyColumns().
+ **
+ ** NOTE: Must switch from zero-based to one-based
+ */
+ for (int index = 0; index < hashKeyColumns.length; index++)
+ {
+ conglomColumn[index] = hashKeyColumns[index] + 1;
+ }
+ }
+
+ /* Put the equality predicates on the key columns for the hash first.
+ * (Column # is columns[colCtr] from above.)
+ */
+ for (int index = hashKeyColumns.length - 1; index >= 0; index--)
+ {
+ nonStoreRestrictionList.putOptimizableEqualityPredicateFirst(
+ innerTable,
+ conglomColumn[index]);
+ }
+ }
+\r
+ /**
+ * @see JoinStrategy#isHashJoin
+ */
+ public boolean isHashJoin()
+ {
+ return true;
+ }
+
+ /**
+ * This strategy always materializes the inner table into a hash table.
+ *
+ * @see JoinStrategy#doesMaterialization
+ */
+ public boolean doesMaterialization()
+ {
+ return true;
+ }
+\r
+ /**
+ * Find the hash key columns, if any, to use with this join.
+ *
+ * @param innerTable The inner table of the join
+ * @param cd The conglomerate descriptor to use on inner table
+ * @param predList The predicate list to look for the equijoin in
+ *
+ * @return the numbers of the hash key columns, or null if no hash key column.
+ * Note: the returned values are zero-based indexes into the candidate
+ * column list (index key positions for an index, column positions for
+ * a heap), not one-based column numbers -- callers translate them
+ * (see divideUpPredicateLists).
+ *
+ * @exception StandardException Thrown on error
+ */
+ private int[] findHashKeyColumns(Optimizable innerTable,
+ ConglomerateDescriptor cd,
+ OptimizablePredicateList predList)
+ throws StandardException
+ {
+ if (predList == null)
+ return (int[]) null;
+
+ /* Find the column to use as the hash key.
+ * (There must be an equijoin condition on this column.)
+ * If cd is null, then Optimizable is not a scan.
+ * For indexes, we start at the first column in the key
+ * and walk the key columns until we find the first one with
+ * an equijoin condition on it. We do essentially the same
+ * for heaps. (From column 1 through column n.)
+ */
+ int[] columns = null;
+ if (cd == null)
+ {
+ // Not a scan: consider every returned column, numbered 1..n.
+ columns = new int[innerTable.getNumColumnsReturned()];
+ for (int j = 0; j < columns.length; j++)
+ {
+ columns[j] = j + 1;
+ }
+ }
+ else if (cd.isIndex())
+ {
+ // Index: candidates are the base column positions of the key.
+ columns = cd.getIndexDescriptor().baseColumnPositions();
+ }
+ else
+ {
+ // Heap: consider every column of the table, numbered 1..n.
+ columns =
+ new int[innerTable.getTableDescriptor().getNumberOfColumns()];
+ for (int j = 0; j < columns.length; j++)
+ {
+ columns[j] = j + 1;
+ }
+ }
+
+ // Build a Vector of all the hash key columns
+ int colCtr;
+ Vector hashKeyVector = new Vector();
+ for (colCtr = 0; colCtr < columns.length; colCtr++)
+ {
+ // Is there an equijoin condition on this column?
+ if (predList.hasOptimizableEquijoin(innerTable, columns[colCtr]))
+ {
+ // Store the position within "columns", not the column number.
+ hashKeyVector.addElement(new Integer(colCtr));
+ }
+ }
+
+ // Convert the Vector into an int[], if there are hash key columns
+ if (hashKeyVector.size() > 0)
+ {
+ int[] keyCols = new int[hashKeyVector.size()];
+ for (int index = 0; index < keyCols.length; index++)
+ {
+ keyCols[index] = ((Integer) hashKeyVector.elementAt(index)).intValue();
+ }
+ return keyCols;
+ }
+ else
+ return (int[]) null;
+ }
+\r
+ /** Returns the strategy name ("HASH"). */
+ public String toString() {
+ return getName();
+ }
+}