--- /dev/null
+/*\r
+\r
+ Derby - Class org.apache.derby.iapi.store.access.BackingStoreHashtable\r
+\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements. See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to you under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License. You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+\r
+ */\r
+\r
+package org.apache.derby.iapi.store.access;\r
+\r
+import org.apache.derby.iapi.services.sanity.SanityManager;\r
+\r
+import org.apache.derby.iapi.services.io.Storable;\r
+\r
+import org.apache.derby.iapi.error.StandardException; \r
+\r
+import org.apache.derby.iapi.types.CloneableObject;\r
+import org.apache.derby.iapi.types.DataValueDescriptor;\r
+\r
+import org.apache.derby.iapi.services.cache.ClassSize;\r
+\r
+import java.util.ArrayList;\r
+import java.util.Collections;\r
+import java.util.Enumeration;\r
+import java.util.HashMap;\r
+import java.util.Iterator;\r
+import java.util.List;\r
+import java.util.Properties; \r
+import java.util.NoSuchElementException;\r
+\r
+/**\r
+A BackingStoreHashtable is a utility class which will store a set of rows into\r
+an in memory hash table, or overflow the hash table to a temporary on disk \r
+structure.\r
+<p>\r
+All rows must contain the same number of columns, and the column at position\r
+N of all the rows must have the same format id. If the BackingStoreHashtable needs to be\r
+overflowed to disk, then an arbitrary row will be chosen and used as a template\r
+for creating the underlying overflow container.\r
+\r
+<p>\r
+The hash table will be built logically as follows (actual implementation\r
+may differ). The important points are that the hash value is the standard\r
+java hash value on the row[key_column_numbers[0], if key_column_numbers.length is 1,\r
+or row[key_column_numbers[0, 1, ...]] if key_column_numbers.length > 1, \r
+and that duplicate detection is done by the standard java duplicate detection provided by \r
+java.util.Hashtable.\r
+<p>\r
+<pre>\r
+import java.util.Hashtable;\r
+\r
+hash_table = new Hashtable();\r
+\r
+Object[] row;\r
+boolean needsToClone = rowSource.needsToClone();\r
+\r
+while((row = rowSource.getNextRowFromRowSource()) != null)\r
+{\r
+ if (needsToClone)\r
+ row = clone_row_from_row(row);\r
+\r
+ Object key = KeyHasher.buildHashKey(row, key_column_numbers);\r
+\r
+ if ((duplicate_value = \r
+ hash_table.put(key, row)) != null)\r
+ {\r
+ Vector row_vec;\r
+\r
+ // inserted a duplicate\r
+ if ((duplicate_value instanceof Vector))\r
+ {\r
+ row_vec = (Vector) duplicate_value;\r
+ }\r
+ else\r
+ {\r
+ // allocate vector to hold duplicates\r
+ row_vec = new Vector(2);\r
+\r
+ // insert original row into vector\r
+ row_vec.addElement(duplicate_value);\r
+\r
+ // put the vector as the data rather than the row\r
+ hash_table.put(key, row_vec);\r
+ }\r
+ \r
+ // insert new row into vector\r
+ row_vec.addElement(row);\r
+ }\r
+}\r
+</pre>\r
+\r
+**/\r
+\r
+public class BackingStoreHashtable\r
+{\r
+\r
+ /**************************************************************************\r
+ * Fields of the class\r
+ **************************************************************************\r
+ */\r
+ // Transaction under which a disk overflow table is created, if needed.\r
+ private TransactionController tc;\r
+ // In-memory portion: key -> single row, or List of rows for duplicates.\r
+ private HashMap hash_table;\r
+ // Columns of each row that together form the hash key.\r
+ private int[] key_column_numbers;\r
+ // If true, rows whose key already exists in the table are dropped.\r
+ private boolean remove_duplicates;\r
+ // If true, rows with a null value in any key column are skipped.\r
+ private boolean skipNullKeyColumns;\r
+ // Stats set via setAuxillaryRuntimeStats(), copied out by getAllRuntimeStats().\r
+ private Properties auxillary_runtimestats;\r
+ // Source drained by the constructor; may be null when rows come via putRow().\r
+ private RowSource row_source;\r
+ /* If max_inmemory_rowcnt > 0 then use that to decide when to spill to disk.\r
+ * Otherwise compute max_inmemory_size based on the JVM memory size when the BackingStoreHashtable\r
+ * is constructed and use that to decide when to spill to disk.\r
+ */\r
+ private long max_inmemory_rowcnt;\r
+ private long inmemory_rowcnt;\r
+ private long max_inmemory_size;\r
+ // If true the disk overflow table survives commit; otherwise it is dropped.\r
+ private boolean keepAfterCommit;\r
+\r
+ /**\r
+ * The estimated number of bytes used by ArrayList(0)\r
+ */ \r
+ private final static int ARRAY_LIST_SIZE =\r
+ ClassSize.estimateBaseFromCatalog(ArrayList.class);\r
+ \r
+ // Disk overflow portion of the table; null until the first spill.\r
+ private DiskHashtable diskHashtable;\r
+\r
+ /**************************************************************************\r
+ * Constructors for This class:\r
+ **************************************************************************\r
+ */\r
+ // No-arg construction is not supported; use the RowSource constructor.\r
+ private BackingStoreHashtable(){}\r
+\r
+ /**\r
+ * Create the BackingStoreHashtable from a row source.\r
+ * <p>\r
+ * This routine drains the RowSource. The performance characteristics\r
+ * depends on the number of rows inserted and the parameters to the \r
+ * constructor. \r
+ * <p>\r
+ * If the number of rows is <= "max_inmemory_rowcnt", then the rows are\r
+ * inserted into a java.util.HashMap. In this case no\r
+ * TransactionController is necessary, a "null" tc is valid.\r
+ * <p>\r
+ * If the number of rows is > "max_inmemory_rowcnt", then the rows will\r
+ * be all placed in some sort of Access temporary file on disk. This \r
+ * case requires a valid TransactionController.\r
+ *\r
+ * @param tc An open TransactionController to be used if the\r
+ * hash table needs to overflow to disk.\r
+ *\r
+ * @param row_source RowSource to read rows from.\r
+ *\r
+ * @param key_column_numbers The column numbers of the columns in the\r
+ * scan result row to be the key to the HashMap.\r
+ * "0" is the first column in the scan result\r
+ * row (which may be different than the first\r
+ * row in the table of the scan).\r
+ *\r
+ * @param remove_duplicates Should the HashMap automatically remove\r
+ * duplicates, or should it create the list of\r
+ * duplicates?\r
+ *\r
+ * @param estimated_rowcnt The estimated number of rows in the hash table.\r
+ * Pass in -1 if there is no estimate.\r
+ *\r
+ * @param max_inmemory_rowcnt\r
+ * The maximum number of rows to insert into the \r
+ * inmemory Hash table before overflowing to disk.\r
+ * Pass in -1 if there is no maximum.\r
+ *\r
+ * @param initialCapacity If not "-1" used to initialize the java HashMap\r
+ *\r
+ * @param loadFactor If not "-1" used to initialize the java HashMap\r
+ *\r
+ * @param skipNullKeyColumns Skip rows with a null key column, if true.\r
+ *\r
+ * @param keepAfterCommit If true the hash table is kept after a commit,\r
+ * if false the hash table is dropped on the next commit.\r
+ *\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public BackingStoreHashtable(\r
+ TransactionController tc,\r
+ RowSource row_source,\r
+ int[] key_column_numbers,\r
+ boolean remove_duplicates,\r
+ long estimated_rowcnt,\r
+ long max_inmemory_rowcnt,\r
+ int initialCapacity,\r
+ float loadFactor,\r
+ boolean skipNullKeyColumns,\r
+ boolean keepAfterCommit)\r
+ throws StandardException\r
+ {\r
+ this.key_column_numbers = key_column_numbers;\r
+ this.remove_duplicates = remove_duplicates;\r
+ this.row_source = row_source;\r
+ this.skipNullKeyColumns = skipNullKeyColumns;\r
+ this.max_inmemory_rowcnt = max_inmemory_rowcnt;\r
+ // An explicit row-count limit disables byte-based accounting entirely.\r
+ if( max_inmemory_rowcnt > 0)\r
+ max_inmemory_size = Long.MAX_VALUE;\r
+ else\r
+ // No row limit: allow roughly 1% of the current JVM heap before spilling.\r
+ max_inmemory_size = Runtime.getRuntime().totalMemory()/100;\r
+ this.tc = tc;\r
+ this.keepAfterCommit = keepAfterCommit;\r
+\r
+ // use passed in capacity and loadfactor if not -1, you must specify\r
+ // capacity if you want to specify loadfactor.\r
+ if (initialCapacity != -1)\r
+ {\r
+ hash_table = \r
+ ((loadFactor == -1) ? \r
+ new HashMap(initialCapacity) :\r
+ new HashMap(initialCapacity, loadFactor));\r
+ }\r
+ else\r
+ {\r
+ /* We want to create the hash table based on the estimated row\r
+ * count if a) we have an estimated row count (i.e. it's greater\r
+ * than zero) and b) we think we can create a hash table to\r
+ * hold the estimated row count without running out of memory.\r
+ * The check for "b" is required because, for deeply nested\r
+ * queries and/or queries with a high number of tables in\r
+ * their FROM lists, the optimizer can end up calculating\r
+ * some very high row count estimates--even up to the point of\r
+ * Double.POSITIVE_INFINITY (see DERBY-1259 for an explanation\r
+ * of how that can happen). In that case any attempts to\r
+ * create a hash table of size estimated_rowcnt can cause\r
+ * OutOfMemory errors when we try to create the hash table.\r
+ * So as a "red flag" for that kind of situation, we check to\r
+ * see if the estimated row count is greater than the max\r
+ * in-memory size for this table. Unit-wise this comparison\r
+ * is relatively meaningless: rows vs bytes. But if our\r
+ * estimated row count is greater than the max number of\r
+ * in-memory bytes that we're allowed to consume, then\r
+ * it's very likely that creating a hash table with a capacity\r
+ * of estimated_rowcnt will lead to memory problems. So in\r
+ * that particular case we leave hash_table null here and\r
+ * initialize it further below, using the estimated in-memory\r
+ * size of the first row to figure out what a reasonable size\r
+ * for the hash table might be.\r
+ */\r
+ hash_table = \r
+ (((estimated_rowcnt <= 0) || (row_source == null)) ?\r
+ new HashMap() :\r
+ (estimated_rowcnt < max_inmemory_size) ?\r
+ new HashMap((int) estimated_rowcnt) :\r
+ null);\r
+ }\r
+\r
+ if (row_source != null)\r
+ {\r
+ boolean needsToClone = row_source.needsToClone();\r
+\r
+ // Drain the row source completely, inserting each row.\r
+ DataValueDescriptor[] row;\r
+ while ((row = getNextRowFromRowSource()) != null)\r
+ {\r
+ // If we haven't initialized the hash_table yet then that's\r
+ // because a hash table with capacity estimated_rowcnt would\r
+ // probably cause memory problems. So look at the first row\r
+ // that we found and use that to create the hash table with\r
+ // an initial capacity such that, if it was completely full,\r
+ // it would still satisfy the max_inmemory condition. Note\r
+ // that this isn't a hard limit--the hash table can grow if\r
+ // needed.\r
+ if (hash_table == null)\r
+ {\r
+ // Check to see how much memory we think the first row\r
+ // is going to take, and then use that to set the initial\r
+ // capacity of the hash table.\r
+ double rowUsage = getEstimatedMemUsage(row);\r
+ hash_table =\r
+ new HashMap((int)(max_inmemory_size / rowUsage));\r
+ }\r
+ \r
+ add_row_to_hash_table(row, needsToClone);\r
+ }\r
+ }\r
+\r
+ // In the (unlikely) event that we received a "red flag" estimated_rowcnt\r
+ // that is too big (see comments above), it's possible that, if row_source\r
+ // was null or else didn't have any rows, hash_table could still be null\r
+ // at this point. So we initialize it to an empty hashtable (representing\r
+ // an empty result set) so that calls to other methods on this\r
+ // BackingStoreHashtable (ex. "size()") will have a working hash_table\r
+ // on which to operate.\r
+ if (hash_table == null)\r
+ hash_table = new HashMap();\r
+ }\r
+\r
+ /**************************************************************************\r
+ * Private/Protected methods of This class:\r
+ **************************************************************************\r
+ */\r
+\r
+ /**\r
+ * Call method to either get next row or next row with non-null\r
+ * key columns.\r
+ *\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ */\r
+    private DataValueDescriptor[] getNextRowFromRowSource()\r
+        throws StandardException\r
+    {\r
+        DataValueDescriptor[] candidate = row_source.getNextRowFromRowSource();\r
+\r
+        // Without the skip flag, every row from the source is acceptable.\r
+        if (!skipNullKeyColumns)\r
+        {\r
+            return candidate;\r
+        }\r
+\r
+        // Keep fetching until a row whose key columns are all non-null is\r
+        // found, or the source is exhausted (candidate == null).\r
+        while (candidate != null)\r
+        {\r
+            boolean hasNullKey = false;\r
+            for (int i = 0; i < key_column_numbers.length; i++)\r
+            {\r
+                if (candidate[key_column_numbers[i]].isNull())\r
+                {\r
+                    hasNullKey = true;\r
+                    break;\r
+                }\r
+            }\r
+            if (!hasNullKey)\r
+            {\r
+                break;\r
+            }\r
+            candidate = row_source.getNextRowFromRowSource();\r
+        }\r
+        return candidate;\r
+    }\r
+\r
+ /**\r
+ * Return a cloned copy of the row.\r
+ *\r
+ * @return The cloned row row to use.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+    /**\r
+     * Deep-clone a row: each non-null column is cloned via getClone().\r
+     *\r
+     * @param old_row the row to copy; null columns remain null\r
+     * @return a new array of cloned column values\r
+     * @exception StandardException Standard exception policy.\r
+     **/\r
+    private static DataValueDescriptor[] cloneRow(DataValueDescriptor[] old_row)\r
+        throws StandardException\r
+    {\r
+        int ncols = old_row.length;\r
+        DataValueDescriptor[] result = new DataValueDescriptor[ncols];\r
+\r
+        // getClone (unlike cloneObject) objectifies a stream. Beetle 4896.\r
+        for (int col = ncols - 1; col >= 0; col--)\r
+        {\r
+            DataValueDescriptor dvd = old_row[col];\r
+            if (dvd != null)\r
+            {\r
+                result[col] = dvd.getClone();\r
+            }\r
+        }\r
+        return result;\r
+    }\r
+\r
+ /**\r
+ * Return a shallow cloned row\r
+ *\r
+ * @return The cloned row row to use.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+    /**\r
+     * Shallow-clone a row: each non-null column is copied via cloneObject(),\r
+     * which, unlike getClone(), does not objectify a stream. DERBY-802\r
+     *\r
+     * @param old_row the row to copy; null columns remain null\r
+     * @return a new array of shallow-cloned column values\r
+     * @exception StandardException Standard exception policy.\r
+     **/\r
+    static DataValueDescriptor[] shallowCloneRow(DataValueDescriptor[] old_row)\r
+        throws StandardException\r
+    {\r
+        int ncols = old_row.length;\r
+        DataValueDescriptor[] result = new DataValueDescriptor[ncols];\r
+\r
+        for (int col = ncols - 1; col >= 0; col--)\r
+        {\r
+            DataValueDescriptor dvd = old_row[col];\r
+            if (dvd != null)\r
+            {\r
+                result[col] = (DataValueDescriptor)\r
+                    ((CloneableObject) dvd).cloneObject();\r
+            }\r
+        }\r
+        return result;\r
+    }\r
+\r
+ /**\r
+ * Do the work to add one row to the hash table.\r
+ * <p>\r
+ *\r
+ * @param row Row to add to the hash table.\r
+ * @param needsToClone If the row needs to be cloned\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private void add_row_to_hash_table(DataValueDescriptor[] row, boolean needsToClone)\r
+ throws StandardException\r
+ {\r
+ // Once spilling has started (or this row triggers it), the row goes to\r
+ // disk and the in-memory table is left untouched.\r
+ if (spillToDisk(row))\r
+ return;\r
+ \r
+ if (needsToClone)\r
+ {\r
+ row = cloneRow(row);\r
+ }\r
+ Object key = KeyHasher.buildHashKey(row, key_column_numbers);\r
+ Object duplicate_value = null;\r
+\r
+ if ((duplicate_value = hash_table.put(key, row)) == null)\r
+ doSpaceAccounting( row, false);\r
+ else\r
+ {\r
+ // Key already present. Note the put() above has already replaced\r
+ // the previous value with the new row; if remove_duplicates is\r
+ // true nothing further is done, so the new row simply stays.\r
+ if (!remove_duplicates)\r
+ {\r
+ List row_vec;\r
+\r
+ // inserted a duplicate\r
+ if (duplicate_value instanceof List)\r
+ {\r
+ doSpaceAccounting( row, false);\r
+ row_vec = (List) duplicate_value;\r
+ }\r
+ else\r
+ {\r
+ // allocate list to hold duplicates\r
+ row_vec = new ArrayList(2);\r
+\r
+ // insert original row into vector\r
+ row_vec.add(duplicate_value);\r
+ doSpaceAccounting( row, true);\r
+ }\r
+\r
+ // insert new row into list\r
+ row_vec.add(row);\r
+\r
+ // store list of rows back into hash table,\r
+ // overwriting the duplicate key that was \r
+ // inserted.\r
+ hash_table.put(key, row_vec);\r
+ }\r
+ }\r
+\r
+ // Drop the local reference; the table (or list) now owns the row.\r
+ row = null;\r
+ }\r
+\r
+    /**\r
+     * Record one in-memory insertion and, when byte-based accounting is\r
+     * active, charge the row's estimated size against the remaining budget.\r
+     *\r
+     * @param row            the row just inserted in memory\r
+     * @param firstDuplicate true when this insertion allocated the ArrayList\r
+     *                       that holds duplicates, which must be charged too\r
+     */\r
+    private void doSpaceAccounting(DataValueDescriptor[] row,\r
+                                   boolean firstDuplicate)\r
+    {\r
+        inmemory_rowcnt++;\r
+        // Byte accounting applies only when no explicit row-count limit\r
+        // was supplied (max_inmemory_rowcnt <= 0).\r
+        if (max_inmemory_rowcnt > 0)\r
+        {\r
+            return;\r
+        }\r
+        max_inmemory_size -= getEstimatedMemUsage(row);\r
+        if (firstDuplicate)\r
+        {\r
+            max_inmemory_size -= ARRAY_LIST_SIZE;\r
+        }\r
+    } // end of doSpaceAccounting\r
+\r
+ /**\r
+ * Determine whether a new row should be spilled to disk and, if so, do it.\r
+ *\r
+ * @param row\r
+ *\r
+ * @return true if the row was spilled to disk, false if not\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ */\r
+ private boolean spillToDisk(DataValueDescriptor[] row) throws StandardException {\r
+ // Once we have started spilling all new rows will go to disk, even if we have freed up some\r
+ // memory by moving duplicates to disk. This simplifies handling of duplicates and accounting.\r
+ if( diskHashtable == null)\r
+ {\r
+ if( max_inmemory_rowcnt > 0)\r
+ {\r
+ if( inmemory_rowcnt < max_inmemory_rowcnt)\r
+ return false; // Do not spill\r
+ }\r
+ else if( max_inmemory_size > getEstimatedMemUsage(row))\r
+ \r
+ return false;\r
+ // Want to start spilling\r
+ \r
+ // The current row serves as the template for the overflow container.\r
+ diskHashtable = \r
+ new DiskHashtable(\r
+ tc,\r
+ row,\r
+ (int[]) null, //TODO-COLLATION, set non default collation if necessary.\r
+ key_column_numbers,\r
+ remove_duplicates,\r
+ keepAfterCommit);\r
+ }\r
+ Object key = KeyHasher.buildHashKey(row, key_column_numbers);\r
+ Object duplicateValue = hash_table.get( key);\r
+ if( duplicateValue != null)\r
+ {\r
+ if( remove_duplicates)\r
+ return true; // a degenerate case of spilling\r
+ // If we are keeping duplicates then move all the duplicates from memory to disk\r
+ // This simplifies finding duplicates: they are either all in memory or all on disk.\r
+ // NOTE(review): max_inmemory_size is not credited back for the rows\r
+ // moved out of memory here -- presumably intentional, but confirm.\r
+ if (duplicateValue instanceof List)\r
+ {\r
+ List duplicateVec = (List) duplicateValue;\r
+ for( int i = duplicateVec.size() - 1; i >= 0; i--)\r
+ {\r
+ DataValueDescriptor[] dupRow = (DataValueDescriptor[]) duplicateVec.get(i);\r
+ diskHashtable.put( key, dupRow);\r
+ }\r
+ }\r
+ else\r
+ diskHashtable.put( key, (DataValueDescriptor[]) duplicateValue);\r
+ hash_table.remove( key);\r
+ }\r
+ diskHashtable.put( key, row);\r
+ return true;\r
+ } // end of spillToDisk\r
+\r
+ /**\r
+ * Take a row and return an estimate as to how much memory that\r
+ * row will consume.\r
+ * \r
+ * @param row The row for which we want to know the memory usage.\r
+ * @return A guess as to how much memory the current row will\r
+ * use.\r
+ */\r
+    /**\r
+     * Take a row and return an estimate as to how much memory that\r
+     * row will consume.\r
+     *\r
+     * @param row The row for which we want to know the memory usage.\r
+     * @return A guess as to how much memory the current row will use.\r
+     */\r
+    private long getEstimatedMemUsage(DataValueDescriptor[] row)\r
+    {\r
+        long rowMem = 0;\r
+        for (int i = 0; i < row.length; i++)\r
+        {\r
+            // Rows may legitimately contain null columns (cloneRow and\r
+            // shallowCloneRow both preserve nulls), so guard against a\r
+            // NullPointerException; a null column contributes only its\r
+            // reference slot to the estimate.\r
+            if (row[i] != null)\r
+            {\r
+                rowMem += row[i].estimateMemoryUsage();\r
+            }\r
+            rowMem += ClassSize.refSize;\r
+        }\r
+\r
+        // One more reference: the slot holding the row array itself.\r
+        rowMem += ClassSize.refSize;\r
+        return rowMem;\r
+    }\r
+\r
+ /**************************************************************************\r
+ * Public Methods of This class:\r
+ **************************************************************************\r
+ */\r
+\r
+ /**\r
+ * Close the BackingStoreHashtable.\r
+ * <p>\r
+ * Perform any necessary cleanup after finishing with the hashtable. Will\r
+ * deallocate/dereference objects as necessary. If the table has gone\r
+ * to disk this will drop any on disk files used to support the hash table.\r
+ * <p>\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+    public void close() \r
+        throws StandardException\r
+    {\r
+        // Release the in-memory table, then the disk overflow table (which\r
+        // drops its backing container) if one was ever created.\r
+        hash_table = null;\r
+        if (diskHashtable != null)\r
+        {\r
+            diskHashtable.close();\r
+            diskHashtable = null;\r
+        }\r
+    }\r
+\r
+ /**\r
+ * Return an Enumeration that can be used to scan entire table.\r
+ * <p>\r
+ * RESOLVE - is it worth it to support this routine when we have a\r
+ * disk overflow hash table?\r
+ *\r
+ * @return The Enumeration.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+    public Enumeration elements()\r
+        throws StandardException\r
+    {\r
+        // Without disk overflow the in-memory values are the whole table;\r
+        // otherwise chain the memory and disk portions together.\r
+        return (diskHashtable == null)\r
+            ? Collections.enumeration(hash_table.values())\r
+            : new BackingStoreHashtableEnumeration();\r
+    }\r
+\r
+ /**\r
+ * get data associated with given key.\r
+ * <p>\r
+ * There are 2 different types of objects returned from this routine.\r
+ * <p>\r
+ * In both cases, the key value is either the object stored in \r
+ * row[key_column_numbers[0]], if key_column_numbers.length is 1, \r
+ * otherwise it is a KeyHasher containing\r
+ * the objects stored in row[key_column_numbers[0, 1, ...]].\r
+ * For every qualifying unique row value an entry is placed into the \r
+ * hash table.\r
+ * <p>\r
+ * For row values with duplicates, the value of the data is a list of\r
+ * rows.\r
+ * <p>\r
+ * The caller will have to call "instanceof" on the data value\r
+ * object if duplicates are expected, to determine if the data value\r
+ * of the hash table entry is a row or is a list of rows.\r
+ * <p>\r
+ * The BackingStoreHashtable "owns" the objects returned from the get()\r
+ * routine. They remain valid until the next access to the \r
+ * BackingStoreHashtable. If the client needs to keep references to these\r
+ * objects, it should clone copies of the objects. A valid \r
+ * BackingStoreHashtable can place all rows into a disk based conglomerate,\r
+ * declare a row buffer and then reuse that row buffer for every get()\r
+ * call.\r
+ *\r
+ * @return The value to which the key is mapped in this hashtable; \r
+ * null if the key is not mapped to any value in this hashtable.\r
+ *\r
+ * @param key The key to hash on.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+    public Object get(Object key)\r
+        throws StandardException\r
+    {\r
+        Object value = hash_table.get(key);\r
+        // Only consult the disk table on a miss; memory and disk never\r
+        // hold the same key (spillToDisk moves entries wholesale).\r
+        if (value == null && diskHashtable != null)\r
+        {\r
+            value = diskHashtable.get(key);\r
+        }\r
+        return value;\r
+    }\r
+\r
+ /**\r
+ * Return runtime stats to caller by adding them to prop.\r
+ * <p>\r
+ *\r
+ * @param prop The set of properties to append to.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public void getAllRuntimeStats(Properties prop)\r
+ throws StandardException\r
+ {\r
+ if (auxillary_runtimestats != null)\r
+ org.apache.derby.iapi.util.PropertyUtil.copyProperties(auxillary_runtimestats, prop);\r
+ }\r
+\r
+ /**\r
+ * remove a row from the hash table.\r
+ * <p>\r
+ * a remove of a duplicate removes the entire duplicate list.\r
+ *\r
+ * @param key The key of the row to remove.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+    public Object remove(\r
+    Object key)\r
+        throws StandardException\r
+    {\r
+        Object removed = hash_table.remove(key);\r
+        // On a memory miss, fall through to the disk overflow table.\r
+        if (removed == null && diskHashtable != null)\r
+        {\r
+            removed = diskHashtable.remove(key);\r
+        }\r
+        return removed;\r
+    }\r
+\r
+ /**\r
+ * Set the auxillary runtime stats.\r
+ * <p>\r
+ * getRuntimeStats() will return both the auxillary stats and any\r
+ * BackingStoreHashtable() specific stats. Note that each call to\r
+ * setAuxillaryRuntimeStats() overwrites the Property set that was\r
+ * set previously.\r
+ *\r
+ * @param prop The set of properties to append from.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public void setAuxillaryRuntimeStats(Properties prop)\r
+ throws StandardException\r
+ {\r
+ // Replaces (does not merge with) any previously registered stats.\r
+ auxillary_runtimestats = prop;\r
+ }\r
+\r
+ /**\r
+ * Put a row into the hash table.\r
+ * <p>\r
+ * The in memory hash table will need to keep a reference to the row\r
+ * after the put call has returned. If "needsToClone" is true then the\r
+ * hash table will make a copy of the row and put that, else if \r
+ * "needsToClone" is false then the hash table will keep a reference to\r
+ * the row passed in and no copy will be made.\r
+ * <p>\r
+ * If routine returns false, then no reference is kept to the duplicate\r
+ * row which was rejected (thus allowing caller to reuse the object).\r
+ *\r
+ * @param needsToClone does this routine have to make a copy of the row,\r
+ * in order to keep a reference to it after return?\r
+ * @param row The row to insert into the table.\r
+ *\r
+ * @return true if row was inserted into the hash table. Returns\r
+ * false if the BackingStoreHashtable is eliminating \r
+ * duplicates, and the row being inserted is a duplicate,\r
+ * or if we are skipping rows with 1 or more null key columns\r
+ * and we find a null key column.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+    public boolean putRow(\r
+    boolean needsToClone,\r
+    DataValueDescriptor[] row)\r
+        throws StandardException\r
+    {\r
+        // Reject the row up front when null key columns are to be skipped\r
+        // and this row contains one.\r
+        if (skipNullKeyColumns)\r
+        {\r
+            for (int i = 0; i < key_column_numbers.length; i++)\r
+            {\r
+                if (row[key_column_numbers[i]].isNull())\r
+                {\r
+                    return false;\r
+                }\r
+            }\r
+        }\r
+\r
+        Object key = KeyHasher.buildHashKey(row, key_column_numbers);\r
+\r
+        // When eliminating duplicates, an existing entry (in memory or on\r
+        // disk) means the new row is rejected and no reference is kept.\r
+        if (remove_duplicates && get(key) != null)\r
+        {\r
+            return false;\r
+        }\r
+\r
+        add_row_to_hash_table(row, needsToClone);\r
+        return true;\r
+    }\r
+\r
+ /**\r
+ * Return number of unique rows in the hash table.\r
+ * <p>\r
+ *\r
+ * @return The number of unique rows in the hash table.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public int size()\r
+ throws StandardException\r
+ {\r
+ if( diskHashtable == null)\r
+ return(hash_table.size());\r
+ return hash_table.size() + diskHashtable.size();\r
+ }\r
+\r
+    /**\r
+     * Enumeration over both portions of a spilled table: first the\r
+     * in-memory values, then the disk overflow values.\r
+     */\r
+    private class BackingStoreHashtableEnumeration implements Enumeration\r
+    {\r
+        // Set to null once the in-memory portion is exhausted.\r
+        private Iterator memoryIterator;\r
+        // Null when there is no disk portion, or when fetching it failed.\r
+        private Enumeration diskEnumeration;\r
+\r
+        BackingStoreHashtableEnumeration()\r
+        {\r
+            memoryIterator = hash_table.values().iterator();\r
+            if (diskHashtable != null)\r
+            {\r
+                try\r
+                {\r
+                    diskEnumeration = diskHashtable.elements();\r
+                }\r
+                catch (StandardException se)\r
+                {\r
+                    // Enumeration cannot surface a checked exception;\r
+                    // treat the disk portion as empty.\r
+                    diskEnumeration = null;\r
+                }\r
+            }\r
+        }\r
+\r
+        public boolean hasMoreElements()\r
+        {\r
+            if (memoryIterator != null) {\r
+                if (memoryIterator.hasNext()) {\r
+                    return true;\r
+                }\r
+                memoryIterator = null;\r
+            }\r
+            if (diskEnumeration == null) {\r
+                return false;\r
+            }\r
+            return diskEnumeration.hasMoreElements();\r
+        }\r
+\r
+        public Object nextElement() throws NoSuchElementException\r
+        {\r
+            if (memoryIterator != null) {\r
+                if (memoryIterator.hasNext()) {\r
+                    return memoryIterator.next();\r
+                }\r
+                memoryIterator = null;\r
+            }\r
+            // Previously this dereferenced diskEnumeration unconditionally,\r
+            // throwing NullPointerException when no disk portion existed.\r
+            // Honor the declared Enumeration contract instead.\r
+            if (diskEnumeration == null) {\r
+                throw new NoSuchElementException();\r
+            }\r
+            return diskEnumeration.nextElement();\r
+        }\r
+    } // end of class BackingStoreHashtableEnumeration\r
+}\r