Adding JMCR-Stable version
diff --git a/JMCR-Stable/real-world application/derby-10.3.2.1/java/engine/org/apache/derby/iapi/store/access/DiskHashtable.java b/JMCR-Stable/real-world application/derby-10.3.2.1/java/engine/org/apache/derby/iapi/store/access/DiskHashtable.java
new file mode 100644 (file)
index 0000000..a05c7f8
--- /dev/null
@@ -0,0 +1,488 @@
+/*\r
+\r
+   Derby - Class org.apache.derby.iapi.store.access.DiskHashtable\r
+\r
+   Licensed to the Apache Software Foundation (ASF) under one or more\r
+   contributor license agreements.  See the NOTICE file distributed with\r
+   this work for additional information regarding copyright ownership.\r
+   The ASF licenses this file to you under the Apache License, Version 2.0\r
+   (the "License"); you may not use this file except in compliance with\r
+   the License.  You may obtain a copy of the License at\r
+\r
+      http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+   Unless required by applicable law or agreed to in writing, software\r
+   distributed under the License is distributed on an "AS IS" BASIS,\r
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+   See the License for the specific language governing permissions and\r
+   limitations under the License.\r
+\r
+ */\r
+package org.apache.derby.iapi.store.access;\r
+\r
+import java.util.Enumeration;\r
+import java.util.NoSuchElementException;\r
+import java.util.Properties;\r
+import java.util.Vector;\r
+import org.apache.derby.shared.common.reference.SQLState;\r
+import org.apache.derby.iapi.error.StandardException;\r
+import org.apache.derby.iapi.types.DataValueDescriptor;\r
+import org.apache.derby.iapi.types.SQLInteger;\r
+import org.apache.derby.iapi.types.RowLocation;\r
+import org.apache.derby.iapi.types.StringDataValue;\r
+\r
+import org.apache.derby.iapi.services.context.ContextService;\r
+import org.apache.derby.iapi.services.io.FormatableBitSet;\r
+import org.apache.derby.iapi.services.sanity.SanityManager;\r
+\r
+import org.apache.derby.iapi.sql.conn.LanguageConnectionContext;\r
+\r
+/**\r
+ * This class is used by BackingStoreHashtable when the BackingStoreHashtable \r
+ * must spill to disk.  It implements the methods of a hash table (put, get, \r
+ * remove, elements); however, it is not implemented as a hash table. In order \r
+ * to minimize the amount of unique code it is implemented using a Btree and a \r
+ * heap conglomerate. The Btree indexes the hash code of the row key rather \r
+ * than the key itself, since the actual key may be too long for our Btree \r
+ * implementation.\r
+ *\r
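+ * <p>\r
+ * A minimal usage sketch (illustrative only; the two-column SQLInteger \r
+ * template, the UCS_BASIC collation ids and the single key column below are \r
+ * assumptions for the example, not taken from an actual Derby caller):\r
+ * <pre>\r
+ *   DataValueDescriptor[] template =\r
+ *       { new SQLInteger(), new SQLInteger() };\r
+ *   int[] collationIds =\r
+ *       { StringDataValue.COLLATION_TYPE_UCS_BASIC,\r
+ *         StringDataValue.COLLATION_TYPE_UCS_BASIC };\r
+ *\r
+ *   DiskHashtable dht = new DiskHashtable(\r
+ *       tc,                   // an open TransactionController\r
+ *       template,\r
+ *       collationIds,\r
+ *       new int[] { 0 },      // column 0 is the key column\r
+ *       true,                 // eliminate duplicate keys\r
+ *       false);               // do not keep the table after commit\r
+ *\r
+ *   DataValueDescriptor[] row = { new SQLInteger( 1), new SQLInteger( 42)};\r
+ *   dht.put( row[0], row);         // key is the key column's value\r
+ *   Object hit = dht.get( row[0]); // null, one row, or a Vector of rows\r
+ *   dht.close();                   // drops the backing conglomerates\r
+ * </pre>\r
+ *\r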
+ * Created: Fri Jan 28 13:58:03 2005\r
+ *\r
+ * @version 1.0\r
+ */\r
+\r
+public class DiskHashtable \r
+{\r
+    private final long                    rowConglomerateId;\r
+    private       ConglomerateController  rowConglomerate;\r
+    private final long                    btreeConglomerateId;\r
+    private       ConglomerateController  btreeConglomerate;\r
+    private final DataValueDescriptor[]   btreeRow;\r
+    private final int[]                   key_column_numbers;\r
+    private final boolean                 remove_duplicates;\r
+    private final TransactionController   tc;\r
+    private final DataValueDescriptor[]   row;\r
+    private final DataValueDescriptor[]   scanKey = { new SQLInteger()};\r
+    private int                           size;\r
+    private boolean                       keepStatistics;\r
+    private final boolean                 keepAfterCommit;\r
+\r
+    /**\r
+     * Creates a new <code>DiskHashtable</code> instance.\r
+     *\r
+     * @param tc                    The transaction controller under which the \r
+     *                              conglomerates are created and opened.\r
+     * @param template              An array of DataValueDescriptors that \r
+     *                              serves as a template for the rows.\r
+     * @param collation_ids         The collation ids of the template columns, \r
+     *                              passed on when the heap conglomerate is \r
+     *                              created.\r
+     * @param key_column_numbers    The indexes of the key columns (0 based).\r
+     * @param remove_duplicates     If true then at most one row is kept per \r
+     *                              key; rows with duplicate keys are not \r
+     *                              inserted.\r
+     * @param keepAfterCommit       If true then the hash table is kept after \r
+     *                              a commit.\r
+     */\r
+    public DiskHashtable( \r
+    TransactionController   tc,\r
+    DataValueDescriptor[]   template,\r
+    int[]                   collation_ids,\r
+    int[]                   key_column_numbers,\r
+    boolean                 remove_duplicates,\r
+    boolean                 keepAfterCommit)\r
+        throws StandardException\r
+    {\r
+        this.tc                         = tc;\r
+        this.key_column_numbers         = key_column_numbers;\r
+        this.remove_duplicates          = remove_duplicates;\r
+        this.keepAfterCommit            = keepAfterCommit;\r
+        LanguageConnectionContext lcc   = (LanguageConnectionContext)\r
+            ContextService.getContextOrNull(\r
+                LanguageConnectionContext.CONTEXT_ID);\r
+\r
+        keepStatistics = (lcc != null) && lcc.getRunTimeStatisticsMode();\r
+\r
+        // Create template row used for creating the conglomerate and \r
+        // fetching rows.\r
+        row = new DataValueDescriptor[template.length];\r
+        for( int i = 0; i < row.length; i++)\r
+        {\r
+            row[i] = template[i].getNewNull();\r
+\r
+            if (SanityManager.DEBUG)\r
+            {\r
+                // must have an object template for all cols in hash overflow.\r
+                SanityManager.ASSERT(\r
+                    row[i] != null, \r
+                    "Template for the hash table must have non-null object");\r
+            }\r
+        }\r
+\r
+        int tempFlags = \r
+            keepAfterCommit ? \r
+            (TransactionController.IS_TEMPORARY | \r
+             TransactionController.IS_KEPT) : \r
+            TransactionController.IS_TEMPORARY;\r
+        \r
+        // create the "base" table of the hash overflow.\r
+        rowConglomerateId = \r
+            tc.createConglomerate( \r
+                "heap",\r
+                template,\r
+                (ColumnOrdering[]) null,\r
+                collation_ids,\r
+                (Properties) null,\r
+                tempFlags);\r
+\r
+        // open the "base" table of the hash overflow.\r
+        rowConglomerate = \r
+            tc.openConglomerate( \r
+                rowConglomerateId,\r
+                keepAfterCommit,\r
+                TransactionController.OPENMODE_FORUPDATE,\r
+                TransactionController.MODE_TABLE,\r
+                TransactionController.ISOLATION_NOLOCK/* Single thread only */);\r
+\r
+        // create the index on the "hash" base table.  The key of the index\r
+        // is the hash code of the row key.  The second column is the \r
+        // RowLocation of the row in the "base" table of the hash overflow.\r
+        btreeRow = \r
+            new DataValueDescriptor[] \r
+                { new SQLInteger(), rowConglomerate.newRowLocationTemplate()};\r
+\r
+        Properties btreeProps = new Properties();\r
+\r
+        btreeProps.put("baseConglomerateId", \r
+                String.valueOf(rowConglomerateId));\r
+        btreeProps.put("rowLocationColumn",  \r
+                "1");\r
+        btreeProps.put("allowDuplicates",    \r
+                "false"); // Because the row location is part of the key\r
+        btreeProps.put("nKeyFields",         \r
+                "2"); // Include the row location column\r
+        btreeProps.put("nUniqueColumns",     \r
+                "2"); // Include the row location column\r
+        btreeProps.put("maintainParentLinks", \r
+                "false");\r
+\r
+        // default collation is used for hash code and row location\r
+        int[] index_collation_ids = \r
+            {StringDataValue.COLLATION_TYPE_UCS_BASIC,\r
+             StringDataValue.COLLATION_TYPE_UCS_BASIC};\r
+\r
+        btreeConglomerateId = \r
+            tc.createConglomerate( \r
+                "BTREE",\r
+                btreeRow,\r
+                (ColumnOrdering[]) null,\r
+                index_collation_ids,\r
+                btreeProps,\r
+                tempFlags);\r
+\r
+        // open the "index" of the hash overflow.\r
+        btreeConglomerate = \r
+            tc.openConglomerate( \r
+                btreeConglomerateId,\r
+                keepAfterCommit,\r
+                TransactionController.OPENMODE_FORUPDATE,\r
+                TransactionController.MODE_TABLE,\r
+                TransactionController.ISOLATION_NOLOCK /*Single thread only*/ );\r
+\r
+    } // end of constructor\r
+\r
+    public void close() throws StandardException\r
+    {\r
+        btreeConglomerate.close();\r
+        rowConglomerate.close();\r
+        tc.dropConglomerate( btreeConglomerateId);\r
+        tc.dropConglomerate( rowConglomerateId);\r
+    } // end of close\r
+    \r
+    /**\r
+     * Put a new row in the overflow structure.\r
+     *\r
+     * @param key The key of the row: the key column's value if there is a \r
+     *            single key column, or a KeyHasher if there are several.\r
+     * @param row The row to be inserted.\r
+     *\r
+     * @return true  if the row was added,\r
+     *         false if it was not added (because it was a duplicate and we \r
+     *               are eliminating duplicates).\r
+     *\r
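+     * <p>\r
+     * Sketch of the duplicate handling (hypothetical rows rowA and rowB that \r
+     * match the template; assumes a DiskHashtable dht created with \r
+     * remove_duplicates == true and a single SQLInteger key column):\r
+     * <pre>\r
+     *   dht.put( new SQLInteger( 1), rowA);  // returns true, row stored\r
+     *   dht.put( new SQLInteger( 1), rowB);  // returns false, duplicate key\r
+     * </pre>\r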
+     * @exception StandardException standard error policy\r
+     */\r
+    public boolean put(Object key, Object[] row)\r
+        throws StandardException\r
+    {\r
+        boolean isDuplicate = false;\r
+        if (remove_duplicates || keepStatistics)\r
+        {\r
+            // Go to the work of finding out whether it is a duplicate\r
+            isDuplicate = (getRemove(key, false, true) != null);\r
+            if (remove_duplicates && isDuplicate)\r
+                return false;\r
+        }\r
+\r
+        // insert the row into the "base" conglomerate.\r
+        rowConglomerate.insertAndFetchLocation( \r
+            (DataValueDescriptor[]) row, (RowLocation) btreeRow[1]);\r
+\r
+        // create index row from hashcode and rowlocation just inserted, and\r
+        // insert index row into index.\r
+        btreeRow[0].setValue( key.hashCode());\r
+        btreeConglomerate.insert( btreeRow);\r
+\r
+        if (keepStatistics && !isDuplicate)\r
+            size++;\r
+\r
+        return true;\r
+\r
+    } // end of put\r
+\r
+    /**\r
+     * Get a row from the overflow structure.\r
+     *\r
+     * @param key If the rows have only one key column then the key value; \r
+     *            if there is more than one key column then a KeyHasher.\r
+     *\r
+     * @return null if there is no corresponding row,\r
+     *         the row (DataValueDescriptor[]) if there is exactly one row \r
+     *         with the key, or\r
+     *         a Vector of all the rows with the key if there is more than one.\r
+     *\r
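+     * <p>\r
+     * A caller has to handle all three shapes of the return value, for \r
+     * example (illustrative sketch only):\r
+     * <pre>\r
+     *   Object hit = dht.get( key);\r
+     *   if( hit == null)\r
+     *   {\r
+     *       // no row with this key\r
+     *   }\r
+     *   else if( hit instanceof Vector)\r
+     *   {\r
+     *       // several rows: a Vector of DataValueDescriptor[]\r
+     *       Vector rows = (Vector) hit;\r
+     *   }\r
+     *   else\r
+     *   {\r
+     *       // exactly one row\r
+     *       DataValueDescriptor[] oneRow = (DataValueDescriptor[]) hit;\r
+     *   }\r
+     * </pre>\r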
+     * @exception StandardException\r
+     */\r
+    public Object get(Object key)\r
+        throws StandardException\r
+    {\r
+        return getRemove(key, false, false);\r
+    }\r
+\r
+    private Object getRemove(Object key, boolean remove, boolean existenceOnly)\r
+        throws StandardException\r
+    {\r
+        int hashCode = key.hashCode();\r
+        int rowCount = 0;\r
+        Object retValue = null;\r
+\r
+        scanKey[0].setValue( hashCode);\r
+        ScanController scan = \r
+            tc.openScan( \r
+                btreeConglomerateId,\r
+                false, // do not hold\r
+                remove ? TransactionController.OPENMODE_FORUPDATE : 0,\r
+                TransactionController.MODE_TABLE,\r
+                TransactionController.ISOLATION_READ_UNCOMMITTED,\r
+                null, // Scan all the columns\r
+                scanKey,\r
+                ScanController.GE,\r
+                (Qualifier[][]) null,\r
+                scanKey,\r
+                ScanController.GT);\r
+        try\r
+        {\r
+            while (scan.fetchNext(btreeRow))\r
+            {\r
+                if (rowConglomerate.fetch(\r
+                        (RowLocation) btreeRow[1], \r
+                        row, \r
+                        (FormatableBitSet) null /* all columns */)\r
+                    && rowMatches( row, key))\r
+                {\r
+                    if( existenceOnly)\r
+                        return this;\r
+\r
+                    rowCount++;\r
+                    if( rowCount == 1)\r
+                    {\r
+                        // if there is only one matching row just return the\r
+                        // row itself; callers get a Vector only when there\r
+                        // is more than one match.\r
+                        retValue = BackingStoreHashtable.shallowCloneRow( row);\r
+                    }\r
+                    else \r
+                    {\r
+                        // if there is more than one row, return a vector of\r
+                        // the rows.\r
+                        //\r
+                        Vector v;\r
+                        if( rowCount == 2)\r
+                        {\r
+                            // convert the "single" row retrieved from the\r
+                            // first trip in the loop, to a vector with the\r
+                            // first two rows.\r
+                            v = new Vector( 2);\r
+                            v.add( retValue);\r
+                            retValue = v;\r
+                        }\r
+                        else\r
+                        {\r
+                            v = (Vector) retValue;\r
+                        }\r
+                        v.add( BackingStoreHashtable.shallowCloneRow( row));\r
+                    }\r
+                    if( remove)\r
+                    {\r
+                        rowConglomerate.delete( (RowLocation) btreeRow[1]);\r
+                        scan.delete();\r
+                        size--;\r
+                    }\r
+                    if( remove_duplicates)\r
+                        // This must be the only row with the key\r
+                        return retValue;\r
+                }\r
+            }\r
+        }\r
+        finally\r
+        {\r
+            scan.close();\r
+        }\r
+        return retValue;\r
+    } // end of getRemove\r
+\r
+\r
+    private boolean rowMatches( \r
+    DataValueDescriptor[] row,\r
+    Object                key)\r
+    {\r
+        if( key_column_numbers.length == 1)\r
+            return row[ key_column_numbers[0]].equals( key);\r
+\r
+        KeyHasher kh = (KeyHasher) key;\r
+        for( int i = 0; i < key_column_numbers.length; i++)\r
+        {\r
+            if( ! row[ key_column_numbers[i]].equals( kh.getObject(i)))\r
+                return false;\r
+        }\r
+        return true;\r
+    } // end of rowMatches\r
+\r
+    /**\r
+     * remove all rows with a given key from the hash table.\r
+     *\r
+     * @param key          The key of the rows to remove.\r
+     *\r
+     * @return The removed row(s).\r
+     *\r
+     * @exception  StandardException  Standard exception policy.\r
+     **/\r
+    public Object remove( Object key)\r
+        throws StandardException\r
+    {\r
+        return getRemove( key, true, false);\r
+    } // end of remove\r
+\r
+    /**\r
+     * @return The number of rows in the hash table\r
+     */\r
+    public int size()\r
+    {\r
+        return size;\r
+    }\r
+    \r
+    /**\r
+     * Return an Enumeration that can be used to scan entire table.\r
+     * <p>\r
+     * RESOLVE - is it worth it to support this routine?\r
+     *\r
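+     * <p>\r
+     * Intended usage (sketch; each element is a cloned row, returned as a \r
+     * DataValueDescriptor[]):\r
+     * <pre>\r
+     *   for( Enumeration e = dht.elements(); e.hasMoreElements();)\r
+     *   {\r
+     *       DataValueDescriptor[] r = (DataValueDescriptor[]) e.nextElement();\r
+     *       // process r ...\r
+     *   }\r
+     * </pre>\r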
+     * @return The Enumeration.\r
+     *\r
+     * @exception  StandardException  Standard exception policy.\r
+     **/\r
+    public Enumeration elements()\r
+        throws StandardException\r
+    {\r
+        return new ElementEnum();\r
+    }\r
+\r
+    private class ElementEnum implements Enumeration\r
+    {\r
+        private ScanController scan;\r
+        private boolean hasMore;\r
+        private RowLocation rowloc;\r
+\r
+        ElementEnum()\r
+        {\r
+            try\r
+            {\r
+                scan = tc.openScan( rowConglomerateId,\r
+                                    keepAfterCommit,\r
+                                    0, // read only\r
+                                    TransactionController.MODE_TABLE,\r
+                                    TransactionController.ISOLATION_NOLOCK,\r
+                                    (FormatableBitSet) null, // all columns\r
+                                    (DataValueDescriptor[]) null, // no start key\r
+                                    0, // no start key operator\r
+                                    (Qualifier[][]) null,\r
+                                    (DataValueDescriptor[]) null, // no stop key\r
+                                    0 /* no stop key operator */);\r
+                hasMore = scan.next();\r
+                if( ! hasMore)\r
+                {\r
+                    scan.close();\r
+                    scan = null;\r
+                } else if (keepAfterCommit) {\r
+                    rowloc = rowConglomerate.newRowLocationTemplate();\r
+                    scan.fetchLocation(rowloc);\r
+                }\r
+            }\r
+            catch( StandardException se)\r
+            {\r
+                hasMore = false;\r
+                if( scan != null)\r
+                {\r
+                    try\r
+                    {\r
+                        scan.close();\r
+                    }\r
+                    catch( StandardException se1){};\r
+                    scan = null;\r
+                }\r
+            }\r
+        } // end of constructor\r
+\r
+        public boolean hasMoreElements()\r
+        {\r
+            return hasMore;\r
+        }\r
+\r
+        public Object nextElement()\r
+        {\r
+            if( ! hasMore)\r
+                throw new NoSuchElementException();\r
+            try\r
+            {\r
+                if (scan.isHeldAfterCommit()) {\r
+                    // automatically reopens scan:\r
+                    if (!scan.positionAtRowLocation(rowloc)) {\r
+                        // Will not happen unless compress of this table\r
+                        // has invalidated the row location. Possible?\r
+                        throw StandardException.\r
+                            newException(SQLState.NO_CURRENT_ROW);\r
+                    }\r
+                }\r
+\r
+                scan.fetch(row);\r
+\r
+                Object retValue =  BackingStoreHashtable.shallowCloneRow( row);\r
+                hasMore = scan.next();\r
+\r
+                if( ! hasMore)\r
+                {\r
+                    scan.close();\r
+                    scan = null;\r
+                } else if (keepAfterCommit) {\r
+                    scan.fetchLocation(rowloc);\r
+                }\r
+\r
+                return retValue;\r
+            }\r
+            catch( StandardException se)\r
+            {\r
+                if( scan != null)\r
+                {\r
+                    try\r
+                    {\r
+                        scan.close();\r
+                    }\r
+                    catch( StandardException se1){};\r
+                    scan = null;\r
+                }\r
+                throw new NoSuchElementException();\r
+            }\r
+        } // end of nextElement\r
+    } // end of class ElementEnum\r
+}\r