--- /dev/null
+/*\r
+\r
+ Derby - Class org.apache.derby.iapi.store.access.conglomerate.ScanManager\r
+\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements. See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to you under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License. You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+\r
+ */\r
+\r
+package org.apache.derby.iapi.store.access.conglomerate;\r
+\r
+import org.apache.derby.iapi.store.access.GroupFetchScanController;\r
+import org.apache.derby.iapi.store.access.ScanController;\r
+import org.apache.derby.iapi.store.raw.Page;\r
+import org.apache.derby.iapi.error.StandardException;\r
+\r
+import org.apache.derby.iapi.store.access.BackingStoreHashtable;\r
+\r
+/**\r
+\r
+The ScanManager interface contains those methods private to access method\r
+implementors necessary to implement Scans on Conglomerates. Clients of scans\r
+use the ScanController to interact with the scan.\r
+<P>\r
+@see ScanController\r
+\r
+**/\r
+\r
+public interface ScanManager extends ScanController, GroupFetchScanController\r
+{\r
+\r
+    /**\r
+     * Close scan as part of terminating a transaction.\r
+     * <p>\r
+     * Use this call to close the scan resources as part of committing or\r
+     * aborting a transaction.  The normal close() routine may do some cleanup\r
+     * that is either unnecessary, or not correct due to the unknown condition\r
+     * of the scan following a transaction ending error.  Use this call when\r
+     * closing all scans as part of an abort of a transaction.\r
+     *\r
+     * @param closeHeldScan  If true, means to close scan even if it has been\r
+     *                       opened to be kept opened across commit.  This is\r
+     *                       used to close these scans on abort.\r
+     *\r
+     * @return boolean indicating that the close has resulted in a real close\r
+     *                 of the scan.  A held scan will return false if called \r
+     *                 by closeForEndTransaction(false), otherwise it will \r
+     *                 return true.  A non-held scan will always return true.\r
+     *\r
+     * @exception  StandardException  Standard exception policy.\r
+     **/\r
+    boolean closeForEndTransaction(boolean closeHeldScan)\r
+		throws StandardException;\r
+\r
+    /**\r
+     * Insert all rows that qualify for the current scan into the input\r
+     * Hash table.  \r
+     * <p>\r
+     * This routine executes the entire scan as described in the \r
+     * openScan call.  For every qualifying unique row value an entry is\r
+     * placed into the HashTable.  For unique row values the entry in the\r
+     * Hashtable has a key value of the object stored in \r
+     * row[key_column_number], and the value of the data is row.  For row \r
+     * values with duplicates, the key value is also row[key_column_number], \r
+     * but the value of the data is a Vector of\r
+     * rows.  The caller will have to call "instanceof" on the data value\r
+     * object if duplicates are expected, to determine if the data value\r
+     * of the Hashtable entry is a row or is a Vector of rows.\r
+     * <p>\r
+     * Note, that for this routine to work efficiently the caller must \r
+     * ensure that the object in row[key_column_number] implements \r
+     * the hashCode and equals method as appropriate for its datatype.\r
+     * <p>\r
+     * It is expected that this call will be the first and only call made in\r
+     * an openscan.  Qualifiers and stop position of the openscan are applied\r
+     * just as in a normal scan.  This call is logically equivalent to the \r
+     * caller performing the following:\r
+     *\r
+     * import java.util.Hashtable;\r
+     *\r
+     * hash_table = new Hashtable();\r
+     *\r
+     * while (next())\r
+     * {\r
+     *     row = create_new_row();\r
+     *     fetch(row);\r
+     *     if ((duplicate_value = \r
+     *         hash_table.put(row[key_column_number], row)) != null)\r
+     *     {\r
+     *         Vector row_vec;\r
+     *\r
+     *         // inserted a duplicate\r
+     *         if ((duplicate_value instanceof Vector))\r
+     *         {\r
+     *             row_vec = (Vector) duplicate_value;\r
+     *         }\r
+     *         else\r
+     *         {\r
+     *             // allocate vector to hold duplicates\r
+     *             row_vec = new Vector(2);\r
+     *\r
+     *             // insert original row into vector\r
+     *             row_vec.addElement(duplicate_value);\r
+     *\r
+     *             // put the vector as the data rather than the row\r
+     *             hash_table.put(row[key_column_number], row_vec);\r
+     *         }\r
+     *         \r
+     *         // insert new row into vector\r
+     *         row_vec.addElement(row);\r
+     *     }\r
+     * }\r
+     * <p>\r
+     * The columns of the row will be the standard columns returned as\r
+     * part of a scan, as described by the validColumns - see openScan for\r
+     * description.\r
+     * RESOLVE - is this ok?  or should I hard code somehow the row to\r
+     *           be the first column and the row location?\r
+     * <p>\r
+     * No overflow to external storage is provided, so calling this routine\r
+     * on a 1 gigabyte conglomerate will incur at least 1 gigabyte of memory\r
+     * (probably failing with a java out of memory condition).  If this\r
+     * routine gets an out of memory condition, or if "max_rowcnt" is \r
+     * exceeded then the routine will give up, empty the Hashtable, \r
+     * and return "false."\r
+     * <p>\r
+     * On exit from this routine, whether the fetchSet() succeeded or not\r
+     * the scan is complete, it is positioned just the same as if the scan\r
+     * had been drained by calling "next()" until it returns false (ie. \r
+     * fetchNext() and next() calls will return false).  \r
+     * reopenScan() can be called to restart the scan.\r
+     * <p>\r
+     *\r
+     * RESOLVE - until we get row counts what should we do for sizing the\r
+     *           the size, capacity, and load factor of the hash table.\r
+     *           For now it is up to the caller to create the Hashtable,\r
+     *           Access does not reset any parameters.\r
+     * <p>\r
+     * RESOLVE - I am not sure if access should be in charge of allocating\r
+     *           the new row objects.  I know that I can do this in the\r
+     *           case of btree's, but I don't think I can do this in heaps.\r
+     *           Maybe this is solved by work to be done on the sort \r
+     *           interface.\r
+     *\r
+     *\r
+     * @param max_rowcnt        The maximum number of rows to insert into the \r
+     *                          Hash table.  Pass in -1 if there is no maximum.\r
+     * @param key_column_numbers The column numbers of the columns in the\r
+     *                          scan result row to be the key to the Hashtable.\r
+     *                          "0" is the first column in the scan result\r
+     *                          row (which may be different than the first\r
+     *                          row in the table of the scan).\r
+     * @param hash_table        The BackingStoreHashtable to insert the\r
+     *                          qualifying rows into.\r
+     *\r
+     * @exception  StandardException  Standard exception policy.\r
+     **/\r
+    void fetchSet(\r
+    long                    max_rowcnt,\r
+    int[]                   key_column_numbers,\r
+    BackingStoreHashtable   hash_table)\r
+        throws StandardException;\r
+\r
+\r
+    /**\r
+     * Do work necessary to maintain the current position in the scan.\r
+     * <p>\r
+     * The latched page in the conglomerate "conglom" is changing, do\r
+     * whatever is necessary to maintain the current position of the scan.\r
+     * For some conglomerates this may be a no-op.\r
+     * <p>\r
+     *\r
+     * @param conglom   Conglomerate object of the conglomerate being changed.\r
+     * @param page      Page in the conglomerate being changed.\r
+     *\r
+     * @exception  StandardException  Standard exception policy.\r
+     **/\r
+    public void savePosition(Conglomerate conglom, Page page)\r
+        throws StandardException;\r
+}\r