--- /dev/null
+/*\r
+\r
+ Derby - Class org.apache.derby.iapi.store.access.TransactionController\r
+\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements. See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to you under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License. You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+\r
+ */\r
+\r
+package org.apache.derby.iapi.store.access;\r
+\r
+import java.util.Properties;\r
+\r
+import java.io.Serializable;\r
+\r
+import org.apache.derby.iapi.services.context.ContextManager;\r
+import org.apache.derby.iapi.services.locks.CompatibilitySpace;\r
+import org.apache.derby.iapi.services.property.PersistentSet;\r
+import org.apache.derby.iapi.services.io.Storable;\r
+\r
+import org.apache.derby.iapi.error.StandardException;\r
+\r
+import org.apache.derby.iapi.store.raw.Loggable;\r
+import org.apache.derby.iapi.store.raw.Transaction;\r
+\r
+import org.apache.derby.iapi.types.DataValueDescriptor;\r
+\r
+import org.apache.derby.iapi.store.access.BackingStoreHashtable;\r
+import org.apache.derby.iapi.services.io.FormatableBitSet;\r
+\r
+import org.apache.derby.iapi.store.access.DatabaseInstant;\r
+import org.apache.derby.iapi.error.ExceptionSeverity;\r
+/**\r
+\r
+The TransactionController interface provides methods that an access client\r
+can use to control a transaction, which include the methods for\r
+gaining access to resources (conglomerates, scans, etc.) in the transaction\r
+controller's storage manager. TransactionControllers are obtained\r
+from an AccessFactory via the getTransaction method.\r
+<P>\r
+Each transaction controller is associated with a transaction context which\r
+provides error cleanup when standard exceptions are thrown anywhere in the\r
+system. The transaction context performs the following actions in response\r
+to cleanupOnError:\r
+<UL>\r
+<LI>\r
+If the error is an instance of StandardException that has a severity less\r
+than ExceptionSeverity.TRANSACTION_SEVERITY all resources remain unaffected.\r
+<LI>\r
+If the error is an instance of StandardException that has a severity equal\r
+to ExceptionSeverity.TRANSACTION_SEVERITY, then all resources are released. An attempt\r
+to use any resource obtained from this transaction controller after\r
+such an error will result in an error. The transaction controller itself remains\r
+valid, however.\r
+<LI>\r
+If the error is an instance of StandardException that has a severity greater\r
+than ExceptionSeverity.TRANSACTION_SEVERITY, then all resources are released and the\r
+context is popped from the stack. Attempting to use this controller or any\r
+resources obtained from it will result in an error.\r
+</UL>\r
+Transactions are obtained from an AccessFactory.\r
+@see AccessFactory#getTransaction\r
+@see org.apache.derby.iapi.error.StandardException\r
+@see PersistentSet\r
+\r
+\r
+**/\r
+\r
+public interface TransactionController\r
+ extends PersistentSet\r
+{\r
+\r
+ /**\r
+ * Constant used for the lock_level argument to openConglomerate() and \r
+ * openScan() calls. Pass in MODE_RECORD if you want the conglomerate\r
+ * to be opened with record level locking (but the system may override\r
+ * this choice and provide table level locking instead). \r
+ **/\r
+ static final int MODE_RECORD = 6;\r
+ /**\r
+ * Constant used for the lock_level argument to openConglomerate() and \r
+ * openScan() calls. Pass in MODE_TABLE if you want the conglomerate\r
+ * to be opened with table level locking - if this mode is passed in the\r
+ * system will never use record level locking for the open scan or \r
+ * controller.\r
+ **/\r
+ static final int MODE_TABLE = 7;\r
+\r
+ /**\r
+ * Constants used for the isolation_level argument to openConglomerate() and\r
+ * openScan() calls.\r
+ **/\r
+\r
+ /**\r
+ * \r
+ * No locks are requested for data that is read only. Uncommitted data\r
+ * may be returned. Writes only visible previous to commit.\r
+ * Exclusive transaction length locks are set on data that is written, no\r
+ * lock is set on data that is read. No table level intent lock is held\r
+ * so it is up to the caller to ensure that the table is not dropped while being
+ * accessed (RESOLVE - this issue may need to be resolved differently if\r
+ * we can't figure out a non-locked based way to prevent ddl during\r
+ * read uncommitted access).\r
+ *\r
+ * ONLY USED INTERNALLY BY ACCESS, NOT VALID FOR EXTERNAL USERS.\r
+ **/\r
+ static final int ISOLATION_NOLOCK = 0;\r
+\r
+ /**\r
+ * No locks are requested for data that is read only. Uncommitted data\r
+ * may be returned. Writes only visible previous to commit.\r
+ * Exclusive transaction length locks are set on data that is written, no\r
+ * lock is set on data that is read. No table level intent lock is held\r
+ * so it is up to the caller to ensure that the table is not dropped while being
+ * accessed (RESOLVE - this issue may need to be resolved differently if\r
+ * we can't figure out a non-locked based way to prevent ddl during\r
+ * read uncommitted access).\r
+ *\r
+ * Note that this is currently only supported in heap scans.\r
+ *\r
+ * TODO - work in progress to support this locking mode in the 5.1 \r
+ * storage system. \r
+ **/\r
+ static final int ISOLATION_READ_UNCOMMITTED = 1;\r
+\r
+ /**\r
+ * No lost updates, no dirty reads, only committed data is returned. \r
+ * Writes only visible when committed. Exclusive transaction\r
+ * length locks are set on data that is written, short term locks (\r
+ * possibly instantaneous duration locks) are set\r
+ * on data that is read. \r
+ **/\r
+ static final int ISOLATION_READ_COMMITTED = 2;\r
+\r
+ /**\r
+ * No lost updates, no dirty reads, only committed data is returned. \r
+ * Writes only visible when committed. Exclusive transaction\r
+ * length locks are set on data that is written, short term locks (\r
+ * possibly instantaneous duration locks) are set\r
+ * on data that is read. Read locks are requested for "zero" duration,\r
+ * thus upon return from access no read row lock is held.\r
+ **/\r
+ static final int ISOLATION_READ_COMMITTED_NOHOLDLOCK = 3;\r
+\r
+ /**\r
+ * Read and write locks are held until end of transaction, but no\r
+ * phantom protection is performed (ie no previous key locking).\r
+ * Writes only visible when committed. \r
+ *\r
+ * Note this constant is currently mapped to ISOLATION_SERIALIZABLE. \r
+ * The constant is provided so that code which only requires repeatable \r
+ * read can be coded with the right isolation level, and will just work when
+ * the store provides real repeatable read isolation.
+ **/\r
+ static final int ISOLATION_REPEATABLE_READ = 4;\r
+\r
+ /**\r
+ * Gray's isolation degree 3, "Serializable, Repeatable Read". Note that\r
+ * some conglomerate implementations may only be able to provide \r
+ * phantom protection under MODE_TABLE, while others can support this\r
+ * under MODE_RECORD.\r
+ **/\r
+ static final int ISOLATION_SERIALIZABLE = 5;\r
+\r
+ /**\r
+ * Constants used for the flag argument to openConglomerate() and \r
+ * openScan() calls.\r
+ *\r
+ * NOTE - The values of these constants must correspond to their associated\r
+ * constants in \r
+ * protocol.Database.Storage.RawStore.Interface.ContainerHandle, do not\r
+ * add constants to this file without first adding there.\r
+ **/\r
+\r
+ /**\r
+ * Use this mode to the openScan() call to indicate the scan should get\r
+ * update locks during scan, and either promote the update locks to \r
+ * exclusive locks if the row is changed or demote the lock if the row\r
+ * is not updated. The lock demotion depends on the isolation level of\r
+ * the scan. If isolation level is ISOLATION_SERIALIZABLE or \r
+ * ISOLATION_REPEATABLE_READ\r
+ * then the lock will be converted to a read lock. If the isolation level \r
+ * ISOLATION_READ_COMMITTED then the lock is released when the scan moves\r
+ * off the row.\r
+ * <p>\r
+ * Note that one must still set OPENMODE_FORUPDATE to be able to change\r
+ * rows in the scan. So to enable update locks for an updating scan one\r
+ * provides (OPENMODE_FORUPDATE | OPENMODE_USE_UPDATE_LOCKS)\r
+ **/\r
+ static final int OPENMODE_USE_UPDATE_LOCKS = 0x00001000;\r
+\r
+ /**\r
+ * Use this mode to the openConglomerate() call which opens the base\r
+ * table to be used in an index to base row probe. This will cause
+ * the openConglomerate() call to not get any row locks as part of
+ * its fetches.
+ * It is important when using this mode that the secondary index table be\r
+ * successfully opened before opening the base table so that\r
+ * proper locking protocol is followed.\r
+ **/\r
+ static final int OPENMODE_SECONDARY_LOCKED = 0x00002000;\r
+\r
+ /**\r
+ * Use this mode to the openConglomerate() call used to open the\r
+ * secondary indices of a table for inserting new rows in the table.\r
+ * This will let the secondary index know that the base row being inserted
+ * has already been locked and only previous key locks need be obtained.\r
+ * \r
+ * It is important when using this mode that the base table be\r
+ * successfully opened before opening the secondary index so that
+ * proper locking protocol is followed.\r
+ **/\r
+ static final int OPENMODE_BASEROW_INSERT_LOCKED = 0x00004000;\r
+\r
+ /**\r
+ * open table for update, if not specified table will be opened for read.\r
+ **/\r
+ static final int OPENMODE_FORUPDATE = 0x00000004;\r
+\r
+ /**\r
+ * Use this mode to the openConglomerate() call used to just get the\r
+ * table lock on the conglomerate without actually doing anything else.\r
+ * Any operations other than close() performed on the "opened" container\r
+ * will fail.\r
+ **/\r
+ static final int OPENMODE_FOR_LOCK_ONLY = 0x00000040;\r
+\r
+ /**\r
+ * The table lock request will not wait.\r
+ * <p>\r
+ * The request to get the table lock (any table lock including intent or\r
+ * "real" table level lock), will not wait if it can't be granted. A\r
+ * lock timeout will be returned. Note that subsequent row locks will\r
+ * wait if the application has not set a 0 timeout and if the call does\r
+ * not have a wait parameter (like OpenConglomerate.fetch()).
+ **/\r
+ static final int OPENMODE_LOCK_NOWAIT = 0x00000080;\r
+\r
+ /**\r
+ * Constants used for the countOpen() call.\r
+ **/\r
+ public static final int OPEN_CONGLOMERATE = 0x01;\r
+ public static final int OPEN_SCAN = 0x02;\r
+ public static final int OPEN_CREATED_SORTS = 0x03;\r
+ public static final int OPEN_SORT = 0x04;\r
+ public static final int OPEN_TOTAL = 0x05;\r
+\r
+\r
+ static final byte IS_DEFAULT = (byte) 0x00; // initialize the flag\r
+ static final byte IS_TEMPORARY = (byte) 0x01; // conglom is temporary\r
+ static final byte IS_KEPT = (byte) 0x02; // no auto remove\r
+\r
+\r
+ /**************************************************************************\r
+ * Interfaces previously defined in TcAccessIface:\r
+ **************************************************************************\r
+ */\r
+ \r
+ /**\r
+ * Get reference to access factory which started this transaction.\r
+ * <p>\r
+ *\r
+ * @return The AccessFactory which started this transaction.\r
+ **/\r
+ public AccessFactory getAccessManager();\r
+\r
+ /**\r
+ Check whether a conglomerate exists.\r
+\r
+ @param conglomId The identifier of the conglomerate to check for.\r
+\r
+ @return true if the conglomerate exists, false otherwise.\r
+\r
+ @exception StandardException only thrown if something goes\r
+ wrong in the lower levels.\r
+ **/\r
+ boolean conglomerateExists(long conglomId)\r
+ throws StandardException;\r
+\r
+ /**\r
+ Create a conglomerate.\r
+ <p>\r
+ Currently, only "heap" and "btree secondary index" conglomerates are supported,
+ and all the features are not completely implemented. \r
+ For now, create conglomerates like this:\r
+ <p>\r
+ <blockquote><pre>\r
+ TransactionController tc;\r
+ long conglomId = tc.createConglomerate(\r
+ "heap", // we're requesting a heap conglomerate\r
+ template, // a populated template is required for heap and btree.\r
+ null, // no column order\r
+ null, // default collation order for all columns\r
+ null, // default properties\r
+ 0); // not temporary\r
+ </pre></blockquote>
+\r
+ Each implementation of a conglomerate takes a possibly different set\r
+ of properties. The "heap" implementation currently takes no properties.\r
+\r
+ The "btree secondary index" requires the following set of properties:\r
+ <UL>\r
+ <LI> "baseConglomerateId" (integer). The conglomerate id of the base\r
+ conglomerate is never actually accessed by the b-tree secondary\r
+ index implementation, it only serves as a namespace for row locks.\r
+ This property is required.\r
+ <LI> "rowLocationColumn" (integer). The zero-based index into the row which\r
+ the b-tree secondary index will assume holds a @see RowLocation of\r
+ the base row in the base conglomerate. This value will be used\r
+ for acquiring locks. In this implementation RowLocationColumn must be \r
+ the last key column.\r
+ This property is required.\r
+ <LI>"allowDuplicates" (boolean). If set to true the table will allow \r
+ rows which are duplicate in key column's 0 through (nUniqueColumns - 1).\r
+ Currently only supports "false".\r
+ This property is optional, defaults to false.\r
+ <LI>"nKeyFields" (integer) Columns 0 through (nKeyFields - 1) will be \r
+ included in key of the conglomerate.\r
+ This implementation requires that "nKeyFields" must be the same as the\r
+ number of fields in the conglomerate, including the rowLocationColumn.\r
+ Other implementations may relax this restriction to allow non-key fields\r
+ in the index.\r
+ This property is required.\r
+ <LI>"nUniqueColumns" (integer) Columns 0 through (nUniqueColumns - 1) will be
+ used to check for uniqueness. So for a standard SQL non-unique index
+ implementation set "nUniqueColumns" to the same value as "nKeyFields"; and
+ for a unique index set "nUniqueColumns" to "nKeyFields" - 1 (i.e. don't
+ include the rowLocationColumn in the uniqueness check).
+ This property is required.
+ <LI>"maintainParentLinks" (boolean)\r
+ Whether the b-tree pages maintain the page number of their parent. Only\r
+ used for consistency checking. It takes a certain amount more effort to\r
+ maintain these links, but they're really handy for ensuring that the index\r
+ is consistent.\r
+ This property is optional, defaults to true.\r
+ </UL>\r
+\r
+ A secondary index i (a, b) on table t (a, b, c) would have rows
+ which looked like (a, b, row_location). baseConglomerateId is set to the
+ conglomerate id of t. rowLocationColumn is set to 2. allowDuplicates
+ would be set to false. To create a unique
+ secondary index set nUniqueColumns to 2, this means that the btree
+ code will compare the key values but not the row id when determining
+ uniqueness. To create a nonunique secondary index set nUniqueColumns
+ to 3, this would mean that the uniqueness test would include the row
+ location and since all row locations will be unique all rows inserted
+ into the index will be differentiated (at least) by row location.
+\r
+ @return The identifier to be used to open the conglomerate later.\r
+\r
+ @param implementation Specifies what kind of conglomerate to create.\r
+ THE WAY THAT THE IMPLEMENTATION IS CHOSEN STILL NEEDS SOME WORK.\r
+ For now, use "BTREE" or "heap" for a local access manager.\r
+\r
+ @param template A row which describes the prototypical\r
+ row that the conglomerate will be holding.\r
+ Typically this row gives the conglomerate\r
+ information about the number and type of\r
+ columns it will be holding. The implementation\r
+ may require a specific subclass of row type.\r
+ Note that the createConglomerate call reads the template and makes a copy\r
+ of any necessary information from the template, no reference to the\r
+ template is kept (and thus this template can be re-used in subsequent\r
+ calls - such as openScan()). This field is required when creating either\r
+ a heap or btree conglomerate.\r
+\r
+ @param columnOrder Specifies the columns' sort order.
+ Useful only when the conglomerate is of type BTREE; the default
+ value is 'null', which means all columns need to be sorted in
+ ascending order.
+\r
+ @param collationIds Specifies the collation id of each of the columns\r
+ in the new conglomerate. Collation id along with format id may be used\r
+ to create DataValueDescriptor's which may subsequently be used for\r
+ comparisons. For instance the correct collation specific order and\r
+ searching is maintained by correctly specifying the collation id of \r
+ the columns in the index when the index is created.\r
+\r
+\r
+ @param properties Implementation-specific properties of the\r
+ conglomerate. \r
+\r
+ @param temporaryFlag\r
+ Where temporaryFlag can have the following values:\r
+ IS_DEFAULT - no bit is set.\r
+ IS_TEMPORARY - if set, the conglomerate is temporary\r
+ IS_KEPT - only looked at if IS_TEMPORARY,\r
+ if set, the temporary container is not\r
+ removed automatically by store when\r
+ transaction terminates.\r
+\r
+ If IS_TEMPORARY is set, the conglomerate is temporary.\r
+ Temporary conglomerates are only visible through the transaction\r
+ controller that created them. Otherwise, they are opened,\r
+ scanned, and dropped in the same way as permanent conglomerates.\r
+ Changes to temporary conglomerates persist across commits, but\r
+ temporary conglomerates are truncated on abort (or rollback\r
+ to savepoint). Updates to temporary conglomerates are not \r
+ locked or logged.\r
+\r
+ A temporary conglomerate is only visible to the transaction
+ controller that created it, even if the conglomerate is IS_KEPT
+ when the transaction terminates.
+
+ All temporary conglomerates are removed by store when the
+ conglomerate controller is destroyed, or if they are dropped by an explicit
+ dropConglomerate. If Derby reboots, all temporary
+ conglomerates are removed.
+\r
+ @exception StandardException if the conglomerate could\r
+ not be created for some reason.\r
+ **/\r
+ long createConglomerate(\r
+ String implementation,\r
+ DataValueDescriptor[] template,\r
+ ColumnOrdering[] columnOrder,\r
+ int[] collationIds,\r
+ Properties properties,\r
+ int temporaryFlag)\r
+ throws StandardException;\r
+\r
+ /**\r
+ Create a conglomerate and load (fill) it with rows that come from the
+ row source without logging.
+
+ <p>Individual rows that are loaded into the conglomerate are not
+ logged. After this operation, the underlying database must be backed up
+ with a database backup rather than a transaction log backup (when we have
+ them). This warning is put here for the benefit of future generations.
+\r
+ <p>\r
+ This function behaves the same as @see createConglomerate except it also\r
+ populates the conglomerate with rows from the row source and the rows that\r
+ are inserted are not logged.\r
+\r
+ @param implementation Specifies what kind of conglomerate to create.\r
+ THE WAY THAT THE IMPLEMENTATION IS CHOSEN STILL NEEDS SOME WORK.\r
+ For now, use "BTREE" or "heap" for a local access manager.\r
+\r
+ @param template A row which describes the prototypical\r
+ row that the conglomerate will be holding.\r
+ Typically this row gives the conglomerate\r
+ information about the number and type of\r
+ columns it will be holding. The implementation\r
+ may require a specific subclass of row type.\r
+ Note that the createConglomerate call reads the template and makes a copy\r
+ of any necessary information from the template, no reference to the\r
+ template is kept (and thus this template can be re-used in subsequent\r
+ calls - such as openScan()). This field is required when creating either\r
+ a heap or btree conglomerate.\r
+\r
+ @param columnOrder Specifies the columns' sort order.
+ Useful only when the conglomerate is of type BTREE; the default
+ value is 'null', which means all columns need to be sorted in
+ ascending order.
+\r
+ @param collationIds Specifies the collation id of each of the columns\r
+ in the new conglomerate. Collation id along with format id may be used\r
+ to create DataValueDescriptor's which may subsequently be used for\r
+ comparisons. For instance the correct collation specific order and\r
+ searching is maintained by correctly specifying the collation id of \r
+ the columns in the index when the index is created.\r
+\r
+ @param properties Implementation-specific properties of the\r
+ conglomerate. \r
+\r
+ @param rowSource the interface to receive rows to load into the
+ conglomerate.
+\r
+ @param rowCount - if not null the number of rows loaded into the table\r
+ will be returned as the first element of the array.\r
+\r
+ @exception StandardException if the conglomerate could not be created or\r
+ loaded for some reason. Throws \r
+ SQLState.STORE_CONGLOMERATE_DUPLICATE_KEY_EXCEPTION if\r
+ the conglomerate supports uniqueness checks and has been created to\r
+ disallow duplicates, and one of the rows being loaded had key columns which\r
+ were duplicate of a row already in the conglomerate.\r
+ **/\r
+ long createAndLoadConglomerate(\r
+ String implementation,\r
+ DataValueDescriptor[] template,\r
+ ColumnOrdering[] columnOrder,\r
+ int[] collationIds,\r
+ Properties properties,\r
+ int temporaryFlag,\r
+ RowLocationRetRowSource rowSource,\r
+ long[] rowCount)\r
+ throws StandardException;\r
+\r
+ /**\r
+ Recreate a conglomerate and possibly load it with new rows that come from\r
+ the new row source.\r
+\r
+ <p>\r
+ This function behaves the same as @see createConglomerate except it also\r
+ populates the conglomerate with rows from the row source and the rows that\r
+ are inserted are not logged.\r
+\r
+ <p>Individual rows that are loaded into the conglomerate are not
+ logged. After this operation, the underlying database must be backed up
+ with a database backup rather than a transaction log backup (when we have
+ them). This warning is put here for the benefit of future generations.
+\r
+ @param implementation Specifies what kind of conglomerate to create.\r
+ THE WAY THAT THE IMPLEMENTATION IS CHOSEN STILL NEEDS SOME WORK.\r
+ For now, use "BTREE" or "heap" for a local access manager.\r
+\r
+ @param recreate_ifempty If false, and the rowsource used to load the new\r
+ conglomerate returns no rows, then the original\r
+ conglomid will be returned. To the client it will\r
+ be as if no call was made. Underlying \r
+ implementations may actually create and drop a \r
+ container.\r
+ If true, then a new empty container will be \r
+ created and its conglomid will be returned.
+\r
+ @param template A row which describes the prototypical\r
+ row that the conglomerate will be holding.\r
+ Typically this row gives the conglomerate\r
+ information about the number and type of\r
+ columns it will be holding. The implementation\r
+ may require a specific subclass of row type.\r
+ Note that the createConglomerate call reads the template and makes a copy\r
+ of any necessary information from the template, no reference to the\r
+ template is kept (and thus this template can be re-used in subsequent\r
+ calls - such as openScan()). This field is required when creating either\r
+ a heap or btree conglomerate.\r
+\r
+ @param columnOrder Specifies the columns' sort order.
+ Useful only when the conglomerate is of type BTREE; the default
+ value is 'null', which means all columns need to be sorted in
+ ascending order.
+\r
+ @param collationIds Specifies the collation id of each of the columns\r
+ in the new conglomerate. Collation id along with format id may be used\r
+ to create DataValueDescriptor's which may subsequently be used for\r
+ comparisons. For instance the correct collation specific order and\r
+ searching is maintained by correctly specifying the collation id of \r
+ the columns in the index when the index is created.\r
+\r
+ @param properties Implementation-specific properties of the conglomerate. \r
+\r
+ @param temporaryFlag If true, the conglomerate is temporary.\r
+ Temporary conglomerates are only visible through the transaction\r
+ controller that created them. Otherwise, they are opened,\r
+ scanned, and dropped in the same way as permanent conglomerates.\r
+ Changes to temporary conglomerates persist across commits, but\r
+ temporary conglomerates are truncated on abort (or rollback\r
+ to savepoint). Updates to temporary conglomerates are not \r
+ locked or logged.\r
+\r
+ @param orig_conglomId The conglomid of the original conglomerate.\r
+\r
+ @param rowSource interface to receive rows to load into the conglomerate. \r
+\r
+ @param rowCount - if not null the number of rows loaded into the table\r
+ will be returned as the first element of the array.\r
+\r
+ @exception StandardException if the conglomerate could not be created or\r
+ loaded for some reason. Throws \r
+ SQLState.STORE_CONGLOMERATE_DUPLICATE_KEY_EXCEPTION if\r
+ the conglomerate supports uniqueness checks and has been created to\r
+ disallow duplicates, and one of the rows being loaded had key columns which\r
+ were duplicate of a row already in the conglomerate.\r
+ **/\r
+ long recreateAndLoadConglomerate(\r
+ String implementation,\r
+ boolean recreate_ifempty,\r
+ DataValueDescriptor[] template,\r
+ ColumnOrdering[] columnOrder,\r
+ int[] collationIds,\r
+ Properties properties,\r
+ int temporaryFlag,\r
+ long orig_conglomId,\r
+ RowLocationRetRowSource rowSource,\r
+ long[] rowCount\r
+ )\r
+ throws StandardException;\r
+\r
+ /**\r
+ Add a column to a conglomerate. \r
+ \r
+ The Storage system will block this action until it can get an exclusive\r
+ container level lock on the conglomerate. The conglomerate must not be\r
+ open in the current transaction, this means that within the current \r
+ transaction there must be no open ConglomerateController's or \r
+ ScanControllers. It may not be possible in some implementations of the\r
+ system to catch this error in the store, so it is up to the caller to
+ ensure this.
+\r
+ The column can only be added at the spot just after the current set of\r
+ columns. \r
+\r
+ The template_column must be nullable. \r
+ \r
+ After this call has been made, all fetches of this column from rows that\r
+ existed in the table prior to this call will return "null".\r
+\r
+ @param conglomId The identifier of the conglomerate to alter.\r
+ @param column_id The column number to add this column at.\r
+ @param template_column An instance of the column to be added to table.\r
+ @param collation_id Collation id of the added column.\r
+\r
+ @exception StandardException Only some types of conglomerates can support\r
+ adding a column, for instance "heap" conglomerates support adding a \r
+ column while "btree" conglomerates do not. If the column can not be\r
+ added an exception will be thrown.\r
+ **/\r
+ public void addColumnToConglomerate(\r
+ long conglomId, \r
+ int column_id, \r
+ Storable template_column,\r
+ int collation_id)\r
+ throws StandardException;\r
+\r
+\r
+ /**\r
+ Drop a conglomerate. The conglomerate must not be open in\r
+ the current transaction. This also means that there must\r
+ not be any active scans on it.\r
+\r
+ @param conglomId The identifier of the conglomerate to drop.\r
+\r
+ @exception StandardException if the conglomerate could not be\r
+ dropped for some reason.\r
+ **/\r
+ void dropConglomerate(long conglomId)\r
+ throws StandardException;\r
+\r
+ /**\r
+ * For debugging, find the conglomid given the containerid.\r
+ * <p>\r
+ *\r
+ * @return the conglomid, which contains the container with containerid.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ long findConglomid(long containerid)\r
+ throws StandardException;\r
+\r
+ /**\r
+ * For debugging, find the containerid given the conglomid.\r
+ * <p>\r
+ * Will have to change if we ever have more than one container in \r
+ * a conglomerate.\r
+ *\r
+ * @return the containerid of container implementing conglomerate with \r
+ * "conglomid."\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ long findContainerid(long conglomid)\r
+ throws StandardException;\r
+\r
+ /**\r
+ * Get an nested user transaction.\r
+ * <p>\r
+ * A nested user transaction can be used exactly as any other \r
+ * TransactionController, except as follows. For this discussion let the \r
+ * parent transaction be the transaction used to make the \r
+ * startNestedUserTransaction() call, and let the child transaction be the\r
+ * transaction returned by the startNestedUserTransaction() call.\r
+ * <p>\r
+ * A parent transaction can nest a single readonly transaction\r
+ * and a single separate read/write transaction. \r
+ * If a subsequent nested transaction creation is attempted\r
+ * against the parent prior to destroying an existing\r
+ * nested user transaction of the same type, an exception will be thrown. \r
+ * <p>\r
+ * The nesting is limited to one level deep. An exception will be thrown\r
+ * if a subsequent getNestedUserTransaction() is called on the child\r
+ * transaction.\r
+ * <p>\r
+ * The locks in the child transaction of a readOnly nested user transaction\r
+ * will be compatible with the locks of the parent transaction. The\r
+ * locks in the child transaction of a non-readOnly nested user transaction\r
+ * will NOT be compatible with those of the parent transaction - this is\r
+ * necessary for correct recovery behavior.\r
+ * <p>\r
+ * A commit in the child transaction will release locks associated with\r
+ * the child transaction only, work can continue in the parent transaction\r
+ * at this point. \r
+ * <p>\r
+ * Any abort of the child transaction will result in an abort of both\r
+ * the child transaction and parent transaction, either initiated by\r
+ * an explicit abort() call or by an exception that results in an abort.
+ * <p>\r
+ * A TransactionController.destroy() call should be made on the child\r
+ * transaction once all child work is done, and the caller wishes to \r
+ * continue work in the parent transaction.\r
+ * <p>\r
+ * AccessFactory.getTransaction() will always return the "parent" \r
+ * transaction, never the child transaction. Thus clients using \r
+ * nested user transactions must keep track of the transaction, as there\r
+ * is no interface to query the storage system to get the current\r
+ * child transaction. The idea is that a nested user transaction should\r
+ * be used for a limited amount of work, committed, and then work
+ * continues in the parent transaction.\r
+ * <p>\r
+ * Nested User transactions are meant to be used to implement \r
+ * system work necessary to commit as part of implementing a user's\r
+ * request, but where holding the lock for the duration of the user\r
+ * transaction is not acceptable. 2 examples of this are system catalog\r
+ * read locks accumulated while compiling a plan, and auto-increment.\r
+ * <p>\r
+ * Once the first write of a non-readOnly nested transaction is done,\r
+ * then the nested user transaction must be committed or aborted before\r
+ * any write operation is attempted in the parent transaction. \r
+ *\r
+ * @param readOnly Is transaction readonly? Only 1 non-readonly nested\r
+ * transaction is allowed per transaction.\r
+ *\r
+ * @return The new nested user transaction.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public TransactionController startNestedUserTransaction(boolean readOnly)\r
+ throws StandardException;\r
+\r
+ /**\r
+ * A superset of properties that "users" can specify.\r
+ * <p>\r
+ * A superset of properties that "users" (ie. from sql) can specify. Store\r
+ * may implement other properties which should not be specified by users.\r
+ * Layers above access may implement properties which are not known at\r
+ * all to Access.\r
+ * <p>\r
+ * This list is a superset, as some properties may not be implemented by\r
+ * certain types of conglomerates. For instance an in-memory store may not\r
+ * implement a pageSize property. Or some conglomerates may not support\r
+ * pre-allocation.\r
+ * <p>\r
+ * This interface is meant to be used by the SQL parser to do validation\r
+ * of properties passed to the create table statement, and also by the\r
+ * various user interfaces which present table information back to the \r
+ * user.\r
+ * <p>\r
+ * Currently this routine returns the following list:\r
+ * derby.storage.initialPages\r
+ * derby.storage.minimumRecordSize\r
+ * derby.storage.pageReservedSpace\r
+ * derby.storage.pageSize\r
+ *\r
+ * @return The superset of properties that "users" can specify.\r
+ *\r
+ **/\r
+ Properties getUserCreateConglomPropList();\r
+\r
+ /**\r
+ * Open a conglomerate for use. \r
+ * <p>\r
+ * The lock level indicates the minimum lock level to get locks at, the\r
+ * underlying conglomerate implementation may actually lock at a higher\r
+ * level (ie. caller may request MODE_RECORD, but the table may be locked\r
+ * at MODE_TABLE instead).\r
+ * <p>\r
+ * The close method is on the ConglomerateController interface.\r
+ *\r
+ * @return a ConglomerateController to manipulate the conglomerate.\r
+ *\r
+ * @param conglomId The identifier of the conglomerate to open.\r
+ *\r
+ * @param hold If true, will be maintained open over commits.\r
+ *\r
+ * @param open_mode Specify flags to control opening of table. \r
+ * OPENMODE_FORUPDATE - if set open the table for\r
+ * update otherwise open table shared.\r
+ *\r
+ * @param lock_level One of (MODE_TABLE, MODE_RECORD).\r
+ *\r
+ * @param isolation_level The isolation level to lock the conglomerate at.\r
+ * One of (ISOLATION_READ_COMMITTED, \r
+ * ISOLATION_REPEATABLE_READ or \r
+ * ISOLATION_SERIALIZABLE).\r
+ *\r
+ * @exception StandardException if the conglomerate could not be opened \r
+ * for some reason. Throws \r
+ * SQLState.STORE_CONGLOMERATE_DOES_NOT_EXIST\r
+ * if the conglomId being requested does not\r
+ * exist for some reason (ie. someone has \r
+ * dropped it).\r
+ **/\r
+ ConglomerateController openConglomerate(\r
+ long conglomId, \r
+ boolean hold,\r
+ int open_mode,\r
+ int lock_level,\r
+ int isolation_level)\r
+ throws StandardException;\r
+\r
+ /**\r
+ * Open a conglomerate for use, optionally include "compiled" info. \r
+ * <p>\r
+ * Same as openConglomerate(), except that one can optionally provide\r
+ * "compiled" static_info and/or dynamic_info. This compiled information\r
+ * must have been gotten from getDynamicCompiledConglomInfo() and/or\r
+ * getStaticCompiledConglomInfo() calls on the same conglomid being opened.\r
+ * It is up to caller that "compiled" information is still valid and\r
+ * is appropriately multi-threaded protected.\r
+ * <p>\r
+ *\r
+ * @see TransactionController#openConglomerate\r
+ * @see TransactionController#getDynamicCompiledConglomInfo\r
+ * @see TransactionController#getStaticCompiledConglomInfo\r
+ * @see DynamicCompiledOpenConglomInfo\r
+ * @see StaticCompiledOpenConglomInfo\r
+ *\r
+ * @return The identifier to be used to open the conglomerate later.\r
+ *\r
+ * @param hold If true, will be maintained open over commits.\r
+ * @param open_mode Specify flags to control opening of table. \r
+ * @param lock_level One of (MODE_TABLE, MODE_RECORD).\r
+ * @param isolation_level The isolation level to lock the conglomerate at.\r
+ * One of (ISOLATION_READ_COMMITTED, \r
+ * ISOLATION_REPEATABLE_READ or \r
+ * ISOLATION_SERIALIZABLE).\r
+ * @param static_info object returned from \r
+ * getStaticCompiledConglomInfo() call on this id.\r
+ * @param dynamic_info object returned from\r
+ * getDynamicCompiledConglomInfo() call on this id.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ ConglomerateController openCompiledConglomerate(\r
+ boolean hold,\r
+ int open_mode,\r
+ int lock_level,\r
+ int isolation_level,\r
+ StaticCompiledOpenConglomInfo static_info,\r
+ DynamicCompiledOpenConglomInfo dynamic_info)\r
+ throws StandardException;\r
+\r
+\r
+ /**\r
+ * Create a HashSet which contains all rows that qualify for the \r
+ * described scan.\r
+ * <p>\r
+ * All parameters shared between openScan() and this routine are \r
+ * interpreted exactly the same. Logically this routine calls\r
+ * openScan() with the passed in set of parameters, and then places\r
+ * all returned rows into a newly created HashSet and returns, actual\r
+ * implementations will likely perform better than actually calling\r
+ * openScan() and doing this. For documentation of the openScan \r
+ * parameters see openScan().\r
+ * <p>\r
+ *\r
+ * @return the BackingStoreHashtable which was created.\r
+ *\r
+ * @param conglomId see openScan()\r
+ * @param open_mode see openScan()\r
+ * @param lock_level see openScan()\r
+ * @param isolation_level see openScan()\r
+ * @param scanColumnList see openScan()\r
+ * @param startKeyValue see openScan()\r
+ * @param startSearchOperator see openScan()\r
+ * @param qualifier see openScan()\r
+ * @param stopKeyValue see openScan()\r
+ * @param stopSearchOperator see openScan()\r
+ *\r
+ * @param max_rowcnt The maximum number of rows to insert into \r
+ * the HashSet. Pass in -1 if there is no \r
+ * maximum.\r
+ * @param key_column_numbers The column numbers of the columns in the\r
+ * scan result row to be the key to the \r
+ * Hashtable. "0" is the first column in the \r
+ * scan result row (which may be different \r
+ * than the first row in the table of the \r
+ * scan).\r
+ * @param remove_duplicates Should the HashSet automatically remove\r
+ * duplicates, or should it create the Vector \r
+ * of duplicates?\r
+ * @param estimated_rowcnt The number of rows that the caller \r
+ * estimates will be inserted into the sort. \r
+ * -1 indicates that the caller has no idea.\r
+ * Used by the sort to make good choices about\r
+ * in-memory vs. external sorting, and to size\r
+ * merge runs.\r
+ * @param max_inmemory_rowcnt The number of rows at which the underlying\r
+ * Hashtable implementation should cut over\r
+ * from an in-memory hash to a disk based\r
+ * access method.\r
+ * @param initialCapacity If not "-1" used to initialize the java\r
+ * Hashtable.\r
+ * @param loadFactor If not "-1" used to initialize the java\r
+ * Hashtable.\r
+ * @param collect_runtimestats If true will collect up runtime stats during\r
+ * scan processing for retrieval by\r
+ * BackingStoreHashtable.getRuntimeStats().\r
+ * @param skipNullKeyColumns Whether or not to skip rows with 1 or more null key columns\r
+ *\r
+ * @param keepAfterCommit If true then the hash table is kept after a\r
+ * commit\r
+ * @see BackingStoreHashtable\r
+ * @see TransactionController#openScan\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ BackingStoreHashtable createBackingStoreHashtableFromScan(\r
+ long conglomId,\r
+ int open_mode,\r
+ int lock_level,\r
+ int isolation_level,\r
+ FormatableBitSet scanColumnList,\r
+ DataValueDescriptor[] startKeyValue,\r
+ int startSearchOperator,\r
+ Qualifier qualifier[][],\r
+ DataValueDescriptor[] stopKeyValue,\r
+ int stopSearchOperator,\r
+ long max_rowcnt,\r
+ int[] key_column_numbers,\r
+ boolean remove_duplicates,\r
+ long estimated_rowcnt,\r
+ long max_inmemory_rowcnt,\r
+ int initialCapacity,\r
+ float loadFactor,\r
+ boolean collect_runtimestats,\r
+ boolean skipNullKeyColumns,\r
+ boolean keepAfterCommit)\r
+ throws StandardException;\r
+\r
+\r
+ /**\r
+ Open a scan on a conglomerate. The scan will return all\r
+ rows in the conglomerate which are between the\r
+ positions defined by {startKeyValue, startSearchOperator} and\r
+ {stopKeyValue, stopSearchOperator}, which also match the qualifier.\r
+ <P>\r
+ The way that starting and stopping keys and operators are used\r
+ may best be described by example. Say there's an ordered conglomerate\r
+ with two columns, where the 0-th column is named 'x', and the 1st\r
+ column is named 'y'. The values of the columns are as follows:\r
+ <blockquote><pre>\r
+ x: 1 3 4 4 4 5 5 5 6 7 9\r
+ y: 1 1 2 4 6 2 4 6 1 1 1\r
+ </pre></blockquote>\r
+ <P>\r
+ A {start key, search op} pair of {{5.2}, GE} would position on\r
+ {x=5, y=2}, whereas the pair {{5}, GT} would position on {x=6, y=1}.\r
+ <P>\r
+ Partial keys are used to implement partial key scans in SQL.\r
+ For example, the SQL "select * from t where x = 5" would\r
+ open a scan on the conglomerate (or a useful index) of t\r
+ using a starting position partial key of {{5}, GE} and\r
+ a stopping position partial key of {{5}, GT}.\r
+ <P>\r
+ Some more examples:\r
+ <p>\r
+ <blockquote><pre>\r
+ +-------------------+------------+-----------+--------------+--------------+\r
+ | predicate | start key | stop key | rows | rows locked |\r
+ | | value | op | value |op | returned |serialization |\r
+ +-------------------+-------+----+-------+---+--------------+--------------+\r
+ | x = 5 | {5} | GE | {5} |GT |{5,2} .. {5,6}|{4,6} .. {5,6}|\r
+ | x > 5 | {5} | GT | null | |{6,1} .. {9,1}|{5,6} .. {9,1}|\r
+ | x >= 5 | {5} | GE | null | |{5,2} .. {9,1}|{4,6} .. {9,1}|\r
+ | x <= 5 | null | | {5} |GT |{1,1} .. {5,6}|first .. {5,6}|\r
+ | x < 5 | null | | {5} |GE |{1,1} .. {4,6}|first .. {4,6}|\r
+ | x >= 5 and x <= 7 | {5}, | GE | {7} |GT |{5,2} .. {7,1}|{4,6} .. {7,1}|\r
+ | x = 5 and y > 2 | {5,2} | GT | {5} |GT |{5,4} .. {5,6}|{5,2} .. {5,6}|\r
+ | x = 5 and y >= 2 | {5,2} | GE | {5} |GT |{5,2} .. {5,6}|{4,6} .. {5,6}|\r
+ | x = 5 and y < 5 | {5} | GE | {5,5} |GE |{5,2} .. {5,4}|{4,6} .. {5,4}|\r
+ | x = 2 | {2} | GE | {2} |GT | none |{1,1} .. {1,1}|\r
+ +-------------------+-------+----+-------+---+--------------+--------------+\r
+ </pre></blockquote>\r
+ <P>\r
+ As the above table implies, the underlying scan may lock\r
+ more rows than it returns in order to guarantee serialization.\r
+ <P>\r
+ For each row which meets the start and stop position, as described above\r
+ the row is "qualified" to see whether it should be returned. The\r
+ qualification is a 2 dimensional array of Qualifiers (see Qualifier), which represents\r
+ the qualification in conjunctive normal form (CNF). Conjunctive normal\r
+ form is an "and'd" set of "or'd" Qualifiers.\r
+ <P>\r
+ For example x = 5 would be represented in pseudo code as:\r
+ \r
+ qualifier_cnf[][] = new Qualifier[1];\r
+ qualifier_cnf[0] = new Qualifier[1];\r
+\r
+ qualifier_cnf[0][0] = new Qualifier(x = 5)\r
+\r
+ <P>\r
+ For example (x = 5) or (y = 6) would be represented in pseudo code as:\r
+\r
+ qualifier_cnf[][] = new Qualifier[1];\r
+ qualifier_cnf[0] = new Qualifier[2];\r
+\r
+ qualifier_cnf[0][0] = new Qualifier(x = 5)\r
+ qualifier_cnf[0][1] = new Qualifier(y = 6)\r
+\r
+ <P>\r
+ For example ((x = 5) or (x = 6)) and ((y = 1) or (y = 2)) would be \r
+ represented in pseudo code as:\r
+\r
+ qualifier_cnf[][] = new Qualifier[2];\r
+ qualifier_cnf[0] = new Qualifier[2];\r
+ qualifier_cnf[1] = new Qualifier[2];\r
+ qualifier_cnf[0][0] = new Qualifier(x = 5)\r
+ qualifier_cnf[0][1] = new Qualifier(x = 6)\r
+\r
+ qualifier_cnf[1][0] = new Qualifier(y = 1)\r
+ qualifier_cnf[1][1] = new Qualifier(y = 2)\r
+\r
+ <P>\r
+ For each row the CNF qualifier is processed and it is determined whether\r
+ or not the row should be returned to the caller.\r
+\r
+ The following pseudo-code describes how this is done:\r
+\r
+ <blockquote><pre>\r
+ if (qualifier != null)\r
+ {\r
+ <blockquote><pre>\r
+ for (int and_clause = 0; and_clause < qualifier.length; and_clause++)\r
+ {\r
+ boolean or_qualifies = false;\r
+\r
+ for (int or_clause = 0; or_clause < qualifier[and_clause].length; or_clause++)\r
+ {\r
+ <blockquote><pre>\r
+ DataValueDescriptor key = \r
+ qualifier[and_clause][or_clause].getOrderable();\r
+\r
+ DataValueDescriptor row_col = \r
+ get row column[qualifier[and_clause][or_clause].getColumnId()];\r
+\r
+ or_qualifies = \r
+ row_col.compare(qualifier[and_clause][or_clause].getOperator,\r
+ <blockquote><pre>\r
+ key,\r
+ qualifier[and_clause][or_clause].getOrderedNulls,\r
+ qualifier[and_clause][or_clause].getUnknownRV);\r
+ </pre></blockquote>\r
+\r
+ if (or_qualifies)\r
+ {\r
+ break;\r
+ }\r
+ }\r
+\r
+ if (!or_qualifies)\r
+ {\r
+ <blockquote><pre>\r
+ don't return this row to the client - proceed to next row;\r
+ </pre></blockquote>\r
+ }\r
+ </pre></blockquote>\r
+\r
+ }\r
+ </pre></blockquote>\r
+ }\r
+ </pre></blockquote>\r
+\r
+\r
+ @param conglomId The identifier of the conglomerate\r
+ to open the scan for.\r
+\r
+ @param hold If true, this scan will be maintained open over\r
+ commits.\r
+\r
+ @param open_mode Specify flags to control opening of table. \r
+ OPENMODE_FORUPDATE - if set open the table for\r
+ update otherwise open table shared.\r
+\r
+ @param lock_level One of (MODE_TABLE, MODE_RECORD).\r
+\r
+ @param isolation_level The isolation level to lock the conglomerate at.\r
+ One of (ISOLATION_READ_COMMITTED, \r
+ ISOLATION_REPEATABLE_READ or \r
+ ISOLATION_SERIALIZABLE).\r
+\r
+ @param scanColumnList A description of which columns to return from \r
+ every fetch in the scan. template, and scanColumnList\r
+ work together to describe the row to be returned by the scan - see RowUtil\r
+ for description of how these three parameters work together to describe\r
+ a "row".\r
+\r
+ @param startKeyValue An indexable row which holds a \r
+ (partial) key value which, in combination with the\r
+ startSearchOperator, defines the starting position of\r
+ the scan. If null, the starting position of the scan\r
+ is the first row of the conglomerate.\r
+ The startKeyValue must only reference columns included\r
+ in the scanColumnList.\r
+ \r
+ @param startSearchOperator an operator which defines\r
+ how the startKeyValue is to be searched for. If \r
+ startSearchOperation is ScanController.GE, the scan starts on\r
+ the first row which is greater than or equal to the \r
+ startKeyValue. If startSearchOperation is ScanController.GT,\r
+ the scan starts on the first row whose key is greater than\r
+ startKeyValue. The startSearchOperation parameter is \r
+ ignored if the startKeyValue parameter is null.\r
+\r
+ @param qualifier A 2 dimensional array encoding a conjunctive normal\r
+ form (CNF) data structure of qualifiers which, applied\r
+ to each key, restrict the rows returned by the scan. Rows\r
+ for which the CNF expression returns false are not\r
+ returned by the scan. If null, all rows are returned.\r
+ Qualifiers can only reference columns which are included in the\r
+ scanColumnList. The column id that a qualifier returns is the\r
+ column id the table, not the column id in the partial row being\r
+ returned.\r
+\r
+ For detailed description of 2-dimensional array passing @see Qualifier\r
+\r
+ @param stopKeyValue An indexable row which holds a \r
+ (partial) key value which, in combination with the\r
+ stopSearchOperator, defines the ending position of\r
+ the scan. If null, the ending position of the scan\r
+ is the last row of the conglomerate.\r
+ The stopKeyValue must only reference columns included\r
+ in the scanColumnList.\r
+ \r
+ @param stopSearchOperator an operator which defines\r
+ how the stopKeyValue is used to determine the scan stopping\r
+ position. If stopSearchOperation is ScanController.GE, the scan \r
+ stops just before the first row which is greater than or\r
+ equal to the stopKeyValue. If stopSearchOperation is\r
+ ScanController.GT, the scan stops just before the first row whose\r
+ key is greater than stopKeyValue. The stopSearchOperation\r
+ parameter is ignored if the stopKeyValue parameter is null.\r
+\r
+ @exception StandardException if the scan could not be\r
+ opened for some reason. Throws SQLState.STORE_CONGLOMERATE_DOES_NOT_EXIST\r
+ if the conglomId being requested does not exist for some reason (ie. \r
+ someone has dropped it).\r
+\r
+ @see RowUtil\r
+ @see ScanController\r
+ **/\r
+ ScanController openScan(\r
+ long conglomId,\r
+ boolean hold,\r
+ int open_mode,\r
+ int lock_level,\r
+ int isolation_level,\r
+ FormatableBitSet scanColumnList,\r
+ DataValueDescriptor[] startKeyValue,\r
+ int startSearchOperator,\r
+ Qualifier qualifier[][],\r
+ DataValueDescriptor[] stopKeyValue,\r
+ int stopSearchOperator)\r
+ throws StandardException;\r
+\r
+\r
+ /**\r
+ * Open a scan on a conglomerate, optionally providing compiled info.\r
+ * <p>\r
+ * Same as openScan(), except that one can optionally provide\r
+ * "compiled" static_info and/or dynamic_info. This compiled information\r
+ * must have been gotten from getDynamicCompiledConglomInfo() and/or\r
+ * getStaticCompiledConglomInfo() calls on the same conglomid being opened.\r
+ * It is up to caller that "compiled" information is still valid and\r
+ * is appropriately multi-threaded protected.\r
+ * <p>\r
+ *\r
+ * @see TransactionController#openScan\r
+ * @see TransactionController#getDynamicCompiledConglomInfo\r
+ * @see TransactionController#getStaticCompiledConglomInfo\r
+ * @see DynamicCompiledOpenConglomInfo\r
+ * @see StaticCompiledOpenConglomInfo\r
+ *\r
+ * @return The identifier to be used to open the conglomerate later.\r
+ *\r
+ * @param open_mode see openScan()\r
+ * @param lock_level see openScan()\r
+ * @param isolation_level see openScan()\r
+ * @param scanColumnList see openScan()\r
+ * @param startKeyValue see openScan()\r
+ * @param startSearchOperator see openScan()\r
+ * @param qualifier see openScan()\r
+ * @param stopKeyValue see openScan()\r
+ * @param stopSearchOperator see openScan()\r
+ * @param static_info object returned from \r
+ * getStaticCompiledConglomInfo() call on this id.\r
+ * @param dynamic_info object returned from\r
+ * getDynamicCompiledConglomInfo() call on this id.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ ScanController openCompiledScan(\r
+ boolean hold,\r
+ int open_mode,\r
+ int lock_level,\r
+ int isolation_level,\r
+ FormatableBitSet scanColumnList,\r
+ DataValueDescriptor[] startKeyValue,\r
+ int startSearchOperator,\r
+ Qualifier qualifier[][],\r
+ DataValueDescriptor[] stopKeyValue,\r
+ int stopSearchOperator,\r
+ StaticCompiledOpenConglomInfo static_info,\r
+ DynamicCompiledOpenConglomInfo dynamic_info)\r
+ throws StandardException;\r
+\r
+\r
+ /**\r
+ * Open a scan which gets copies of multiple rows at a time.\r
+ * <p>\r
+ * All inputs work exactly as in openScan(). The return is \r
+ * a GroupFetchScanController, which only allows fetches of groups\r
+ * of rows from the conglomerate.\r
+ * <p>\r
+ *\r
+ * @return The GroupFetchScanController to be used to fetch the rows.\r
+ *\r
+ * @param conglomId see openScan()\r
+ * @param open_mode see openScan()\r
+ * @param lock_level see openScan()\r
+ * @param isolation_level see openScan()\r
+ * @param scanColumnList see openScan()\r
+ * @param startKeyValue see openScan()\r
+ * @param startSearchOperator see openScan()\r
+ * @param qualifier see openScan()\r
+ * @param stopKeyValue see openScan()\r
+ * @param stopSearchOperator see openScan()\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ *\r
+ * @see ScanController\r
+ * @see GroupFetchScanController\r
+ **/\r
+ GroupFetchScanController openGroupFetchScan(\r
+ long conglomId,\r
+ boolean hold,\r
+ int open_mode,\r
+ int lock_level,\r
+ int isolation_level,\r
+ FormatableBitSet scanColumnList,\r
+ DataValueDescriptor[] startKeyValue,\r
+ int startSearchOperator,\r
+ Qualifier qualifier[][],\r
+ DataValueDescriptor[] stopKeyValue,\r
+ int stopSearchOperator)\r
+ throws StandardException;\r
+\r
+ /**\r
+ * Compress table in place.\r
+ * <p>\r
+ * Returns a GroupFetchScanController which can be used to move rows\r
+ * around in a table, creating a block of free pages at the end of the\r
+ * table. The process will move rows from the end of the table toward\r
+ * the beginning. The GroupFetchScanController will return the \r
+ * old row location, the new row location, and the actual data of any\r
+ * row moved. Note that this scan only returns moved rows, not an\r
+ * entire set of rows, the scan is designed specifically to be\r
+ * used by either explicit user call of the SYSCS_ONLINE_COMPRESS_TABLE()\r
+ * procedure, or internal background calls to compress the table.\r
+ *\r
+ * The old and new row locations are returned so that the caller can\r
+ * update any indexes necessary.\r
+ *\r
+ * This scan always returns all columns of the row.\r
+ * \r
+ * All inputs work exactly as in openScan(). The return is \r
+ * a GroupFetchScanController, which only allows fetches of groups\r
+ * of rows from the conglomerate.\r
+ * <p>\r
+ *\r
+ * @return The GroupFetchScanController to be used to fetch the rows.\r
+ *\r
+ * @param conglomId see openScan()\r
+ * @param hold see openScan()\r
+ * @param open_mode see openScan()\r
+ * @param lock_level see openScan()\r
+ * @param isolation_level see openScan()\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ *\r
+ * @see ScanController\r
+ * @see GroupFetchScanController\r
+ **/\r
+ GroupFetchScanController defragmentConglomerate(\r
+ long conglomId,\r
+ boolean online,\r
+ boolean hold,\r
+ int open_mode,\r
+ int lock_level,\r
+ int isolation_level)\r
+ throws StandardException;\r
+\r
+ /**\r
+ * Purge all committed deleted rows from the conglomerate.\r
+ * <p>\r
+ * This call will purge committed deleted rows from the conglomerate,\r
+ * that space will be available for future inserts into the conglomerate.\r
+ * <p>\r
+ *\r
+ * @param conglomId Id of the conglomerate to purge.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ void purgeConglomerate(long conglomId)\r
+ throws StandardException;\r
+\r
+ /**\r
+ * Return free space from the conglomerate back to the OS.\r
+ * <p>\r
+ * Returns free space from the conglomerate back to the OS. Currently\r
+ * only the sequential free pages at the "end" of the conglomerate can\r
+ * be returned to the OS.\r
+ * <p>\r
+ *\r
+ * @param conglomId Id of the conglomerate to compress.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ void compressConglomerate(long conglomId)\r
+ throws StandardException;\r
+\r
+\r
+ /**\r
+ * Retrieve the maximum value row in an ordered conglomerate.\r
+ * <p>\r
+ * Returns true and fetches the rightmost non-null row of an ordered \r
+ * conglomerate into "fetchRow" if there is at least one non-null row in \r
+ * the conglomerate. If there are no non-null rows in the conglomerate it \r
+ * returns false. Any row with\r
+ * a first column with a Null is considered a "null" row.\r
+ * <p>\r
+ * Non-ordered conglomerates will not implement this interface, calls\r
+ * will generate a StandardException.\r
+ * <p>\r
+ * RESOLVE - this interface is temporary, long term equivalent (and more) \r
+ * functionality will be provided by the openBackwardScan() interface. \r
+ * <p>\r
+ * ISOLATION_SERIALIZABLE and MODE_RECORD locking for btree max:\r
+ * The "BTREE" implementation will at the very least get a shared row lock\r
+ * on the max key row and the key previous to the max. \r
+ * This will be the case where the max row exists in the rightmost page of\r
+ * the btree. These locks won't be released. If the row does not exist in\r
+ * the last page of the btree then a scan of the entire btree will be\r
+ * performed, locks acquired in this scan will not be released.\r
+ * <p>\r
+ * Note that under ISOLATION_READ_COMMITTED, all locks on the table\r
+ * are released before returning from this call.\r
+ *\r
+ * @param conglomId The identifier of the conglomerate\r
+ * to open the scan for.\r
+ *\r
+ * @param open_mode Specify flags to control opening of table. \r
+ * OPENMODE_FORUPDATE - if set open the table for\r
+ * update otherwise open table shared.\r
+ * @param lock_level One of (MODE_TABLE, MODE_RECORD).\r
+ *\r
+ * @param isolation_level The isolation level to lock the conglomerate at.\r
+ * One of (ISOLATION_READ_COMMITTED, \r
+ * ISOLATION_REPEATABLE_READ or \r
+ * ISOLATION_SERIALIZABLE).\r
+ *\r
+ * @param scanColumnList A description of which columns to return from \r
+ * every fetch in the scan. template, and \r
+ * scanColumnList work together\r
+ * to describe the row to be returned by the scan - \r
+ * see RowUtil for description of how these three \r
+ * parameters work together to describe a "row".\r
+ *\r
+ * @param fetchRow The row to retrieve the maximum value into.\r
+ *\r
+ * @return boolean indicating if a row was found and retrieved or not.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ boolean fetchMaxOnBtree(\r
+ long conglomId,\r
+ int open_mode,\r
+ int lock_level,\r
+ int isolation_level,\r
+ FormatableBitSet scanColumnList,\r
+ DataValueDescriptor[] fetchRow)\r
+ throws StandardException;\r
+\r
+\r
+ /**\r
+ * Return an open StoreCostController for the given conglomid.\r
+ * <p>\r
+ * Return an open StoreCostController which can be used to ask about \r
+ * the estimated row counts and costs of ScanController and \r
+ * ConglomerateController operations, on the given conglomerate.\r
+ * <p>\r
+ *\r
+ * @return The open StoreCostController.\r
+ *\r
+ * @param conglomId The identifier of the conglomerate to open.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ *\r
+ * @see StoreCostController\r
+ **/\r
+ StoreCostController openStoreCost(\r
+ long conglomId)\r
+ throws StandardException;\r
+\r
+\r
+ /**\r
+ * Report on the number of open conglomerates in the transaction.\r
+ * <p>\r
+ * There are 4 types of open "conglomerates" that can be tracked, those\r
+ * opened by each of the following: openConglomerate(), openScan(), \r
+ * createSort(), and openSort(). Scans opened by openSortScan() are \r
+ * tracked the same as those opened by openScan(). This routine can be\r
+ * used to either report on the number of all opens, or may be used to\r
+ * track one particular type of open.\r
+ * <p>\r
+ * This routine is expected to be used for debugging only. An \r
+ * implementation may only track this info under SanityManager.DEBUG mode.\r
+ * If the implementation does not track the info it will return -1 (so\r
+ * code using this call to verify that no congloms are open should check\r
+ * for return <= 0 rather than == 0).\r
+ * <p>\r
+ * The return value depends on the "which_to_count" parameter as follows:\r
+ * <UL>\r
+ * <LI>\r
+ * OPEN_CONGLOMERATE - return # of openConglomerate() calls not close()'d.\r
+ * <LI>\r
+ * OPEN_SCAN - return # of openScan() + openSortScan() calls not\r
+ * close()'d.\r
+ * <LI>\r
+ * OPEN_CREATED_SORTS - return # of sorts created (createSort()) in \r
+ * current xact. There is currently no way to get\r
+ * rid of these sorts before end of transaction.\r
+ * <LI>\r
+ * OPEN_SORT - return # of openSort() calls not close()'d.\r
+ * <LI>\r
+ * OPEN_TOTAL - return total # of all above calls not close()'d.\r
+ * </UL>\r
+ * - note an implementation may return -1 if it does not track the\r
+ * above information.\r
+ * <p>\r
+ * @return The number of open's of a type indicated by "which_to_count"\r
+ * parameter.\r
+ *\r
+ * @param which_to_count Which kind of open to report on.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public int countOpens(int which_to_count)\r
+ throws StandardException;\r
+\r
+\r
+ /**\r
+ * Return a string with debug information about opened congloms/scans/sorts.\r
+ * <p>\r
+ * Return a string with debugging information about current opened\r
+ * congloms/scans/sorts which have not been close()'d.\r
+ * Calls to this routine are only valid under code which is conditional\r
+ * on SanityManager.DEBUG.\r
+ * <p>\r
+ *\r
+ * @return String with debugging information.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public String debugOpened() throws StandardException;\r
+\r
+\r
+ /**\r
+ Get an object to handle non-transactional files.\r
+ */\r
+ public FileResource getFileHandler();\r
+\r
+ /**\r
+ * Return an object that when used as the compatibility space for a lock\r
+ * request, <strong>and</strong> the group object is the one returned by a\r
+ * call to <code>getOwner()</code> on that object, guarantees that the lock\r
+ * will be removed on a commit or an abort.\r
+ */\r
+ public CompatibilitySpace getLockSpace();\r
+\r
+ /**\r
+ * Return static information about the conglomerate to be included in a\r
+ * a compiled plan.\r
+ * <p>\r
+ * The static info would be valid until any ddl was executed on the \r
+ * conglomid, and would be up to the caller to throw away when that \r
+ * happened. This ties in with what language already does for other \r
+ * invalidation of static info. The type of info in this would be \r
+ * containerid and array of format id's from which templates can be created.\r
+ * The info in this object is read only and can be shared among as many \r
+ * threads as necessary.\r
+ * <p>\r
+ *\r
+ * @return The static compiled information.\r
+ *\r
+ * @param conglomId The identifier of the conglomerate to open.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public StaticCompiledOpenConglomInfo getStaticCompiledConglomInfo(\r
+ long conglomId)\r
+ throws StandardException;\r
+\r
+ /**\r
+ * Return dynamic information about the conglomerate to be dynamically \r
+ * reused in repeated execution of a statement.\r
+ * <p>\r
+ * The dynamic info is a set of variables to be used in a given \r
+ * ScanController or ConglomerateController. It can only be used in one \r
+ * controller at a time. It is up to the caller to insure the correct \r
+ * thread access to this info. The type of info in this is a scratch \r
+ * template for btree traversal, other scratch variables for qualifier \r
+ * evaluation, ...\r
+ * <p>\r
+ *\r
+ * @return The dynamic information.\r
+ *\r
+ * @param conglomId The identifier of the conglomerate to open.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public DynamicCompiledOpenConglomInfo getDynamicCompiledConglomInfo(\r
+ long conglomId)\r
+ throws StandardException;\r
+\r
+ /**************************************************************************\r
+ * Interfaces previously defined in TcCacheStatIface:\r
+ **************************************************************************\r
+ */\r
+\r
+ /**\r
+ Get cache statistics for the specified cache\r
+ */\r
+ public long[] getCacheStats(String cacheName);\r
+\r
+ /**\r
+ Reset the cache statistics for the specified cache\r
+ */\r
+ public void resetCacheStats(String cacheName);\r
+\r
+\r
+ /**************************************************************************\r
+ * Interfaces previously defined in TcLogIface:\r
+ **************************************************************************\r
+ */\r
+ /**\r
+ Log an operation and then action it in the context of this\r
+ transaction.\r
+\r
+ <P>This simply passes the operation to the RawStore which logs and\r
+ does it.\r
+ \r
+\r
+ @param operation the operation that is to be applied\r
+\r
+ @see org.apache.derby.iapi.store.raw.Loggable\r
+ @see org.apache.derby.iapi.store.raw.Transaction#logAndDo\r
+ @exception StandardException Standard Derby exception policy\r
+ **/\r
+ public void logAndDo(Loggable operation) throws StandardException;\r
+\r
+\r
+ /**************************************************************************\r
+ * Interfaces previously defined in TcSortIface:\r
+ **************************************************************************\r
+ */\r
+\r
+ /**\r
+ Create a sort. Rows are inserted into the sort with a\r
+ sort controller, and subsequently retrieved with a\r
+ sort scan controller. The rows come out in the order\r
+ specified by the parameters.\r
+ <p>\r
+ Sorts also do aggregation. The input (unaggregated) rows\r
+ have the same format as the aggregated rows, and the\r
+ aggregate results are part of both rows. The sorter,\r
+ when it notices that a row is a duplicate of another,\r
+ calls a user-supplied aggregation method (see interface\r
+ Aggregator), passing it both rows. One row is known as\r
+ the 'addend' and the other the 'accumulator'. The\r
+ aggregation method is assumed to merge the addend\r
+ into the accumulator. The sort then discards the addend\r
+ row.\r
+ <p>\r
+ So, for the query:\r
+ <pre><blockquote>\r
+ select a, sum(b)\r
+ from t\r
+ group by a\r
+ </blockquote></pre>\r
+ The input row to the sorter would have one column for\r
+ a and another column for sum(b). It is up to the caller\r
+ to get the format of the row correct, and to initialize\r
+ the aggregate values correctly (null for most aggregates,\r
+ 0 for count).\r
+ <p>\r
+ Nulls are always considered to be ordered in a sort, that is,\r
+ null compares equal to null, and less than anything else.\r
+\r
+ @param implParameters Properties which help in choosing\r
+ implementation-specific sort options. If null, a\r
+ "generally useful" sort will be used.\r
+\r
+ @param template A row which is prototypical for the sort.\r
+ All rows inserted into the sort controller must have \r
+ exactly the same number of columns as the template row.\r
+ Every column in an inserted row must have the same type\r
+ as the corresponding column in the template.\r
+\r
+ @param columnOrdering An array which specifies which columns\r
+ participate in ordering - see interface ColumnOrdering for\r
+ details. The column referenced in the 0th columnOrdering\r
+ object is compared first, then the 1st, etc. To sort on a single\r
+ column specify an array with a single entry.\r
+\r
+ @param sortObserver An object that is used to observe\r
+ the sort. It is used to provide a callback into the sorter.\r
+ If the sortObserver is null, then the sort proceeds as normal.\r
+ If the sortObserver is non null, then it is called as \r
+ rows are loaded into the sorter. It can be used to implement\r
+ a distinct sort, aggregates, etc.\r
+\r
+ @param alreadyInOrder Indicates that the rows inserted into\r
+ the sort controller will already be in order. This is used\r
+ to perform aggregation only.\r
+\r
+ @param estimatedRows The number of rows that the caller \r
+ estimates will be inserted into the sort. -1 indicates that\r
+ the caller has no idea. Used by the sort to make good choices\r
+ about in-memory vs. external sorting, and to size merge runs.\r
+\r
+ @param estimatedRowSize The estimated average row size of the\r
+ rows being sorted. This is the client portion of the rowsize, it should\r
+ not attempt to calculate Store's overhead. -1 indicates that the caller\r
+ has no idea (and the sorter will use 100 bytes in that case). Used by the \r
+ sort to make good choices about in-memory vs. external sorting, and to size\r
+ merge runs. The client is not expected to estimate the per column/\r
+ per row overhead of raw store, just to make a guess about the storage\r
+ associated with each row (ie. reasonable estimates for some implementations\r
+ would be 4 for int, 8 for long, 102 for char(100),\r
+ 202 for varchar(200), a number out of hat for user types, ...).\r
+\r
+ @return The sort identifier which can be used subsequently to\r
+ open sort controllers and scans.\r
+ \r
+ @see SortObserver\r
+ @see ColumnOrdering\r
+ @see ScanController\r
+ @see SortController\r
+\r
+ @exception StandardException From a lower-level exception.\r
+ **/\r
+ long createSort(\r
+ Properties implParameters,\r
+ DataValueDescriptor[] template,\r
+ ColumnOrdering columnOrdering[],\r
+ SortObserver sortObserver,\r
+ boolean alreadyInOrder,\r
+ long estimatedRows,\r
+ int estimatedRowSize)\r
+ throws StandardException;\r
+ /**\r
+ Drop a sort.\r
+ <p>\r
+ Drop a sort created by a call to createSort() within the current \r
+ transaction (sorts are automatically "dropped" at the end of a \r
+ transaction). This call should only be made after all openSortScan()'s\r
+ and openSort()'s have been closed.\r
+\r
+ @param sortid The identifier of the sort to drop, as returned from \r
+ createSort.\r
+\r
+ @exception StandardException From a lower-level exception.\r
+ **/\r
+ void dropSort(long sortid) throws StandardException;\r
+\r
+ /**\r
+ Open a sort controller for a sort previously created in this\r
+ transaction. Sort controllers are used to insert rows into\r
+ the sort.\r
+ <p>\r
+ There may (in the future) be multiple sort inserters\r
+ for a given sort, the idea being that the various threads of\r
+ a parallel query plan can all insert into the sort. For now,\r
+ however, only a single sort controller per sort is supported.\r
+\r
+ @param id The identifier of the sort to open, as returned from\r
+ createSort.\r
+\r
+ @return A sort controller to use for inserting rows into the sort.\r
+\r
+ @exception StandardException From a lower-level exception.\r
+ **/\r
+ \r
+ SortController openSort(long id)\r
+ throws StandardException;\r
+\r
+ /**\r
+ * Return an open SortCostController.\r
+ * <p>\r
+ * Return an open SortCostController which can be used to ask about \r
+ * the estimated costs of SortController() operations.\r
+ * <p>\r
+ * @param implParameters Properties which help in choosing \r
+ * implementation-specific sort options. If null, a\r
+ * "generally useful" sort will be used.\r
+ *\r
+ * @return The open SortCostController.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ *\r
+ * @see SortCostController\r
+ **/\r
+ SortCostController openSortCostController(\r
+ Properties implParameters)\r
+ throws StandardException;\r
+\r
+ /**\r
+ Open a scan for retrieving rows from a sort. Returns a RowSource for\r
+ retrieving rows from the sort.\r
+\r
+ @param id The identifier of the sort to scan, as returned\r
+ from createSort.\r
+\r
+ @return The RowSource for retrieving the sorted rows.\r
+\r
+ @exception StandardException From a lower-level exception.\r
+ **/\r
+ RowLocationRetRowSource openSortRowSource(long id)\r
+ throws StandardException;\r
+\r
+ /**\r
+ Open a scan for retrieving rows from a sort. Returns a\r
+ scan controller for retrieving rows from the sort (NOTE:\r
+ the only legal methods to use on the returned sort controller\r
+ are next() and fetch() - probably there should be scan\r
+ controllers and updatable scan controllers).\r
+ <p>\r
+ In the future, multiple sort scans on the same sort will\r
+ be supported (for parallel execution across a uniqueness\r
+ sort in which the order of the resulting rows is not\r
+ important). Currently, only a single sort scan is allowed\r
+ per sort.\r
+ <p>\r
+ In the future, it will be possible to open a sort scan\r
+ and start retrieving rows before the last row is inserted.\r
+ The sort controller would block till rows were available\r
+ to return. Currently, an attempt to retrieve a row before\r
+ the sort controller is closed will cause an exception.\r
+\r
+ @param id The identifier of the sort to scan, as returned from createSort.\r
+ @param hold If true, this scan will be maintained open over commits.\r
+\r
+ @return The scan controller for reading the sorted rows.\r
+\r
+ @exception StandardException From a lower-level exception.\r
+ **/\r
+\r
+ ScanController openSortScan(\r
+ long id,\r
+ boolean hold)\r
+ throws StandardException;\r
+\r
+\r
+ /**************************************************************************\r
+ * Interfaces previously defined in TcTransactionIface:\r
+ **************************************************************************\r
+ */\r
+\r
+ /**\r
+ Return true if any transaction is blocked (even if not by this one).\r
+\r
+ @return true if any transaction is currently blocked waiting for a lock.\r
+ */\r
+ public boolean anyoneBlocked();\r
+\r
+ /**\r
+ Abort all changes made by this transaction since the last commit, abort,\r
+ or the point the transaction was started, whichever is the most recent.\r
+ All savepoints within this transaction are released, and all resources\r
+ are released (held or non-held).\r
+\r
+ @exception StandardException Only exceptions with severities greater than\r
+ ExceptionSeverity.TRANSACTION_SEVERITY will be thrown.\r
+ **/\r
+ public void abort()\r
+ throws StandardException;\r
+\r
+ /**\r
+ Commit this transaction. All savepoints within this transaction are \r
+ released. All non-held conglomerates and scans are closed.\r
+\r
+ @exception StandardException Only exceptions with severities greater than\r
+ ExceptionSeverity.TRANSACTION_SEVERITY will be thrown.\r
+ If an exception is thrown, the transaction will not (necessarily) have \r
+ been aborted. The standard error handling mechanism is expected to do the \r
+ appropriate cleanup. In other words, if commit() encounters an error, the \r
+ exception is propagated up to the standard exception handler, which \r
+ initiates cleanupOnError() processing, which will eventually abort the \r
+ transaction.\r
+ **/\r
+ public void commit()\r
+ throws StandardException;\r
+\r
+ /**\r
+ "Commit" this transaction without sync'ing the log. Everything else is\r
+ identical to commit(), use this at your own risk.\r
+\r
+ <BR>Bits in the commitflag can be turned on to fine tune the "commit":\r
+ KEEP_LOCKS - no locks will be released by the \r
+ commit and no post commit processing \r
+ will be initiated. If, for some \r
+ reasons, the locks cannot be kept \r
+ even if this flag is set, then the \r
+ commit will sync the log, i.e., it \r
+ will revert to the normal commit.\r
+\r
+ READONLY_TRANSACTION_INITIALIZATION - Special case used for processing\r
+ while creating the transaction. \r
+ Should only be used by the system\r
+ while creating the transaction to\r
+ commit readonly work that may have\r
+ been done using the transaction\r
+ while getting it setup to be used\r
+ by the user. In the future we should\r
+ instead use a separate transaction to\r
+ do this initialization. Will fail\r
+ if called on a transaction which\r
+ has done any updates.\r
+\r
+ @param commitflag a bitmask of the commit flags defined below.\r
+ @return the commit instant of this transaction.\r
+ @see TransactionController#commit\r
+\r
+ @exception StandardException Only exceptions with severities greater than\r
+ ExceptionSeverity.TRANSACTION_SEVERITY will be thrown.\r
+ If an exception is thrown, the transaction will not (necessarily) have \r
+ been aborted. The standard error handling mechanism is expected to do the \r
+ appropriate cleanup. In other words, if commit() encounters an error, the \r
+ exception is propagated up to the standard exception handler, which \r
+ initiates cleanupOnError() processing, which will eventually abort the \r
+ transaction.\r
+ **/\r
+ public DatabaseInstant commitNoSync(int commitflag)\r
+ throws StandardException;\r
+\r
+ /** Commit flag: release locks as in a normal commit (presumably the default\r
+ behavior - see commitNoSync). */\r
+ public final int RELEASE_LOCKS = 0x1;\r
+ /** Commit flag: keep locks across the commit and skip post-commit\r
+ processing; reverts to a normal (synced) commit if locks cannot be kept. */\r
+ public final int KEEP_LOCKS = 0x2;\r
+ /** Commit flag: system-only commit of read-only work done while setting up\r
+ the transaction; fails if the transaction has done any updates. */\r
+ public final int READONLY_TRANSACTION_INITIALIZATION = 0x4;\r
+\r
+ /**\r
+ Abort the current transaction and pop the transaction's context off the\r
+ context stack.\r
+ **/\r
+ public void destroy();\r
+\r
+ /**\r
+ * Get the context manager that the transaction was created with.\r
+ *\r
+ * @return The context manager that the transaction was created with.\r
+ **/\r
+ public ContextManager getContextManager();\r
+\r
+ /**\r
+ * Get string id of the transaction.\r
+ * <p>\r
+ * This transaction "name" will be the same id which is returned in\r
+ * the TransactionInfo information, used by the lock and transaction\r
+ * vti's to identify transactions.\r
+ * <p>\r
+ * Although implementation specific, the transaction id is usually a number\r
+ * which is bumped every time a commit or abort is issued.\r
+ * <p>\r
+ *\r
+ * @return A string which identifies the transaction. \r
+ **/\r
+ public String getTransactionIdString();\r
+\r
+ /**\r
+ * Get the string id the transaction would have when it is in the active\r
+ * state. This method increments the transaction id of the current\r
+ * transaction object if it is in the idle state.\r
+ * Note: Use this method only when getTransactionIdString() is not suitable.\r
+ * @return The string which identifies the transaction. \r
+ **/\r
+ public String getActiveStateTxIdString();\r
+ \r
+\r
+ /**\r
+ * Reveals whether the transaction has ever read or written data.\r
+ *\r
+ * @return true if the transaction has never read or written data.\r
+ **/\r
+ boolean isIdle();\r
+\r
+ /**\r
+ * Reveals whether the transaction is a global or local transaction.\r
+ *\r
+ * @return true if the transaction was either started by \r
+ * AccessFactory.startXATransaction() or was morphed to a global\r
+ * transaction by calling createXATransactionFromLocalTransaction().\r
+ * \r
+ * @see AccessFactory#startXATransaction\r
+ * @see TransactionController#createXATransactionFromLocalTransaction\r
+ **/\r
+ boolean isGlobal();\r
+\r
+ /**\r
+ * Reveals whether the transaction is read only.\r
+ *\r
+ * @return true if the transaction is read only to this point.\r
+ **/\r
+ boolean isPristine();\r
+\r
+ /**\r
+ Release the save point of the given name. Releasing a savepoint removes all\r
+ knowledge from this transaction of the named savepoint and any savepoints\r
+ set since the named savepoint was set.\r
+\r
+ @param name The user provided name of the savepoint, set by the user\r
+ in the setSavePoint() call.\r
+ @param kindOfSavepoint A null value means it is an internal savepoint\r
+ (i.e. not a user defined savepoint). A non-null value means it is a user\r
+ defined savepoint, which can be a SQL savepoint or a JDBC savepoint:\r
+ a String value for kindOfSavepoint means it is a SQL savepoint; a JDBC\r
+ Savepoint object value for kindOfSavepoint means it is a JDBC savepoint.\r
+ @return the savepoint position in the stack.\r
+\r
+ @exception StandardException Standard Derby exception policy. A \r
+ statement level exception is thrown if\r
+ no savepoint exists with the given name.\r
+ **/\r
+ public int releaseSavePoint(String name, Object kindOfSavepoint) throws StandardException;\r
+\r
+ /**\r
+ Rollback all changes made since the named savepoint was set. The named\r
+ savepoint is not released, it remains valid within this transaction, and\r
+ thus can be named in future rollbackToSavePoint() calls. Any savepoints\r
+ set since this named savepoint are released (and their changes rolled back).\r
+ <p>\r
+ If "close_controllers" is true then all conglomerates and scans are closed\r
+ (held or non-held). \r
+ <p>\r
+ If "close_controllers" is false then no cleanup is done by the \r
+ TransactionController. It is then the responsibility of the caller to\r
+ close all resources that may have been affected by the statements \r
+ backed out by the call. This option is meant to be used by the Language\r
+ implementation of statement level backout, where the system "knows" what\r
+ could be affected by the scope of the statements executed within the \r
+ statement.\r
+ <p>\r
+\r
+ @param name The identifier of the SavePoint to roll back to.\r
+ @param close_controllers boolean indicating whether or not the controller \r
+ should close open controllers.\r
+ @param kindOfSavepoint A null value means it is an internal savepoint\r
+ (i.e. not a user defined savepoint). A non-null value means it is a user\r
+ defined savepoint, which can be a SQL savepoint or a JDBC savepoint:\r
+ a String value for kindOfSavepoint means it is a SQL savepoint; a JDBC\r
+ Savepoint object value for kindOfSavepoint means it is a JDBC savepoint.\r
+ @return the savepoint position in the stack.\r
+\r
+ @exception StandardException Standard Derby exception policy. A \r
+ statement level exception is thrown if\r
+ no savepoint exists with the given name.\r
+ **/\r
+ public int rollbackToSavePoint(\r
+ String name,\r
+ boolean close_controllers, Object kindOfSavepoint)\r
+ throws StandardException;\r
+\r
+\r
+ /**\r
+ Set a save point in the current transaction. A save point defines a point in\r
+ time in the transaction that changes can be rolled back to. Savepoints\r
+ can be nested and they behave like a stack. Setting save points "one" and\r
+ "two" and then rolling back "one" will rollback all the changes made since\r
+ "one" (including those made since "two") and release savepoint "two".\r
+\r
+ @param name The user provided name of the savepoint.\r
+ @param kindOfSavepoint A null value means it is an internal savepoint\r
+ (i.e. not a user defined savepoint). A non-null value means it is a user\r
+ defined savepoint, which can be a SQL savepoint or a JDBC savepoint:\r
+ a String value for kindOfSavepoint means it is a SQL savepoint; a JDBC\r
+ Savepoint object value for kindOfSavepoint means it is a JDBC savepoint.\r
+ @return the savepoint position in the stack.\r
+\r
+ @exception StandardException Standard Derby exception policy. A \r
+ statement level exception is thrown if\r
+ no savepoint exists with the given name.\r
+ **/\r
+ public int setSavePoint(String name, Object kindOfSavepoint) throws StandardException;\r
+\r
+ /**\r
+ * Convert a local transaction to a global transaction.\r
+ * <p>\r
+ * Get a transaction controller with which to manipulate data within\r
+ * the access manager. This controller allows one to manipulate a\r
+ * global XA conforming transaction.\r
+ * <p>\r
+ * Must only be called after a previous local transaction was created and\r
+ * exists in the context. Can only be called if the current transaction is\r
+ * in the idle state. Upon return from this call the old tc will be\r
+ * unusable, and all references to it should be dropped (it will have been\r
+ * implicitly destroy()'d by this call).\r
+ * <p>\r
+ * The (format_id, global_id, branch_id) triplet is meant to come exactly\r
+ * from a javax.transaction.xa.Xid. We don't use Xid so that the system\r
+ * can be delivered on a non-1.2 vm system and not require the javax classes\r
+ * in the path. \r
+ *\r
+ * @param format_id the format identifier part of the XID - ie.\r
+ * Xid.getFormatId().\r
+ * @param global_id the global transaction identifier part of XID - ie.\r
+ * Xid.getGlobalTransactionId().\r
+ * @param branch_id The branch qualifier of the Xid - ie. \r
+ * Xid.getBranchQualifier()\r
+ * \r
+ * @exception StandardException Standard exception policy.\r
+ * @see TransactionController\r
+ **/\r
+ /* XATransactionController */ Object createXATransactionFromLocalTransaction(\r
+ int format_id,\r
+ byte[] global_id,\r
+ byte[] branch_id)\r
+ throws StandardException;\r
+\r
+}\r