--- /dev/null
+/*\r
+\r
+ Derby - Class org.apache.derby.impl.store.raw.data.StoredPage\r
+\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements. See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to you under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License. You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+\r
+*/\r
+\r
+package org.apache.derby.impl.store.raw.data;\r
+\r
+import org.apache.derby.iapi.reference.SQLState;\r
+\r
+import org.apache.derby.impl.store.raw.data.BasePage;\r
+\r
+import org.apache.derby.impl.store.raw.data.LongColumnException;\r
+import org.apache.derby.impl.store.raw.data.OverflowInputStream;\r
+import org.apache.derby.impl.store.raw.data.PageVersion;\r
+import org.apache.derby.impl.store.raw.data.RecordId;\r
+import org.apache.derby.impl.store.raw.data.RawField;\r
+import org.apache.derby.impl.store.raw.data.ReclaimSpace;\r
+import org.apache.derby.impl.store.raw.data.StoredFieldHeader;\r
+import org.apache.derby.impl.store.raw.data.StoredRecordHeader;\r
+\r
+import org.apache.derby.iapi.services.io.FormatIdUtil;\r
+import org.apache.derby.iapi.services.io.FormatIdInputStream;\r
+import org.apache.derby.iapi.services.io.FormatIdOutputStream;\r
+import org.apache.derby.iapi.services.io.StoredFormatIds;\r
+import org.apache.derby.iapi.services.io.StreamStorable;\r
+import org.apache.derby.iapi.services.io.TypedFormat;\r
+import org.apache.derby.iapi.services.sanity.SanityManager;\r
+\r
+import org.apache.derby.iapi.store.access.conglomerate.LogicalUndo;\r
+import org.apache.derby.iapi.store.access.Qualifier;\r
+import org.apache.derby.iapi.store.access.RowUtil;\r
+\r
+import org.apache.derby.iapi.store.raw.ContainerHandle;\r
+import org.apache.derby.iapi.store.raw.FetchDescriptor;\r
+import org.apache.derby.iapi.store.raw.Page;\r
+import org.apache.derby.iapi.store.raw.PageKey;\r
+import org.apache.derby.iapi.store.raw.PageTimeStamp;\r
+import org.apache.derby.iapi.store.raw.RawStoreFactory;\r
+import org.apache.derby.iapi.store.raw.RecordHandle;\r
+import org.apache.derby.iapi.store.raw.log.LogInstant;\r
+import org.apache.derby.iapi.store.raw.xact.RawTransaction;\r
+\r
+import org.apache.derby.iapi.error.StandardException;\r
+\r
+import org.apache.derby.iapi.types.DataValueDescriptor;\r
+\r
+import org.apache.derby.iapi.types.Orderable;\r
+\r
+import org.apache.derby.iapi.services.io.ArrayInputStream;\r
+import org.apache.derby.iapi.services.io.ArrayOutputStream;\r
+import org.apache.derby.iapi.services.io.FormatableBitSet;\r
+import org.apache.derby.iapi.util.ByteArray;\r
+import org.apache.derby.iapi.services.io.CompressedNumber;\r
+import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;\r
+import org.apache.derby.iapi.services.io.DynamicByteArrayOutputStream;\r
+import org.apache.derby.iapi.services.io.LimitObjectInput;\r
+import org.apache.derby.iapi.services.io.ErrorObjectInput;\r
+\r
+\r
+import java.util.zip.CRC32;\r
+\r
+import java.io.IOException;\r
+import java.io.EOFException;\r
+import java.io.Externalizable;\r
+import java.io.InvalidClassException;\r
+\r
+import java.io.ObjectOutput;\r
+import java.io.ObjectInput;\r
+import java.io.DataInput;\r
+import java.io.DataOutput;\r
+import java.io.InputStream;\r
+import java.io.OutputStream;\r
+import java.io.ByteArrayInputStream;\r
+import java.io.ByteArrayOutputStream;\r
+\r
+\r
+/**\r
+ StoredPage is a sub class of CachedPage that stores page data in a\r
+ fixed size byte array and is designed to be written out to a file\r
+ through a DataInput/DataOutput interface. A StoredPage can exist\r
+ in its clean or dirty state without the FileContainer it was created\r
+ from being in memory.\r
+\r
+ <P><B>Page Format</B><BR>\r
+ The page is broken into five sections\r
+ <PRE>\r
+ +----------+-------------+-------------------+-------------------+----------+\r
+ | formatId | page header | records | slot offset table | checksum |\r
+ +----------+-------------+-------------------+-------------------+----------+\r
+ </PRE>\r
+ <BR><B>FormatId</B><BR>\r
+ The formatId is a 4 bytes array, it contains the format Id of this page.\r
+ <BR><B>Page Header</B><BR>\r
+ The page header is a fixed size, 56 bytes \r
+ <PRE>\r
+ 1 byte boolean is page an overflow page\r
+ 1 byte byte page status (a field maintained in base page)\r
+ 8 bytes long pageVersion (a field maintained in base page)\r
+ 2 bytes unsigned short number of slots in slot offset table\r
+ 4 bytes integer next record identifier\r
+ 4 bytes integer generation number of this page (Future Use)\r
+ 4 bytes integer previous generation of this page (Future Use)\r
+ 8 bytes bipLocation the location of the beforeimage page (Future Use)\r
+ 2 bytes unsigned short number of deleted rows on page. (new release 2.0)\r
+ 2 bytes unsigned short % of the page to keep free for updates\r
+ 2 bytes short spare for future use\r
+ 4 bytes long spare for future use (encryption uses to write \r
+ random bytes here).\r
+ 8 bytes long spare for future use\r
+ 8 bytes long spare for future use\r
+\r
+ </PRE>\r
+\r
+ Note that spare space is guaranteed to be written as "0", so any\r
+ future use of these fields should either not use "0" as a valid data\r
+ item, or pick 0 as the default value, so that on-the-fly upgrade can\r
+ assume that 0 means the field was never assigned.\r
+\r
+ <BR><B>Records</B>\r
+ The records section contains zero or more records, the format of each record\r
+ follows.\r
+ minimumRecordSize is the minimum user record size, excluding the space we\r
+ use for the record header and field headers. When a record is inserted, it\r
+ is stored in a space at least as large as the sum of the minimumRecordSize\r
+ and total header size.\r
+ For example,\r
+ If minimumRecordSize is 10 bytes,\r
+ the user record is 7 bytes,\r
+ we used 5 bytes for record and field headers,\r
+ this record will take (10 + 5) bytes of space, extra 3 bytes is \r
+ put into reserve.\r
+\r
+ If minimumRecordSize is 10 bytes,\r
+ user record is 17 bytes,\r
+ we used 5 bytes for record and field headers,\r
+ this record will take (17 + 5) bytes of space, no reserve space \r
+ here.\r
+\r
+ minimumRecordSize is defined by user on per container basis.\r
+ The default for minimumRecordSize is set to 1.\r
+\r
+ This implementation always keeps occupied bytes at the low end of the record \r
+ section. Thus removing (purging) a record moves all other records down, and\r
+ their slots are also moved down.\r
+ A page has no empty slot (an empty page has no slot)\r
+\r
+ <BR><B>Record & Field Format</B>\r
+\r
+ Record Header format is defined in the StoredRecordHeader class.\r
+ \r
+<PRE> \r
+ <BR><B>Fields</B>\r
+\r
+ 1 byte Boolean - is null, if true no more data follows.\r
+ 4 bytes Integer - length of field that follows (excludes these four bytes).\r
+\r
+ StoredPage will use the static method provided by StoredFieldHeader\r
+ to read/write field status and field data length.\r
+\r
+ Field Header format is defined in the StoredFieldHeader class.\r
+ <data>\r
+\r
+ </PRE>\r
+ <BR><B>Slot Offset Table</B><BR>\r
+ The slot offset table is a table of 6 or 12 bytes per record, depending on\r
+ the pageSize being less or greater than 64K:\r
+ 2 bytes (unsigned short) or 4 bytes (int) page offset for the record that\r
+ is assigned to the slot, and 2 bytes (unsigned short) or 4 bytes (int) \r
+ for the length of the record on this page.\r
+ 2 bytes (unsigned short) or 4 bytes (int) for the length of the reserved \r
+ number of bytes for this record on this page.\r
+ First slot is slot 0. The slot table grows backwards. Slots are never\r
+ left empty.\r
+ <BR><B>Checksum</B><BR>\r
+ 8 bytes of a java.util.zip.CRC32 checksum of the entire page's contents, \r
+ excluding the 8 bytes that hold the checksum itself.\r
+\r
+ <P><B>Page Access</B>\r
+ The page data is accessed in this class by one of three methods.\r
+ <OL>\r
+ <LI>As a byte array using pageData (field in cachedPage). This is the \r
+ fastest.\r
+ <LI>As an ArrayInputStream (rawDataIn) and ArrayOutputStream (rawDataOut),\r
+ this is used to set limits on any one reading the page logically.\r
+ <LI>Logically through rawDataIn (ArrayInputStream) and \r
+ logicalDataOut (FormatIdOutputStream), this provides the methods to write\r
+ logical data (e.g. booleans and integers etc.) and the ObjectInput\r
+ and ObjectOutput interfaces for DataValueDescriptor's. These logical\r
+ streams are constructed using the array streams.\r
+ </OL>\r
+\r
+ @see java.util.zip.CRC32\r
+ @see ArrayInputStream\r
+ @see ArrayOutputStream\r
+ **/\r
+\r
+public class StoredPage extends CachedPage\r
+{ \r
+ /**************************************************************************\r
+ * static final Fields of the class\r
+ **************************************************************************\r
+ */\r
+\r
+ /*\r
+ * typed format\r
+ */\r
+\r
+ public static final int FORMAT_NUMBER = \r
+ StoredFormatIds.RAW_STORE_STORED_PAGE;\r
+\r
+ /**\r
+ * Return my format identifier.\r
+ *\r
+ * @return the format id of this page type, always FORMAT_NUMBER.\r
+ **/\r
+ public int getTypeFormatId()\r
+ {\r
+ // return the class constant rather than re-reading StoredFormatIds,\r
+ // so the format id has a single source of truth (FORMAT_NUMBER).\r
+ return FORMAT_NUMBER;\r
+ }\r
+\r
+\r
+ /**\r
+ * Constants used to find different portions of data on the page. \r
+ * <p>\r
+ * The page is laid out as follows:\r
+ * The page is broken into five sections\r
+ * +----------+-------------+---------+-------------------+----------+\r
+ * | formatId | page header | records | slot offset table | checksum |\r
+ * +----------+-------------+---------+-------------------+----------+\r
+ *\r
+ * offset size section\r
+ * ------ ------------------- --------------------------\r
+ * 0 PAGE_FORMAT_ID_SIZE formatId\r
+ * PAGE_FORMAT_ID_SIZE: PAGE_HEADER_SIZE (56) page header\r
+ * RECORD_SPACE_OFFSET: variable records\r
+ **/\r
+\r
+\r
+ /**\r
+ * Start of page, formatId must fit in 4 bytes.\r
+ * <p>\r
+ * where the page header starts - page format is mandated by cached page\r
+ **/\r
+ protected static final int PAGE_HEADER_OFFSET = PAGE_FORMAT_ID_SIZE;\r
+\r
+\r
+ /**\r
+ * Fixed size of the page header\r
+ **/\r
+ protected static final int PAGE_HEADER_SIZE = 56;\r
+\r
+\r
+ /** \r
+ Start of the record storage area\r
+ */\r
+ /**\r
+ * Start of the record storage area.\r
+ * <p>\r
+ * Note: a subclass may change the start of the record storage area. \r
+ * Don't always count on this number.\r
+ **/\r
+ protected static final int RECORD_SPACE_OFFSET = \r
+ PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE;\r
+\r
+ /**\r
+ * offset of the page version number\r
+ **/\r
+ protected static final int PAGE_VERSION_OFFSET = PAGE_HEADER_OFFSET + 2;\r
+\r
+ /**\r
+ * SMALL_SLOT_SIZE are for pages smaller than 64K,\r
+ * LARGE_SLOT_SIZE is for pages bigger than 64K.\r
+ **/\r
+ protected static final int SMALL_SLOT_SIZE = 2;\r
+ protected static final int LARGE_SLOT_SIZE = 4;\r
+\r
+ /**\r
+ * Size of the checksum stored on the page.\r
+ *\r
+ * The checksum is stored in the last 8 bytes of the page, the slot table\r
+ * grows backward up the page starting at the end of the page just before\r
+ * the checksum.\r
+ **/\r
+ protected static final int CHECKSUM_SIZE = 8;\r
+\r
+ /**\r
+ * OVERFLOW_POINTER_SIZE - Number of bytes to reserve for overflow pointer\r
+ * \r
+ * The overflow pointer is the pointer that takes the place of the \r
+ * last column of a row if the row can't fit on the page. The pointer\r
+ * then points to another page where the next column of the row can be\r
+ * found. The overflow pointer can be bigger than a row, so when \r
+ * overflowing a row the code must overflow enough columns so that there\r
+ * is enough free space to write the row. Note this means that the\r
+ * minimum space a row can take on a page must allow for at least the\r
+ * size of the overflow pointers so that if the row is updated it can \r
+ * write the over flow pointer.\r
+ *\r
+ **/\r
+ protected static final int OVERFLOW_POINTER_SIZE = 12;\r
+\r
+ /**\r
+ * OVERFLOW_PTR_FIELD_SIZE - Number of bytes of an overflow field\r
+ * \r
+ * This is the length to reserve for either a column or row overflow\r
+ * pointer field. It includes the size of the field header plus the \r
+ * maximum length of the overflow pointer (it could be shorter due to\r
+ * compressed storage).\r
+ *\r
+ * The calculation is:\r
+ *\r
+ * OVERFLOW_PTR_FIELD_SIZE = \r
+ * OVERFLOW_POINTER_SIZE + \r
+ * sizeof(status byte) + \r
+ * sizeof(field length field for a field which is just an overflow ptr)\r
+ * \r
+ *\r
+ **/\r
+ protected static final int OVERFLOW_PTR_FIELD_SIZE = \r
+ OVERFLOW_POINTER_SIZE + 1 + 1;\r
+\r
+ /**\r
+ * In memory buffer used as scratch space for streaming columns.\r
+ **/\r
+ ByteHolder bh = null;\r
+\r
+ /**************************************************************************\r
+ * Fields of the class\r
+ **************************************************************************\r
+ */\r
+\r
+\r
+ /**\r
+ * Constants used in call to logColumn.\r
+ * <p>\r
+ * Action taken in this routine is determined by the kind of column as\r
+ * specified in the columnFlag:\r
+ * COLUMN_NONE - the column is insignificant\r
+ * COLUMN_FIRST - this is the first column in a logRow() call\r
+ * COLUMN_LONG - this is a known long column, therefore we will \r
+ * store part of the column on the current page and \r
+ * overflow the rest if necessary.\r
+ **/\r
+ protected static final int COLUMN_NONE = 0;\r
+ protected static final int COLUMN_FIRST = 1;\r
+ protected static final int COLUMN_LONG = 2;\r
+\r
+\r
+ /**\r
+ * maxFieldSize is a worst case calculation for the size of a record\r
+ * on an empty page, with a single field, but still allow room for \r
+ * an overflow pointer if another field is to be added. See initSpace().\r
+ * maxFieldSize is a worst case calculation for the size of a record\r
+ * \r
+ * This is used as the threshold for a long column.\r
+ * \r
+ * maxFieldSize = \r
+ * totalSpace - slotEntrySize - 16 - OVERFLOW_POINTER_SIZE;\r
+ **/\r
+ private int maxFieldSize;\r
+\r
+\r
+ /**\r
+ * The page header is a fixed size, 56 bytes, following are variables used\r
+ * to access the fields in the header:\r
+ * <p>\r
+ * 1 byte boolean isOverflowPage is page an overflow page\r
+ * 1 byte byte pageStatus page status (field in base page)\r
+ * 8 bytes long pageVersion page version (field in base page)\r
+ * 2 bytes ushort slotsInUse number of slots in slot offset table\r
+ * 4 bytes integer nextId next record identifier\r
+ * 4 bytes integer generation generation number of this page(FUTURE USE)\r
+ * 4 bytes integer prevGeneration previous generation of page (FUTURE USE)\r
+ * 8 bytes long bipLocation the location of the BI page (FUTURE USE)\r
+ * 2 bytes ushort deletedRowCount number of deleted rows on page.(rel 2.0)\r
+ * 2 bytes ushort spareSpace % of the page to keep free for updates\r
+ * 4 bytes long spare (encryption writes random bytes)\r
+ * 8 bytes long spare for future use\r
+ * 8 bytes long spare for future use\r
+ *\r
+ * Note that spare space is guaranteed to be written as "0", so any\r
+ * future use of these fields should either not use "0" as a valid data \r
+ * item, or pick 0 as the default value, so that on-the-fly upgrade can \r
+ * assume that 0 means the field was never assigned.\r
+ *\r
+ **/\r
+ private boolean isOverflowPage; // is page an overflow page?\r
+ private int slotsInUse; // number of slots in slot offset table.\r
+ private int nextId; // next record identifier\r
+ private int generation; // (Future Use) generation number of this page\r
+ private int prevGeneration; // (Future Use) previous generation of page\r
+ private long bipLocation; // (Future Use) the location of the BI page\r
+ private int deletedRowCount; // number of deleted rows on page.\r
+\r
+ /**\r
+ * Is the header in the byte array out of date wrt the fields.\r
+ * <p>\r
+ * this field must be set to true whenever one of the above header fields \r
+ * is modified. Ie any of (isOverflowPage, slotsInUse, nextId, generation,\r
+ * prevGeneration, bipLocation, deletedRowCount)\r
+ **/\r
+ private boolean headerOutOfDate;\r
+\r
+ /**\r
+ * holder for the checksum.\r
+ **/\r
+ private CRC32 checksum;\r
+\r
+ /**\r
+ * Minimum space to reserve for record portion length of row.\r
+ * <p>\r
+ * minimumRecordSize is stored in the container handle. It is used to \r
+ * reserved minimum space for recordPortionLength. Default is 1. To\r
+ * get the value from the container handle: \r
+ * myContainer.getMinimumRecordSize();\r
+ *\r
+ * minimumRecordSize is the minimum user record size, excluding the space we\r
+ * use for the record header and field headers. When a record is inserted,\r
+ * it is stored in a space at least as large as the sum of the \r
+ * minimumRecordSize and total header size.\r
+ *\r
+ * For example,\r
+ * If minimumRecordSize is 10 bytes,\r
+ * the user record is 7 bytes,\r
+ * we used 5 bytes for record and field headers,\r
+ * this record will take (10 + 5) bytes of space, extra 3 bytes is \r
+ * put into reserve.\r
+ *\r
+ * If minimumRecordSize is 10 bytes,\r
+ * user record is 17 bytes,\r
+ * we used 5 bytes for record and field headers,\r
+ * this record will take (17 + 5) bytes of space, no reserve space \r
+ * here.\r
+ *\r
+ * minimumRecordSize is defined by user on per container basis.\r
+ * The default for minimumRecordSize is set to 1.\r
+ *\r
+ **/\r
+ protected int minimumRecordSize;\r
+\r
+ /**\r
+ * scratch variable used to keep track of the total user size for the row.\r
+ * the information is used by logRow to maintain minimumRecordSize\r
+ * on Page. minimumRecordSize is only considered for main data pages,\r
+ * therefore, the page must be latched during an insert operation.\r
+ **/\r
+ private int userRowSize;\r
+ \r
+ /**\r
+ * slot field and slot entry size.\r
+ * <p>\r
+ * The size of these fields is dependent on the page size.\r
+ * These 2 variables should be set when pageSize is determined, and should\r
+ * not be changed for that page.\r
+ *\r
+ * Each slot entry contains 3 fields (slotOffet, recordPortionLength and\r
+ * reservedSpace) for the record the slot is pointing to.\r
+ * slotFieldSize is the size for each of the slot field.\r
+ * slotEntrySize is the total space used for a single slot entry.\r
+ **/\r
+ private int slotFieldSize;\r
+ private int slotEntrySize;\r
+\r
+ /**\r
+ * Offset of the first entry in the slot table.\r
+ * <p>\r
+ * Offset table is located at end of page, just before checksum. It\r
+ * grows backward as an array from this point toward the middle of the\r
+ * page.\r
+ * <p>\r
+ * slotTableOffsetToFirstEntry is the offset to the beginning of the\r
+ * first entry (slot[0]) in the slot table. This allows the following\r
+ * math to get to the offset of N'th entry in the slot table:\r
+ *\r
+ * offset of slot[N] = slotTableOffsetToFirstEntry + (N * slotEntrySize)\r
+ **/\r
+ private int slotTableOffsetToFirstEntry;\r
+\r
+ /**\r
+ * Offset of the record length entry in the 1st slot table entry.\r
+ * <p>\r
+ * Offset table is located at end of page, just before checksum. It\r
+ * grows backward as an array from this point toward the middle of the\r
+ * page. The record length is stored as the second "field" of the \r
+ * slot table entry.\r
+ * <p>\r
+ * slotTableOffsetToFirstRecordLengthField is the offset to the beginning \r
+ * of the record length field in the first entry (slot[0]) in the slot \r
+ * table. This allows the following\r
+ * math to get to the record length field of N'th entry in the slot table:\r
+ *\r
+ * offset of record length of slot[N] slot entry = \r
+ * slotTableOffsetToFirstRecordLengthField + (N * slotEntrySize)\r
+ **/\r
+ private int slotTableOffsetToFirstRecordLengthField;\r
+\r
+\r
+ /**\r
+ * Offset of the reserved space length entry in the 1st slot table entry.\r
+ * <p>\r
+ * Offset table is located at end of page, just before checksum. It\r
+ * grows backward as an array from this point toward the middle of the\r
+ * page. The reserved space length is stored as the third "field" of the \r
+ * slot table entry.\r
+ * <p>\r
+ * slotTableOffsetToFirstReservedSpaceField is the offset to the beginning \r
+ * of the reserved space field in the first entry (slot[0]) in the slot \r
+ * table. This allows the following\r
+ * math to get to the reserved space field of N'th entry in the slot table:\r
+ *\r
+ * offset of reserved space of slot[N] slot entry = \r
+ * slotTableOffsetToFirstReservedSpaceField + (N * slotEntrySize)\r
+ **/\r
+ private int slotTableOffsetToFirstReservedSpaceField;\r
+\r
+ /**\r
+ * total usable space on a page.\r
+ * <p>\r
+ * This is the space not taken by page hdr, page table, and existing\r
+ * slot entries/rows.\r
+ **/\r
+ protected int totalSpace; // total usable space on a page\r
+\r
+ // freeSpace and firstFreeByte are initialized to a minimum value.\r
+ protected int freeSpace = Integer.MIN_VALUE; // free space on the page\r
+ private int firstFreeByte = Integer.MIN_VALUE; // 1st free byte on page\r
+\r
+\r
+ /**\r
+ * % of page to keep free for updates.\r
+ * <p>\r
+ * How much of a head page should be reserved as "free" so that the space\r
+ * can be used by update which expands the row without needing to overflow\r
+ * it. 1 means save 1% of the free space for expansion.\r
+ **/\r
+ protected int spareSpace;\r
+\r
+ /**\r
+ * Scratch variable used when you need a overflowRecordHeader. Declared\r
+ * globally so that object is only allocated once per page.\r
+ **/\r
+ private StoredRecordHeader overflowRecordHeader;\r
+\r
+ /**\r
+ * Input streams used to read/write bytes to/from the page byte array.\r
+ **/\r
+ protected ArrayInputStream rawDataIn;\r
+ protected ArrayOutputStream rawDataOut;\r
+ protected FormatIdOutputStream logicalDataOut;\r
+\r
+\r
+ /**************************************************************************\r
+ * Constructors for This class:\r
+ **************************************************************************\r
+ */\r
+\r
+\r
+ /**\r
+ * No-argument constructor; real setup is deferred to initialize(),\r
+ * which the cache invokes when this page object is (re)used.\r
+ **/\r
+ public StoredPage()\r
+ {\r
+ // the implicit super() call is sufficient; no state to set up here\r
+ }\r
+\r
+ /**************************************************************************\r
+ * Private/Protected methods of This class:\r
+ **************************************************************************\r
+ */\r
+\r
+ /**\r
+ * Return the scratch StoredRecordHeader used for overflow records.\r
+ * <p>\r
+ * Allocated lazily on first use so that each page creates at most\r
+ * one such scratch object.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private StoredRecordHeader getOverFlowRecordHeader()\r
+ throws StandardException\r
+ {\r
+ if (overflowRecordHeader == null)\r
+ {\r
+ overflowRecordHeader = new StoredRecordHeader();\r
+ }\r
+\r
+ return overflowRecordHeader;\r
+ }\r
+\r
+ /**\r
+ * Initialize the StoredPage.\r
+ * <p>\r
+ * Initialize the object, ie. perform work normally performed in constructor.\r
+ * Called by setIdentity() and createIdentity() - the Cacheable interfaces\r
+ * which are used to move a page in/out of cache.\r
+ **/\r
+ protected void initialize()\r
+ {\r
+ super.initialize();\r
+\r
+ // the input stream and checksum holder are allocated once per object\r
+ // and reused across re-identifications of this cached page\r
+ if (rawDataIn == null) \r
+ {\r
+ rawDataIn = new ArrayInputStream();\r
+ checksum = new CRC32();\r
+ }\r
+\r
+ // pageData may still be null if no page buffer has been attached yet;\r
+ // in that case usePageBuffer() hooks it up to rawDataIn later\r
+ if (pageData != null)\r
+ rawDataIn.setData(pageData);\r
+ }\r
+\r
+\r
+ /**\r
+ * Create the output streams.\r
+ * <p>\r
+ * Create the output streams, these are created on demand\r
+ * to avoid creating unrequired objects for pages that are\r
+ * never modified during their lifetime in the cache.\r
+ * <p>\r
+ * Note: declares no checked exceptions; a previous doc tag claiming\r
+ * StandardException was incorrect and has been removed.\r
+ **/\r
+ private void createOutStreams()\r
+ {\r
+ rawDataOut = new ArrayOutputStream();\r
+ rawDataOut.setData(pageData);\r
+\r
+ // the logical stream layers formatted (object) writes on top of\r
+ // the raw byte-array stream\r
+ logicalDataOut = new FormatIdOutputStream(rawDataOut);\r
+ }\r
+\r
+ /**\r
+ * Attach the logical output stream to a caller-supplied OutputStream.\r
+ * <p>\r
+ * No limit is placed on the number of bytes that can be written\r
+ * through the redirected stream.\r
+ **/\r
+ private void setOutputStream(OutputStream out)\r
+ {\r
+ // make sure the stream objects exist before redirecting them\r
+ if (rawDataOut == null)\r
+ {\r
+ createOutStreams();\r
+ }\r
+\r
+ logicalDataOut.setOutput(out);\r
+ }\r
+\r
+ /**\r
+ * Reset the logical output stream.\r
+ * <p>\r
+ * Reset the logical output stream (logicalDataOut) to be attached\r
+ * to the page array stream as is the norm, no limits are placed \r
+ * on any writes.\r
+ *\r
+ **/\r
+ private void resetOutputStream()\r
+ {\r
+\r
+ // undo any redirection previously done by setOutputStream()\r
+ logicalDataOut.setOutput(rawDataOut);\r
+ }\r
+\r
+ /**************************************************************************\r
+ * Protected Methods of CachedPage class: (create, read and write a page.)\r
+ **************************************************************************\r
+ */\r
+\r
+ /**\r
+ * use this passed in page buffer as this object's page data.\r
+ * <p>\r
+ * The page content may not have been read in from disk yet.\r
+ * For pagesize smaller than 64K:\r
+ * Size of the record offset stored in a slot (unsigned short)\r
+ * Size of the record portion length stored in a slot (unsigned short)\r
+ * Size of the reserved space length stored in a slot (unsigned short)\r
+ * For pagesize greater than 64K, but less than 2gig:\r
+ * Size of the record offset stored in a slot (int)\r
+ * Size of the record portion length stored in a slot (int)\r
+ * Size of the reserved space length stored in a slot (int)\r
+ * <p>\r
+ *\r
+ * @param pageBuffer The array of bytes to use as the page buffer.\r
+ **/\r
+ protected void usePageBuffer(byte[] pageBuffer)\r
+ {\r
+ // adopt the caller's buffer directly; no copy is made\r
+ pageData = pageBuffer;\r
+\r
+ int pageSize = pageData.length;\r
+ if (rawDataIn != null)\r
+ rawDataIn.setData(pageData);\r
+\r
+ // Note that the slotFieldSize and slotEntrySize need to be\r
+ // calculated BEFORE initSpace() is called, because the\r
+ // maxFieldSize computation in initSpace() includes these\r
+ // values in its calculations. (DERBY-3099)\r
+ slotFieldSize = calculateSlotFieldSize(pageSize);\r
+ slotEntrySize = 3 * slotFieldSize;\r
+\r
+ initSpace();\r
+\r
+ // offset of slot table entry[0]\r
+ slotTableOffsetToFirstEntry = \r
+ (pageSize - CHECKSUM_SIZE - slotEntrySize);\r
+\r
+ // offset of record length field in slot table entry[0]\r
+ slotTableOffsetToFirstRecordLengthField = \r
+ slotTableOffsetToFirstEntry + slotFieldSize;\r
+\r
+ // offset of reserved space field in slot table entry[0]\r
+ slotTableOffsetToFirstReservedSpaceField =\r
+ slotTableOffsetToFirstEntry + (2 * slotFieldSize);\r
+\r
+ // keep the output stream (if already created) in sync with the buffer\r
+ if (rawDataOut != null)\r
+ rawDataOut.setData(pageData);\r
+ }\r
+\r
+ /**\r
+ * Compute the width of a single slot-table field for a given page size.\r
+ * <p>\r
+ * Pages smaller than 64KB address offsets with an unsigned short\r
+ * (2 bytes); pages of 64KB or more need a full int (4 bytes).\r
+ *\r
+ * @param pageSize page size in bytes\r
+ * @return slot field size in bytes\r
+ */\r
+ private int calculateSlotFieldSize(int pageSize) {\r
+ return (pageSize < 65536) ? SMALL_SLOT_SIZE : LARGE_SLOT_SIZE;\r
+ }\r
+\r
+ /**\r
+ * Create a new StoredPage.\r
+ * <p>\r
+ * Make this object represent a new page (ie. a page that never existed\r
+ * before, as opposed to reading in an existing page from disk).\r
+ * <p>\r
+ *\r
+ * @param newIdentity The key describing page (segment,container,page).\r
+ * @param args information stored about the page, once in the \r
+ * container header and passed in through the object.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ protected void createPage(PageKey newIdentity, PageCreationArgs args)\r
+ throws StandardException\r
+ {\r
+\r
+ // cache the per-container settings carried in the creation args\r
+ spareSpace = args.spareSpace;\r
+ minimumRecordSize = args.minimumRecordSize;\r
+\r
+ setPageArray(args.pageSize);\r
+\r
+ cleanPage(); // clean up the page array\r
+\r
+ setPageVersion(0); // page is being created for the first time\r
+\r
+ nextId = RecordHandle.FIRST_RECORD_ID; // first record Id\r
+ generation = 0;\r
+ prevGeneration = 0; // there is no previous generation\r
+ bipLocation = 0L;\r
+\r
+ // NOTE(review): newIdentity is not used in this body; presumably the\r
+ // page identity is established by the caller -- confirm.\r
+ createOutStreams();\r
+ }\r
+\r
+ /**\r
+ * Initialize the page from values in the page buffer.\r
+ * <p>\r
+ * Initialize in memory structure using the buffer in pageData. This\r
+ * is how a StoredPage object is initialized to represent page read in\r
+ * from disk.\r
+ * <p>\r
+ *\r
+ * @param myContainer The container to read the page in from.\r
+ * @param newIdentity The key representing page being read in (segment,\r
+ * container, page number)\r
+ *\r
+ * @exception StandardException If the page cannot be read correctly, \r
+ * or is inconsistent.\r
+ **/\r
+ protected void initFromData(\r
+ FileContainer myContainer, \r
+ PageKey newIdentity)\r
+ throws StandardException \r
+ {\r
+ if (myContainer != null)\r
+ {\r
+ // read in info about page stored once in the container header.\r
+\r
+ spareSpace = myContainer.getSpareSpace();\r
+ minimumRecordSize = myContainer.getMinimumRecordSize();\r
+ }\r
+\r
+ // if it is null, assume spareSpace and minimumRecordSize is the\r
+ // same. We would only call initFromData after a restore then.\r
+\r
+ try \r
+ {\r
+ readPageHeader();\r
+ initSlotTable(newIdentity);\r
+ }\r
+ catch (IOException ioe) \r
+ {\r
+ // i/o methods on the byte array have thrown an IOException\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, ioe, newIdentity));\r
+ }\r
+\r
+ try\r
+ {\r
+ validateChecksum(newIdentity);\r
+ }\r
+ catch (StandardException se)\r
+ {\r
+ if (se.getMessageId().equals(SQLState.FILE_BAD_CHECKSUM))\r
+ {\r
+ // it is remotely possible that the disk transfer got garbled, \r
+ // i.e., the page is actually fine on disk but the version we\r
+ // got has some rubbish on it. Double check.\r
+ int pagesize = getPageSize();\r
+ byte[] corruptPage = pageData;\r
+ pageData = null; // clear this\r
+\r
+ // set up the new page array\r
+ setPageArray(pagesize);\r
+\r
+ // NOTE(review): if myContainer is null here (the restore path\r
+ // described above), this re-read would throw a\r
+ // NullPointerException; presumably checksum failures cannot\r
+ // occur on that path -- confirm.\r
+ try \r
+ {\r
+ myContainer.readPage(newIdentity.getPageNumber(), pageData);\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, ioe, newIdentity));\r
+ }\r
+\r
+ // clear the debug flag so the second validateChecksum() call\r
+ // is not forced to fail again (see TEST_BAD_CHECKSUM handling\r
+ // in validateChecksum)\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ SanityManager.DEBUG_CLEAR("TEST_BAD_CHECKSUM");\r
+ }\r
+ \r
+ // see if this read confirms the checksum error\r
+ try\r
+ {\r
+ validateChecksum(newIdentity);\r
+ }\r
+ catch (StandardException sse)\r
+ {\r
+ // really bad\r
+ throw dataFactory.markCorrupt(se);\r
+ }\r
+\r
+ // If we got here, this means the first read is bad but the\r
+ // second read is good. This could be due to disk I/O error or\r
+ // a bug in the way the file pointer is mis-managed.\r
+ String firstImage = pagedataToHexDump(corruptPage);\r
+ // NOTE(review): the non-debug branch below dumps corruptPage\r
+ // again rather than the freshly read pageData; looks like the\r
+ // second image was intended to show the good page -- confirm.\r
+ String secondImage = \r
+ (SanityManager.DEBUG) ? \r
+ toString() : pagedataToHexDump(corruptPage);\r
+\r
+ throw StandardException.newException(\r
+ SQLState.FILE_IO_GARBLED, se,\r
+ newIdentity, firstImage, secondImage);\r
+ }\r
+ else\r
+ {\r
+ throw se;\r
+ }\r
+ }\r
+ \r
+\r
+ }\r
+\r
+ /**\r
+ * Validate the check sum on the page.\r
+ * <p>\r
+ * Compare the check sum stored in the page on disk with the checksum\r
+ * calculated from the bytes on the page.\r
+ * <p>\r
+ *\r
+ * @param id The key that describes the page.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ protected void validateChecksum(PageKey id) \r
+ throws StandardException\r
+ {\r
+ long onDiskChecksum;\r
+\r
+ try \r
+ {\r
+ // read the checksum stored on the page on disk. It is stored\r
+ // in the last "CHECKSUM_SIZE" bytes of the page, and is a long.\r
+\r
+ rawDataIn.setPosition(getPageSize() - CHECKSUM_SIZE);\r
+ onDiskChecksum = rawDataIn.readLong();\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+\r
+ // i/o methods on the byte array have thrown an IOException\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, ioe, id));\r
+ }\r
+\r
+ // Force the checksum to be recalculated based on the current page,\r
+ // covering everything except the trailing CHECKSUM_SIZE bytes.\r
+ checksum.reset();\r
+ checksum.update(pageData, 0, getPageSize() - CHECKSUM_SIZE);\r
+ \r
+ // force a bad checksum error (test hook)\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ if (SanityManager.DEBUG_ON("TEST_BAD_CHECKSUM"))\r
+ {\r
+ // set on disk checksum to wrong value\r
+ onDiskChecksum = 123456789; \r
+ }\r
+ }\r
+\r
+ if (onDiskChecksum != checksum.getValue())\r
+ {\r
+ // try again using new checksum object to be doubly sure\r
+ CRC32 newChecksum = new CRC32();\r
+ newChecksum.reset();\r
+ newChecksum.update(pageData, 0, getPageSize()-CHECKSUM_SIZE);\r
+ if (onDiskChecksum != newChecksum.getValue())\r
+ {\r
+ // both checksum objects agree: the page really is bad.\r
+ // Long.valueOf replaces the deprecated new Long(...) ctor.\r
+ throw StandardException.newException(\r
+ SQLState.FILE_BAD_CHECKSUM,\r
+ id, \r
+ Long.valueOf(checksum.getValue()), \r
+ Long.valueOf(onDiskChecksum), \r
+ pagedataToHexDump(pageData));\r
+ }\r
+ else\r
+ {\r
+ // old one is bad, get rid of it\r
+ if (SanityManager.DEBUG)\r
+ SanityManager.THROWASSERT("old checksum gets wrong value");\r
+\r
+ checksum = newChecksum;\r
+ }\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Recalculate checksum and write it to the page array.\r
+ * <p>\r
+ * Recalculate the checksum of the page, and write the result back into\r
+ * the last bytes of the page.\r
+ *\r
+ * @exception IOException if writing to end of array fails.\r
+ **/\r
+ protected void updateChecksum() throws IOException\r
+ {\r
+ checksum.reset();\r
+ checksum.update(pageData, 0, getPageSize() - CHECKSUM_SIZE);\r
+\r
+ rawDataOut.setPosition(getPageSize() - CHECKSUM_SIZE);\r
+ logicalDataOut.writeLong(checksum.getValue());\r
+ }\r
+\r
+ /**\r
+ * Write information about page from variables into page byte array.\r
+ * <p>\r
+ * This routine insures that all information about the page is reflected\r
+ * in the page byte buffer. This involves moving information from local\r
+ * variables into encoded version on the page in page header and checksum.\r
+ * <p>\r
+ *\r
+ * @param identity The key of this page.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ protected void writePage(PageKey identity) \r
+ throws StandardException \r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ // some consistency checks on fields of the page, good to check\r
+ // before we write them into the page.\r
+\r
+ if ((freeSpace < 0) || \r
+ (firstFreeByte + freeSpace) != (getSlotOffset(slotsInUse - 1))) \r
+ {\r
+ // make sure free space is not negative and does not overlap\r
+ // used space.\r
+\r
+ SanityManager.THROWASSERT("slotsInUse = " + slotsInUse\r
+ + ", firstFreeByte = " + firstFreeByte\r
+ + ", freeSpace = " + freeSpace \r
+ + ", slotOffset = " + (getSlotOffset(slotsInUse - 1))\r
+ + ", page = " + this);\r
+ }\r
+\r
+ if ((slotsInUse == 0) &&\r
+ (firstFreeByte != (getPageSize() - totalSpace - CHECKSUM_SIZE))) \r
+ {\r
+ SanityManager.THROWASSERT("slotsInUse = " + slotsInUse\r
+ + ", firstFreeByte = " + firstFreeByte\r
+ + ", freeSpace = " + freeSpace \r
+ + ", slotOffset = " + (getSlotOffset(slotsInUse - 1))\r
+ + ", page = " + this);\r
+ }\r
+\r
+ }\r
+\r
+ try \r
+ {\r
+ if (headerOutOfDate)\r
+ {\r
+ updatePageHeader();\r
+ }\r
+ else\r
+ {\r
+ // page version always need to be updated if page is dirty,\r
+ // either do it in updatePageHeader or by itself\r
+ updatePageVersion();\r
+ }\r
+\r
+ updateChecksum();\r
+\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ // i/o methods on the byte array have thrown an IOException\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, ioe, identity));\r
+ }\r
+\r
+ }\r
+\r
+ /**\r
+ * Write out the format id of this page\r
+ *\r
+ * @param identity The key of this page.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ protected void writeFormatId(PageKey identity) throws StandardException\r
+ {\r
+ try\r
+ {\r
+ if (rawDataOut == null)\r
+ createOutStreams();\r
+\r
+ rawDataOut.setPosition(0);\r
+\r
+ FormatIdUtil.writeFormatIdInteger(\r
+ logicalDataOut, getTypeFormatId());\r
+\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ // i/o methods on the byte array have thrown an IOException\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, ioe, identity));\r
+ }\r
+ }\r
+\r
+\r
+ /**************************************************************************\r
+ * Protected Methods of Cacheable Interface:\r
+ **************************************************************************\r
+ */\r
+\r
+ /**************************************************************************\r
+ * Protected OverRidden Methods of BasePage:\r
+ **************************************************************************\r
+ */\r
+\r
    /**
     * Ensure that the page is released from the cache when it is unlatched.
     *
     * @see org.apache.derby.impl.store.raw.data.BasePage#releaseExclusive
     *
     **/
    protected void releaseExclusive()
    {
        // drop the latch first, via the superclass implementation
        super.releaseExclusive();

        // then tell the page cache this page may be evicted
        pageCache.release(this);
    }
+\r
+\r
+ /**\r
+ * Return the total number of bytes used, reserved, or wasted by the\r
+ * record at this slot.\r
+ * <p>\r
+ * The amount of space the record on this slot is currently taking on the \r
+ * page.\r
+ *\r
+ * If there is any reserve space or wasted space, count that in also\r
+ * Do NOT count the slot entry size\r
+ * <p>\r
+ *\r
+ * @return The number of bytes used by the row at slot "slot".\r
+ *\r
+ * @param slot look at row at this slot.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public int getTotalSpace(int slot) \r
+ throws StandardException\r
+ {\r
+ try \r
+ {\r
+ // A slot entry looks like the following:\r
+ // 1st field: offset of the record on the page\r
+ // 2nd field: length of the record on the page\r
+ // 3rd field: amount of space reserved for the record to grow.\r
+\r
+ // position the read at the beginning of the 2nd field.\r
+ rawDataIn.setPosition(getSlotOffset(slot) + slotFieldSize);\r
+\r
+ // return the size of the record + size of the reserved space. \r
+ // the size of the fields to read is determined by slotFieldSize.\r
+\r
+ return(\r
+ ((slotFieldSize == SMALL_SLOT_SIZE) ?\r
+ (rawDataIn.readUnsignedShort() + \r
+ rawDataIn.readUnsignedShort()) :\r
+ (rawDataIn.readInt() + \r
+ rawDataIn.readInt())));\r
+ \r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Is there minimal space for insert?\r
+ * <p>\r
+ * Does quick calculation to see if average size row on this page could\r
+ * be inserted on the page. This is done because the actual row size\r
+ * being inserted isn't known until we actually copy the columns from\r
+ * their object form into their on disk form which is expensive. So\r
+ * we use this calculation so that in the normal case we only do one \r
+ * copy of the row directly onto the page.\r
+ * <p>\r
+ *\r
+ * @return true if we think the page will allow an insert, false otherwise.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public boolean spaceForInsert() \r
+ throws StandardException\r
+ {\r
+ // is this an empty page\r
+ if (slotsInUse == 0)\r
+ return(true);\r
+\r
+ if (!allowInsert())\r
+ return(false);\r
+\r
+ int usedSpace = totalSpace - freeSpace;\r
+ int bytesPerRow = usedSpace / slotsInUse;\r
+\r
+ return(bytesPerRow <= freeSpace);\r
+ }\r
+\r
+ /**\r
+ * Is row guaranteed to be inserted successfully on this page?\r
+ * <p>\r
+ * Return true if this record is guaranteed to be inserted successfully \r
+ * using insert() or insertAtSlot(). This guarantee is only valid while\r
+ * the row remains unchanged and the page latch is held.\r
+ * <p>\r
+ *\r
+ * @return bolean indicating if row can be inserted on this page.\r
+ *\r
+ * @param row The row to check for insert.\r
+ * @param validColumns bit map to interpret valid columns in row.\r
+ * @param overflowThreshold The percentage of the page to use for the\r
+ * insert. 100 means use 100% of the page,\r
+ * 50 means use 50% of page (ie. make sure\r
+ * 2 rows fit per page).\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public boolean spaceForInsert(\r
+ Object[] row, \r
+ FormatableBitSet validColumns, \r
+ int overflowThreshold)\r
+ throws StandardException\r
+ {\r
+\r
+ // is this an empty page\r
+ if (slotsInUse == 0)\r
+ return true;\r
+ \r
+ // does the estimate think it won't fit, if not return false to avoid\r
+ // cost of calling logRow() just to figure out if the row will fit.\r
+ if (!allowInsert())\r
+ return false;\r
+\r
+ DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();\r
+\r
+ try \r
+ {\r
+ // This is a public call, start column is rawstore only. \r
+ // set the starting Column for the row to be 0.\r
+ logRow(\r
+ 0, true, nextId, row, validColumns, out, \r
+ 0, Page.INSERT_DEFAULT, -1, -1, overflowThreshold);\r
+\r
+ } \r
+ catch (NoSpaceOnPage nsop) \r
+ {\r
+ return false;\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);\r
+ }\r
+\r
+ return true;\r
+ }\r
+\r
+ /**\r
+ * Is row guaranteed to be inserted successfully on this page?\r
+ * <p>\r
+ * Return true if this record is guaranteed to be inserted successfully \r
+ * using insert() or insertAtSlot(). This guarantee is only valid while\r
+ * the row remains unchanged and the page latch is held.\r
+ * <p>\r
+ * This is a private call only used when calculating whether an overflow\r
+ * page can be used to insert part of an overflow row/column.\r
+ *\r
+ * @return bolean indicating if row can be inserted on this page.\r
+ *\r
+ * @param row The row to check for insert.\r
+ * @param validColumns bit map to interpret valid columns in row.\r
+ * @param overflowThreshold The percentage of the page to use for the\r
+ * insert. 100 means use 100% of the page,\r
+ * 50 means use 50% of page (ie. make sure\r
+ * 2 rows fit per page).\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private boolean spaceForInsert(\r
+ Object[] row, \r
+ FormatableBitSet validColumns,\r
+ int spaceNeeded,\r
+ int startColumn, \r
+ int overflowThreshold)\r
+ throws StandardException \r
+ {\r
+ if (!(spaceForInsert() && (freeSpace >= spaceNeeded)))\r
+ return false;\r
+\r
+ DynamicByteArrayOutputStream out = new DynamicByteArrayOutputStream();\r
+\r
+ try \r
+ {\r
+ logRow(\r
+ 0, true, nextId, row, validColumns, out, startColumn, \r
+ Page.INSERT_DEFAULT, -1, -1, overflowThreshold);\r
+\r
+ } \r
+ catch (NoSpaceOnPage nsop) \r
+ {\r
+ return false;\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);\r
+ }\r
+\r
+ return true;\r
+ }\r
+\r
+ /**\r
+ * Is this page unfilled?\r
+ * <p>\r
+ * Returns true if page is relatively unfilled, \r
+ * which means the page is < 1/2 full and has enough space to insert an\r
+ * "average" sized row onto the page.\r
+ * <p>\r
+ *\r
+ * @return true if page is relatively unfilled.\r
+ **/\r
+ public boolean unfilled()\r
+ {\r
+ return (allowInsert() && (freeSpace > (getPageSize() / 2)));\r
+ }\r
+\r
+ /**\r
+ * Is there enough space on the page to insert a minimum size row?\r
+ * <p>\r
+ * Calculate whether there is enough space on the page to insert a \r
+ * minimum size row. The calculation includes maintaining the required\r
+ * reserved space on the page for existing rows to grow on the page.\r
+ * <p>\r
+ *\r
+ * @return boolean indicating if a minimum sized row can be inserted.\r
+ **/\r
+ public boolean allowInsert()\r
+ {\r
+ // is this an empty page\r
+ if (slotsInUse == 0)\r
+ return true;\r
+\r
+ int spaceAvailable = freeSpace;\r
+\r
+ spaceAvailable -= slotEntrySize; // need to account new slot entry\r
+\r
+ if (spaceAvailable < minimumRecordSize)\r
+ return false;\r
+\r
+ // see that we reserve enough space for existing rows to grow on page\r
+ if (((spaceAvailable * 100) / totalSpace) < spareSpace)\r
+ return false;\r
+\r
+ return true;\r
+ }\r
+\r
+ /**\r
+ * Does this page have enough space to insert the input rows?\r
+ * <p>\r
+ * Can the rows with lengths spaceNeeded[0..num_rows-1] be copied onto\r
+ * this page?\r
+ * <p>\r
+ *\r
+ * @return true if the sum of the lengths will fit on the page.\r
+ *\r
+ * @param num_rows number of rows to check for.\r
+ * @param spaceNeeded array of lengths of the rows to insert.\r
+ **/\r
+ public boolean spaceForCopy(int num_rows, int[] spaceNeeded)\r
+ {\r
+ // determine how many more bytes are needed for the slot entries\r
+ int bytesNeeded = slotEntrySize * num_rows;\r
+\r
+ for (int i = 0; i < num_rows; i++) \r
+ {\r
+ if (spaceNeeded[i] > 0) \r
+ {\r
+ // add up the space needed by the rows, add in minimumRecordSize\r
+ // if length of actual row is less than minimumRecordSize.\r
+\r
+ bytesNeeded += \r
+ (spaceNeeded[i] >= minimumRecordSize ? \r
+ spaceNeeded[i] : minimumRecordSize);\r
+ }\r
+ }\r
+\r
+ return((freeSpace - bytesNeeded) >= 0);\r
+ }\r
+\r
+ protected boolean spaceForCopy(int spaceNeeded)\r
+ {\r
+ // add up the space needed by the rows, add in minimumRecordSize\r
+ // if length of actual row is less than minimumRecordSize.\r
+ int bytesNeeded = slotEntrySize + \r
+ (spaceNeeded >= minimumRecordSize ? \r
+ spaceNeeded : minimumRecordSize);\r
+\r
+ return((freeSpace - bytesNeeded) >= 0);\r
+ }\r
+\r
+ /**\r
+ * Read the record at the given slot into the given row.\r
+ * <P>\r
+ * This reads and initializes the columns in the row array from the raw \r
+ * bytes stored in the page associated with the given slot. If validColumns\r
+ * is non-null then it will only read those columns indicated by the bit\r
+ * set, otherwise it will try to read into every column in row[]. \r
+ * <P>\r
+ * If there are more columns than entries in row[] then it just stops after\r
+ * every entry in row[] is full.\r
+ * <P>\r
+ * If there are more entries in row[] than exist on disk, the requested \r
+ * excess columns will be set to null by calling the column's object's\r
+ * restoreToNull() routine (ie. ((Object) column).restoreToNull() ).\r
+ * <P>\r
+ * If a qualifier list is provided then the row will only be read from\r
+ * disk if all of the qualifiers evaluate true. Some of the columns may\r
+ * have been read into row[] in the process of evaluating the qualifier.\r
+ * <p>\r
+ * This routine should only be called on the head portion of a row, it\r
+ * will call a utility routine to read the rest of the row if it is a\r
+ * long row.\r
+ *\r
+ *\r
+ * @param slot the slot number\r
+ * @param row (out) filled in sparse row\r
+ * @param fetchDesc Information describing fetch, including what\r
+ * columns to fetch and qualifiers.\r
+ * @param recordToLock the record handle for the row at top level,\r
+ * and is used in OverflowInputStream to lock the \r
+ * row for Blobs/Clobs.\r
+ * @param isHeadRow The row on this page includes the head record\r
+ * handle. Will be false for the overflow portions\r
+ * of a "long" row, where columns of a row span\r
+ * multiple pages.\r
+ *\r
+ * @return false if a qualifier_list is provided and the row does not \r
+ * qualifier (no row read in that case), else true.\r
+ *\r
+ * @exception StandardException Standard Derby error policy\r
+ **/\r
+ protected boolean restoreRecordFromSlot(\r
+ int slot, \r
+ Object[] row, \r
+ FetchDescriptor fetchDesc,\r
+ RecordHandle recordToLock,\r
+ StoredRecordHeader recordHeader,\r
+ boolean isHeadRow)\r
+ throws StandardException\r
+ {\r
+ try \r
+ {\r
+ int offset_to_row_data = \r
+ getRecordOffset(slot) + recordHeader.size();\r
+\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ if (getRecordOffset(slot) < \r
+ (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE)) \r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "Incorrect offset. offset = " + \r
+ getRecordOffset(slot) + \r
+ ", offset should be < " + \r
+ "(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) = " + \r
+ (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) + \r
+ ", current slot = " + slot + \r
+ ", total slotsInUse = " + slotsInUse);\r
+ }\r
+\r
+ SanityManager.ASSERT(\r
+ isHeadRow, "restoreRecordFromSlot called on a non-headrow");\r
+ SanityManager.ASSERT(\r
+ !isOverflowPage(), \r
+ "restoreRecordFromSlot called on an overflow page.");\r
+ }\r
+\r
+ // position the array reading stream at beginning of row data just\r
+ // past the record header.\r
+ ArrayInputStream lrdi = rawDataIn;\r
+ lrdi.setPosition(offset_to_row_data);\r
+\r
+ if (!recordHeader.hasOverflow())\r
+ {\r
+ if (isHeadRow)\r
+ {\r
+ if (fetchDesc != null && \r
+ fetchDesc.getQualifierList() != null)\r
+ {\r
+ fetchDesc.reset();\r
+\r
+ if (!qualifyRecordFromSlot(\r
+ row, \r
+ offset_to_row_data, \r
+ fetchDesc,\r
+ recordHeader,\r
+ recordToLock))\r
+ {\r
+ return(false);\r
+ }\r
+ else\r
+ {\r
+ // reset position back for subsequent record read.\r
+ lrdi.setPosition(offset_to_row_data);\r
+ }\r
+ }\r
+ }\r
+\r
+ // call routine to do the real work. Note that \r
+ // readRecordFromStream() may return false for non-overflow\r
+ // record, this is in the case where caller requests more \r
+ // columns than exist on disk. In that case we still return\r
+ // true at this point as there are no more columns that we\r
+ // can return.\r
+ if (fetchDesc != null)\r
+ {\r
+ readRecordFromArray(\r
+ row, \r
+ (fetchDesc.getValidColumns() == null) ?\r
+ row.length -1 : fetchDesc.getMaxFetchColumnId(), \r
+ fetchDesc.getValidColumnsArray(), \r
+ fetchDesc.getMaterializedColumns(),\r
+ lrdi, \r
+ recordHeader,\r
+ recordToLock);\r
+ }\r
+ else\r
+ {\r
+ readRecordFromArray(\r
+ row, \r
+ row.length - 1,\r
+ (int[]) null,\r
+ (int[]) null,\r
+ lrdi, \r
+ recordHeader,\r
+ recordToLock);\r
+ }\r
+\r
+ return(true);\r
+ }\r
+ else\r
+ {\r
+ if (fetchDesc != null)\r
+ {\r
+ if (fetchDesc.getQualifierList() != null)\r
+ {\r
+ fetchDesc.reset();\r
+ }\r
+\r
+ readRecordFromArray(\r
+ row, \r
+ (fetchDesc.getValidColumns() == null) ?\r
+ row.length - 1 : fetchDesc.getMaxFetchColumnId(), \r
+ fetchDesc.getValidColumnsArray(), \r
+ fetchDesc.getMaterializedColumns(),\r
+ lrdi, \r
+ recordHeader,\r
+ recordToLock);\r
+ }\r
+ else\r
+ {\r
+ readRecordFromArray(\r
+ row, \r
+ row.length - 1,\r
+ (int[]) null,\r
+ (int[]) null,\r
+ lrdi, \r
+ recordHeader,\r
+ recordToLock);\r
+ }\r
+\r
+ // call routine to loop through all the overflow portions of\r
+ // the row, reading it into "row".\r
+ while (recordHeader != null)\r
+ {\r
+ // The record is a long row, loop callng code to read the \r
+ // pieces of the row located in a linked list of rows on \r
+ // overflow pages.\r
+ StoredPage overflowPage = \r
+ getOverflowPage(recordHeader.getOverflowPage());\r
+ \r
+ if (SanityManager.DEBUG)\r
+ {\r
+ if (overflowPage == null)\r
+ SanityManager.THROWASSERT(\r
+ "cannot get overflow page");\r
+ }\r
+\r
+ // This call reads in the columns of the row that reside\r
+ // on "overflowPage", and if there is another piece it\r
+ // returns the recordHeader of the row on overFlowPage,\r
+ // from which we can find the next piece of the row. A\r
+ // null return means that we have read in the entire row,\r
+ // and are done.\r
+ recordHeader = \r
+ overflowPage.restoreLongRecordFromSlot(\r
+ row, \r
+ fetchDesc,\r
+ recordToLock,\r
+ recordHeader);\r
+\r
+ overflowPage.unlatch();\r
+ overflowPage = null;\r
+ }\r
+\r
+ // for overflow rows just apply qualifiers at end for now.\r
+\r
+ if ((fetchDesc != null) && \r
+ (fetchDesc.getQualifierList() != null))\r
+ {\r
+ if (!qualifyRecordFromRow(\r
+ row, fetchDesc.getQualifierList()))\r
+ {\r
+ return(false);\r
+ }\r
+ }\r
+\r
+ return(true);\r
+ }\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ if (pageData == null)\r
+ {\r
+ SanityManager.DEBUG_PRINT("DEBUG_TRACE",\r
+ "caught an IOException in restoreRecordFromSlot " +\r
+ (PageKey)getIdentity() + " slot " + slot + \r
+ ", pageData is null");\r
+ }\r
+ else\r
+ {\r
+ SanityManager.DEBUG_PRINT("DEBUG_TRACE",\r
+ "caught an IOException in reestoreRecordFromSlot, " + \r
+ (PageKey)getIdentity() + " slot " + slot + \r
+ ", pageData.length = " + \r
+ pageData.length + " pageSize = " + getPageSize());\r
+ SanityManager.DEBUG_PRINT("DEBUG_TRACE",\r
+ "Hex dump of pageData \n " +\r
+ "--------------------------------------------------\n" +\r
+ pagedataToHexDump(pageData) + \r
+ "--------------------------------------------------\n");\r
+ SanityManager.DEBUG_PRINT("DEBUG_TRACE",\r
+ "Attempt to dump page " + this.toString());\r
+ }\r
+ }\r
+\r
+ // i/o methods on the byte array have thrown an IOException\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));\r
+ }\r
+ }\r
+\r
+ private StoredRecordHeader restoreLongRecordFromSlot(\r
+ Object[] row, \r
+ FetchDescriptor fetchDesc,\r
+ RecordHandle recordToLock,\r
+ StoredRecordHeader parent_recordHeader)\r
+ throws StandardException\r
+ {\r
+\r
+ int slot = \r
+ findRecordById(\r
+ parent_recordHeader.getOverflowId(), Page.FIRST_SLOT_NUMBER);\r
+\r
+ StoredRecordHeader recordHeader = getHeaderAtSlot(slot);\r
+\r
+ try \r
+ {\r
+ int offset_to_row_data = \r
+ getRecordOffset(slot) + recordHeader.size();\r
+\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ if (getRecordOffset(slot) < \r
+ (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE)) \r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "Incorrect offset. offset = " + \r
+ getRecordOffset(slot) + \r
+ ", offset should be < " + \r
+ "(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) = " + \r
+ (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) + \r
+ ", current slot = " + slot + \r
+ ", total slotsInUse = " + slotsInUse);\r
+ }\r
+ }\r
+\r
+ // position the array reading stream at beginning of row data \r
+ // just past the record header.\r
+ ArrayInputStream lrdi = rawDataIn;\r
+ lrdi.setPosition(offset_to_row_data);\r
+\r
+ if (fetchDesc != null)\r
+ {\r
+ if (fetchDesc.getQualifierList() != null)\r
+ {\r
+ fetchDesc.reset();\r
+ }\r
+\r
+ readRecordFromArray(\r
+ row, \r
+ (fetchDesc.getValidColumns() == null) ?\r
+ row.length - 1 : fetchDesc.getMaxFetchColumnId(), \r
+ fetchDesc.getValidColumnsArray(), \r
+ fetchDesc.getMaterializedColumns(),\r
+ lrdi, \r
+ recordHeader,\r
+ recordToLock);\r
+ }\r
+ else\r
+ {\r
+ readRecordFromArray(\r
+ row, \r
+ row.length - 1,\r
+ (int[]) null,\r
+ (int[]) null,\r
+ lrdi, \r
+ recordHeader,\r
+ recordToLock);\r
+ }\r
+\r
+ return(recordHeader.hasOverflow() ? recordHeader : null);\r
+ }\r
+ catch (IOException ioe) \r
+ {\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ if (pageData == null)\r
+ {\r
+ SanityManager.DEBUG_PRINT("DEBUG_TRACE",\r
+ "caught an IOException in restoreRecordFromSlot " +\r
+ (PageKey)getIdentity() + " slot " + slot + \r
+ ", pageData is null");\r
+ }\r
+ else\r
+ {\r
+ SanityManager.DEBUG_PRINT("DEBUG_TRACE",\r
+ "caught an IOException in reestoreRecordFromSlot, " + \r
+ (PageKey)getIdentity() + " slot " + slot + \r
+ ", pageData.length = " + \r
+ pageData.length + " pageSize = " + getPageSize());\r
+ SanityManager.DEBUG_PRINT("DEBUG_TRACE",\r
+ "Hex dump of pageData \n " +\r
+ "--------------------------------------------------\n" +\r
+ pagedataToHexDump(pageData) + \r
+ "--------------------------------------------------\n");\r
+ SanityManager.DEBUG_PRINT("DEBUG_TRACE",\r
+ "Attempt to dump page " + this.toString());\r
+ }\r
+ }\r
+\r
+ // i/o methods on the byte array have thrown an IOException\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Create a new record handle.\r
+ * <p>\r
+ * Return the next record id for allocation. Callers of this interface\r
+ * expect the next id to get bumped some where else - probably by\r
+ * storeRecordForInsert().\r
+ * <p>\r
+ *\r
+ * @return The next id to assing to a row.\r
+ **/\r
+ public int newRecordId()\r
+ {\r
+ return nextId;\r
+ }\r
+\r
+ /**\r
+ * Create a new record handle, and bump the id.\r
+ * <p>\r
+ * Create a new record handle, and bump the id while holding the latch\r
+ * so that no other user can ever see this record id. This will lead\r
+ * to unused record id's in the case where an insert fails because there\r
+ * is not enough space on the page.\r
+ * <p>\r
+ *\r
+ * @return The next id to assing to a row.\r
+ **/\r
+ public int newRecordIdAndBump()\r
+ {\r
+ // headerOutOfDate must be bumped as nextId is changing, and must\r
+ // eventually be updated in the page array.\r
+ headerOutOfDate = true; \r
+ \r
+ return nextId++;\r
+ }\r
+\r
+\r
    /**
     * Create a new record id based on current one passed in.
     * <p>
     * This interface is used for the "copy" insert interface of raw store
     * where multiple rows are inserted into a page in a single logged 
     * operation. We don't want to bump the id until the operation is logged
     * so we just allocated each id in order and then bump the next id at
     * the end of the operation.
     * <p>
     *
     * @return the next id based on the input id.
     *
     * @param recordId The id caller just used, return the next one.
     *
     **/
    protected int newRecordId(int recordId)
    {
        if (SanityManager.DEBUG) 
        {
            // ids below nextId have already been handed out; the caller
            // must not mint a duplicate.
            SanityManager.ASSERT(
                recordId >= nextId, 
                "should not create a record Id that is already given out");
        }

        // ids are allocated sequentially.
        return recordId + 1;
    }
+\r
+ public boolean isOverflowPage()\r
+ {\r
+ return isOverflowPage;\r
+ }\r
+\r
+\r
+\r
+ /**************************************************************************\r
+ * Public Methods specific to StoredPage:\r
+ **************************************************************************\r
+ */\r
+\r
+ /**\r
+ * Get the full size of the page.\r
+ **/\r
+ public final int getPageSize()\r
+ {\r
+ return pageData.length;\r
+ }\r
+\r
+\r
+ /**\r
+ * Zero out a portion of the page.\r
+ * <p>\r
+ **/\r
+ protected final void clearSection(int offset, int length)\r
+ {\r
+ int endOffset = offset + length;\r
+\r
+ while (offset < endOffset) \r
+ pageData[offset++] = 0;\r
+ }\r
+\r
+ /**\r
+ * The maximum free space on this page possible.\r
+ * <p>\r
+ * The the maximum amount of space that can be used on the page\r
+ * for the records and the slot offset table.\r
+ * NOTE: subclass may have overwitten it to report less freeSpace\r
+ *\r
+ * @return the maximum free space on this page possible.\r
+ *\r
+ **/\r
+ protected int getMaxFreeSpace()\r
+ {\r
+ return getPageSize() - RECORD_SPACE_OFFSET - CHECKSUM_SIZE;\r
+ }\r
+\r
+ /**\r
+ * The current free space on the page.\r
+ **/\r
+ protected int getCurrentFreeSpace()\r
+ {\r
+ return freeSpace;\r
+ }\r
+\r
+ /**************************************************************************\r
+ * Page header routines\r
+ **************************************************************************\r
+ */\r
+\r
    /**
     * Read the page header from the page array.
     * <p>
     * Read the page header from byte form in the page array into in memory
     * variables. The fields are read in their fixed on-page order starting
     * at PAGE_HEADER_OFFSET; this order must match updatePageHeader()
     * exactly.
     **/
    private void readPageHeader() 
        throws IOException
    {
        // these reads are always against the page array
        ArrayInputStream lrdi = rawDataIn;

        lrdi.setPosition(PAGE_HEADER_OFFSET);
        long spare;

        isOverflowPage = lrdi.readBoolean();   // overflow page flag
        setPageStatus  (lrdi.readByte());      // page status
        setPageVersion (lrdi.readLong());      // page version
        slotsInUse     = lrdi.readUnsignedShort(); // slots in use
        nextId         = lrdi.readInt();       // next record id
        generation     = lrdi.readInt();       // page generation (Future Use)
        prevGeneration = lrdi.readInt();       // previous generation (Future Use)
        bipLocation    = lrdi.readLong();      // BIPage location (Future Use)

        // number of deleted rows on page, we start to store this release 2.0.
        // for upgrade reasons, a 0 on disk means -1, so, we subtract one here.
        deletedRowCount = lrdi.readUnsignedShort() - 1;

        // the next 4 (total 22 bytes) are reserved for future
        spare = lrdi.readUnsignedShort(); 
        spare = lrdi.readInt();     // used by encryption
        spare = lrdi.readLong();
        spare = lrdi.readLong();
    }
+\r
+\r
    /**
     * Update the page header in the page array.
     * <p>
     * Write the bytes of the page header, taking the values from those 
     * in the in memory variables. The fields are written in their fixed
     * on-page order starting at PAGE_HEADER_OFFSET; this order must match
     * readPageHeader() exactly.
     **/
    private void updatePageHeader() 
        throws IOException
    {
        rawDataOut.setPosition(PAGE_HEADER_OFFSET);

        logicalDataOut.writeBoolean(isOverflowPage);     // overflow page flag
        logicalDataOut.writeByte(getPageStatus());       // page status
        logicalDataOut.writeLong(getPageVersion());      // page version
        logicalDataOut.writeShort(slotsInUse);           // slots in use
        logicalDataOut.writeInt(nextId);                 // next record id
        logicalDataOut.writeInt(generation);     // page generation (Future Use)
        logicalDataOut.writeInt(prevGeneration); // previous generation (Future Use)
        logicalDataOut.writeLong(bipLocation);   // BIPage location (Future Use)

        // number of deleted rows on page, we start to store this release 2.0.
        // for upgrade reasons, a 0 on disk means -1, so, we add one when we 
        // write it to disk.
        logicalDataOut.writeShort(deletedRowCount + 1);

        logicalDataOut.writeShort(0);   // reserved for future
        logicalDataOut.writeInt(
            dataFactory.random());      // random bytes for encryption 
        logicalDataOut.writeLong(0);    // reserved for future
        logicalDataOut.writeLong(0);    // reserved for future

        // we put a random value int into the page if the database is encrypted
        // so that the checksum will be very different even with the same
        // page image, when we encrypt or decrypt the page, we move the
        // checksum to the front so that the encrypted page will look very
        // different even with just the one int difference. We never look at
        // the value of the random number and we could have put it anywhere in
        // the page as long as it doesn't obscure real data.

        // the in-memory header and the encoded header now agree.
        headerOutOfDate = false;
    }
+\r
    /**
     * Update the page version number in the byte array.
     * <p>
     * Writes only the version field of the page header; used by
     * writePage() when the rest of the header is already up to date.
     **/
    private void updatePageVersion() 
        throws IOException 
    {
        rawDataOut.setPosition(PAGE_VERSION_OFFSET);
        logicalDataOut.writeLong(getPageVersion());
    }
+\r
+ /**************************************************************************\r
+ * Slot Offset & Length table manipulation\r
+ **************************************************************************\r
+ */\r
+\r
+ /**\r
+ * Get the page offset of a given slot entry.\r
+ * <p>\r
+ * Get the page offset of a slot entry, this is not the offset of\r
+ * the record stored in the slot, but the offset of the actual slot.\r
+ *\r
+ * @return The page offset of a given slot entry.\r
+ *\r
+ * @param slot The array entry of the slot to find.\r
+ **/\r
+ private int getSlotOffset(int slot)\r
+ {\r
+ // slot table grows backward from the spot at the end of the page just\r
+ // before the checksum which is located in the last 8 bytes of the page.\r
+\r
+ return(slotTableOffsetToFirstEntry - (slot * slotEntrySize));\r
+ }\r
+\r
+ /**\r
+ * Get the page offset of the record associated with the input slot.\r
+ * <p>\r
+ * This is the actual offset on the page of the beginning of the record.\r
+ *\r
+ * @return The page offset of the record associated with the input slot.\r
+ *\r
+ * @param slot The array entry of the slot to find.\r
+ **/\r
+ private int getRecordOffset(int slot) \r
+ {\r
+ byte[] data = pageData;\r
+ int offset = slotTableOffsetToFirstEntry - (slot * slotEntrySize);\r
+\r
+ // offset on the page of the record is stored in the first 2 or 4 bytes\r
+ // of the slot table entry. Code has been inlined for performance\r
+ // critical low level routine.\r
+ //\r
+ // return( \r
+ // (slotFieldSize == SMALL_SLOT_SIZE) ?\r
+ // readUnsignedShort() : readInt());\r
+\r
+ return(\r
+ (slotFieldSize == SMALL_SLOT_SIZE) ?\r
+\r
+ ((data[offset++] & 0xff) << 8) | \r
+ (data[offset] & 0xff) :\r
+\r
+ (((data[offset++] & 0xff) << 24) |\r
+ ((data[offset++] & 0xff) << 16) |\r
+ ((data[offset++] & 0xff) << 8) |\r
+ ((data[offset] & 0xff) )));\r
+ }\r
+\r
+ /**\r
+ * Set the page offset of the record associated with the input slot.\r
+ * <p>\r
+ * This is the actual offset on the page of the beginning of the record.\r
+ *\r
+ * @param slot The array entry of the slot to set.\r
+ * @param recordOffset the new offset to set.\r
+ **/\r
+ private void setRecordOffset(int slot, int recordOffset) \r
+ throws IOException\r
+ {\r
+ rawDataOut.setPosition(getSlotOffset(slot));\r
+\r
+ if (slotFieldSize == SMALL_SLOT_SIZE)\r
+ logicalDataOut.writeShort(recordOffset);\r
+ else\r
+ logicalDataOut.writeInt(recordOffset);\r
+ }\r
+\r
+ /**\r
+ * Return length of row on this page.\r
+ * <p>\r
+ * Return the total length of data and header stored on this page for \r
+ * this record. This length is stored as the second "field" of the\r
+ * slot table entry.\r
+ *\r
+ * @return The length of the row on this page.\r
+ *\r
+ * @param slot the slot of the row to look up the length of.\r
+ *\r
+ **/\r
+ protected int getRecordPortionLength(int slot) \r
+ throws IOException\r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ SanityManager.ASSERT(getRecordOffset(slot) != 0);\r
+ }\r
+\r
+ // these reads are always against the page array\r
+ ArrayInputStream lrdi = rawDataIn;\r
+\r
+ lrdi.setPosition(\r
+ slotTableOffsetToFirstRecordLengthField - (slot * slotEntrySize));\r
+\r
+ return( \r
+ (slotFieldSize == SMALL_SLOT_SIZE) ?\r
+ lrdi.readUnsignedShort() : lrdi.readInt());\r
+ }\r
+\r
+ /**\r
+ * Return reserved length of row on this page.\r
+ * <p>\r
+ * Return the reserved length of this record. \r
+ * This length is stored as the third "field" of the slot table entry.\r
+ *\r
+ * @return The reserved length of the row on this page.\r
+ *\r
+ * @param slot the slot of the row to look up the length of.\r
+ *\r
+ **/\r
+ public int getReservedCount(int slot) throws IOException\r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ SanityManager.ASSERT(getRecordOffset(slot) != 0);\r
+ }\r
+\r
+ // these reads are always against the page array\r
+ ArrayInputStream lrdi = rawDataIn;\r
+\r
+ lrdi.setPosition(\r
+ slotTableOffsetToFirstReservedSpaceField - (slot * slotEntrySize));\r
+\r
+ return( \r
+ (slotFieldSize == SMALL_SLOT_SIZE) ?\r
+ lrdi.readUnsignedShort() : lrdi.readInt());\r
+ }\r
+\r
+\r
+ /**\r
+ Update the length of data stored on this page for this record\r
+ */\r
+ /**\r
+ * Update the length of data stored on this page for this record\r
+ * <p>\r
+ * Update both the record length "field" and the reserved space "field"\r
+ * of the slot table entry associated with "slot". This length is stored \r
+ * as the second "field" of the slot table entry. The changes to these\r
+ * 2 fields are represented as the delta to apply to each field as input\r
+ * in "delta" and "reservedDelta."\r
+ * <p>\r
+ *\r
+ * @param slot the slot of the record to set.\r
+ * @param delta The amount the record length changed.\r
+ * @param reservedDelta The amount the reserved length changed.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private void updateRecordPortionLength(\r
+ int slot, \r
+ int delta, \r
+ int reservedDelta)\r
+ throws IOException\r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ SanityManager.ASSERT(getRecordOffset(slot) != 0);\r
+\r
+ if ((delta + reservedDelta) < 0)\r
+ SanityManager.THROWASSERT(\r
+ "total space of record is not allowed to shrink, delta == "\r
+ + delta + " reservedDelta = " + reservedDelta);\r
+\r
+ if ((getRecordPortionLength(slot) + delta) < 0)\r
+ SanityManager.THROWASSERT(\r
+ "record portion length cannot be < 0.recordPortionLength = "\r
+ + getRecordPortionLength(slot) + " delta = " + delta);\r
+\r
+ if ((getReservedCount(slot) + reservedDelta) < 0)\r
+ SanityManager.THROWASSERT(\r
+ "reserved space for record cannot be < 0. reservedCount = "\r
+ + getReservedCount(slot) + " reservedDelta = "\r
+ + reservedDelta);\r
+ }\r
+\r
+ // position the stream to beginning of 2nd field of slot entry.\r
+ rawDataOut.setPosition(\r
+ slotTableOffsetToFirstRecordLengthField - (slot * slotEntrySize));\r
+\r
+ // write the new record length to 2nd field\r
+ if (slotFieldSize == SMALL_SLOT_SIZE)\r
+ logicalDataOut.writeShort(getRecordPortionLength(slot) + delta);\r
+ else\r
+ logicalDataOut.writeInt(getRecordPortionLength(slot) + delta);\r
+\r
+ // if necessary, write the 3rd field - above write has positioned the\r
+ // stream to the 3rd field.\r
+ if (reservedDelta != 0) \r
+ {\r
+ if (slotFieldSize == SMALL_SLOT_SIZE)\r
+ {\r
+ logicalDataOut.writeShort(\r
+ getReservedCount(slot) + reservedDelta);\r
+ }\r
+ else\r
+ {\r
+ logicalDataOut.writeInt(\r
+ getReservedCount(slot) + reservedDelta);\r
+ }\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Initialize the in-memory slot table.\r
+ * <p>\r
+ * Initialize the in-memory slot table, ie. that of our super-class \r
+ * BasePage. Go through all the records on the page and set the \r
+ * freeSpace and firstFreeByte on page.\r
+ * <p>\r
+ * @param newIdentity The identity of the page we are trying to \r
+ * initialize, since we are in the middle of trying\r
+ * to build the page existing info in the class is\r
+ * not set up yet (like getIdentity()). \r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private void initSlotTable(\r
+ PageKey newIdentity)\r
+ throws StandardException\r
+ {\r
+ int localSlotsInUse = slotsInUse;\r
+\r
+ // must initialize the header now\r
+ initializeHeaders(localSlotsInUse);\r
+\r
+ // mark all the space on the page as free\r
+ clearAllSpace();\r
+ \r
+ // first count the space occupied by the slot table\r
+ freeSpace -= localSlotsInUse * slotEntrySize;\r
+\r
+ int lastSlotOnPage = -1;\r
+ int lastRecordOffset = -1;\r
+ \r
+ try \r
+ {\r
+ for (int slot = 0; slot < localSlotsInUse; slot++) \r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ if (!isOverflowPage() && \r
+ minimumRecordSize > getTotalSpace(slot))\r
+ {\r
+ SanityManager.THROWASSERT(\r
+ " slot " + slot +\r
+ " minimumRecordSize = " + minimumRecordSize + \r
+ " totalSpace = " + getTotalSpace(slot) + \r
+ "recordPortionLength = " + \r
+ getRecordPortionLength(slot) \r
+ + " reservedCount = " + getReservedCount(slot));\r
+ }\r
+ }\r
+\r
+ int recordOffset = getRecordOffset(slot);\r
+\r
+ // check that offset points into the record space area.\r
+ if ((recordOffset < RECORD_SPACE_OFFSET) || \r
+ (recordOffset >= (getPageSize() - CHECKSUM_SIZE))) \r
+ {\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, newIdentity));\r
+ }\r
+\r
+ if (recordOffset > lastRecordOffset) \r
+ {\r
+ lastRecordOffset = recordOffset;\r
+ lastSlotOnPage = slot;\r
+ }\r
+ }\r
+\r
+ bumpRecordCount(localSlotsInUse);\r
+\r
+ if (lastSlotOnPage != -1) \r
+ {\r
+ // Calculate the firstFreeByte for the page, \r
+ // and the freeSpace on Page\r
+\r
+ firstFreeByte = \r
+ lastRecordOffset + getTotalSpace(lastSlotOnPage);\r
+ freeSpace -= firstFreeByte - RECORD_SPACE_OFFSET;\r
+ }\r
+\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ if ((freeSpace < 0) || \r
+ ((firstFreeByte + freeSpace) != \r
+ (getSlotOffset(slotsInUse - 1)))) \r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "firstFreeByte = " + firstFreeByte\r
+ + ", freeSpace = " + freeSpace\r
+ + ", slotOffset = " + (getSlotOffset(slotsInUse - 1))\r
+ + ", slotsInUse = " + localSlotsInUse);\r
+ }\r
+\r
+ if (localSlotsInUse == 0)\r
+ {\r
+ SanityManager.ASSERT(\r
+ firstFreeByte == \r
+ (getPageSize() - totalSpace - CHECKSUM_SIZE));\r
+ }\r
+ }\r
+\r
+ // upgrade issue. Pre 1.5 release, we do not store deletedRowCount\r
+ // therefore, if we are accessing an older database,\r
+ // we need to calculate the deletedRowCount here.\r
+ if (deletedRowCount == -1) \r
+ {\r
+ int count = 0;\r
+ int maxSlot = slotsInUse;\r
+ for (int slot = FIRST_SLOT_NUMBER ; slot < maxSlot; slot++) \r
+ {\r
+ if (isDeletedOnPage(slot))\r
+ count++;\r
+ }\r
+ deletedRowCount = count;\r
+ }\r
+\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ // i/o methods on the byte array have thrown an IOException\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, ioe, newIdentity));\r
+ }\r
+ }\r
+\r
+ \r
+ /**\r
+ * Set up a new slot entry.\r
+ * <p>\r
+ *\r
+ * @param slot the slot to initialize.\r
+ * @param recordOffset the offset on the page to find the record.\r
+ * @param recordPortionLength the actual length of record+hdr on page.\r
+ * @param reservedSpace the reserved length associated with record.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private void setSlotEntry(\r
+ int slot, \r
+ int recordOffset, \r
+ int recordPortionLength, \r
+ int reservedSpace) \r
+ throws IOException\r
+ {\r
+ rawDataOut.setPosition(getSlotOffset(slot));\r
+\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ if ((recordPortionLength < 0) || \r
+ (reservedSpace < 0) || \r
+ (recordPortionLength >= getPageSize()) || \r
+ (reservedSpace >= getPageSize())) \r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "recordPortionLength and reservedSpace must " + \r
+ "be > 0, and < page size."\r
+ + " slot = " + slot\r
+ + ", in use = " + slotsInUse\r
+ + ", recordOffset = " + recordOffset\r
+ + ", recordPortionLength = " + recordPortionLength\r
+ + ", reservedSpace = " + reservedSpace);\r
+ }\r
+\r
+ if (recordOffset < (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE)) \r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "Record offset must be after the page header."\r
+ + " slot = " + slot\r
+ + ", in use = " + slotsInUse\r
+ + ", recordOffset = " + recordOffset\r
+ + ", recordPortionLength = " + recordPortionLength\r
+ + ", reservedSpace = " + reservedSpace);\r
+ }\r
+ }\r
+\r
+ if (slotFieldSize == SMALL_SLOT_SIZE) \r
+ {\r
+ logicalDataOut.writeShort(recordOffset);\r
+ logicalDataOut.writeShort(recordPortionLength);\r
+ logicalDataOut.writeShort(reservedSpace);\r
+ } \r
+ else \r
+ {\r
+ logicalDataOut.writeInt(recordOffset);\r
+ logicalDataOut.writeInt(recordPortionLength);\r
+ logicalDataOut.writeInt(reservedSpace);\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Insert a new slot entry into the current slot array.\r
+ * <p>\r
+ * Shift the existing slots from slot to (slotsInUse - 1) up by one.\r
+ * Up here means from low slot to high slot (e.g from slot 2 to slot 3).\r
+ * Our slot table grows backward so we have to be careful here.\r
+ *\r
+ * @param slot Position the new slot will take\r
+ * @param recordOffset Offset of the record for the new slot\r
+ * @param recordPortionLength Length of the record stored in the new slot\r
+ * @param reservedSpace Length of reserved space of record in slot\r
+ *\r
+ **/\r
+ private void addSlotEntry(\r
+ int slot, \r
+ int recordOffset, \r
+ int recordPortionLength, \r
+ int reservedSpace)\r
+ throws IOException\r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ if ((slot < 0) || (slot > slotsInUse))\r
+ SanityManager.THROWASSERT("invalid slot " + slot);\r
+ \r
+ if ((recordPortionLength < 0) || (reservedSpace < 0))\r
+ SanityManager.THROWASSERT(\r
+ "recordPortionLength and reservedSpace must be > 0." +\r
+ "recordPortionLength = " + recordPortionLength + \r
+ " reservedSpace = " + reservedSpace);\r
+\r
+ if (recordOffset < (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE)) \r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "Record offset must be after the page header."\r
+ + " slot = " + slot\r
+ + ", in use = " + slotsInUse\r
+ + ", recordOffset = " + recordOffset\r
+ + ", recordPortionLength = " + recordPortionLength\r
+ + ", reservedSpace = " + reservedSpace);\r
+ }\r
+ }\r
+\r
+ int newSlotOffset;\r
+\r
+ // TODO - (mikem) - I think the math below could be slightly optimized.\r
+\r
+ if (slot < slotsInUse) \r
+ {\r
+ // inserting a slot into the middle of array so shift all the \r
+ // slots from "slot" logically up by one\r
+\r
+ int startOffset = \r
+ getSlotOffset(slotsInUse - 1);\r
+\r
+ int length = \r
+ (getSlotOffset(slot) + slotEntrySize) - startOffset;\r
+\r
+ newSlotOffset = getSlotOffset(slotsInUse);\r
+\r
+ System.arraycopy(\r
+ pageData, startOffset, pageData, newSlotOffset, length);\r
+ } \r
+ else \r
+ {\r
+ // We are adding at end of slot table, so no moving necessary.\r
+ newSlotOffset = getSlotOffset(slot); \r
+ }\r
+\r
+ freeSpace -= slotEntrySize;\r
+\r
+ slotsInUse++;\r
+ headerOutOfDate = true; // headerOutOfDate must be set after setDirty\r
+ // because isDirty may be called unlatched\r
+\r
+ setSlotEntry(slot, recordOffset, recordPortionLength, reservedSpace);\r
+ }\r
+\r
+ /**\r
+ * Remove slot entry from slot array.\r
+ * <p>\r
+ * Remove a storage slot at slot. Shift the existing slots from\r
+ * slot+1 to (slotsInUse - 1) down by one..\r
+ * Down here means from high slot to low slot (e.g from slot 3 to slot 2)\r
+ *\r
+ * @param slot The slot to delete.\r
+ *\r
+ **/\r
+ private void removeSlotEntry(int slot) \r
+ throws IOException \r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ if ((slot < 0) || (slot >= slotsInUse))\r
+ SanityManager.THROWASSERT("invalid slot " + slot);\r
+ }\r
+\r
+ int oldEndOffset = getSlotOffset(slotsInUse - 1);\r
+ int newEndOffset = getSlotOffset(slotsInUse - 2);\r
+\r
+ if (slot != slotsInUse - 1) \r
+ {\r
+ // if not removing the last slot, need to shift \r
+\r
+ // now shift all the slots logically down by one\r
+ // from (slot+1 to slotsInUse-1) to (slot and slotsInUse-2)\r
+ int length = getSlotOffset(slot) - oldEndOffset;\r
+\r
+ System.arraycopy(\r
+ pageData, oldEndOffset, pageData, newEndOffset, length);\r
+ }\r
+\r
+ // clear out the last slot\r
+ clearSection(oldEndOffset, slotEntrySize);\r
+\r
+ // mark the space as free after we have removed the slot \r
+ // no need to keep the space reserved for rollback as this is only\r
+ // called for purge.\r
+ freeSpace += slotEntrySize;\r
+\r
+ slotsInUse--;\r
+\r
+ headerOutOfDate = true; // headerOutOfDate must be set after setDirty\r
+ // because isDirty maybe called unlatched\r
+ }\r
+\r
+ /**\r
+ * create the record header for the specific slot.\r
+ * <p>\r
+ * Create a new record header object, initialize it, and add it\r
+ * to the array of cache'd record headers on this page. Finally return\r
+ * reference to the initialized record header.\r
+ *\r
+ * @return The record header for the specific slot.\r
+ *\r
+ * @param slot return record header of this slot.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public StoredRecordHeader recordHeaderOnDemand(int slot)\r
+ {\r
+ StoredRecordHeader recordHeader = \r
+ new StoredRecordHeader(pageData, getRecordOffset(slot));\r
+\r
+ setHeaderAtSlot(slot, recordHeader);\r
+\r
+ return recordHeader;\r
+ }\r
+\r
+ /**************************************************************************\r
+ * Record based routines.\r
+ **************************************************************************\r
+ */\r
+\r
+ /**\r
+ * Is entire record on the page?\r
+ * <p>\r
+ *\r
+ * @return true if the entire record at slot is on this page, \r
+ * i.e, no overflow row or long columns.\r
+ *\r
+ * @param slot Check record at this slot.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public boolean entireRecordOnPage(int slot)\r
+ throws StandardException\r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ SanityManager.ASSERT(isLatched());\r
+ }\r
+\r
+ StoredRecordHeader recordHeader = getHeaderAtSlot(slot);\r
+\r
+ if (recordHeader.hasOverflow())\r
+ return false;\r
+\r
+ // the row chain does not overflow, we need to walk all the fields to\r
+ // make sure they are not long columns.\r
+\r
+ try \r
+ {\r
+\r
+ int offset = getRecordOffset(slot);\r
+ \r
+ if (SanityManager.DEBUG) \r
+ {\r
+ if (offset < (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE)) \r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "Incorrect offset. offset = " + offset + \r
+ ", offset should be < " +\r
+ "(PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) = " + \r
+ (PAGE_HEADER_OFFSET + PAGE_HEADER_SIZE) + \r
+ ", current slot = " + slot + \r
+ ", total slotsInUse = " + slotsInUse);\r
+ }\r
+\r
+ SanityManager.ASSERT(recordHeader.getFirstField() == 0,\r
+ "Head row piece should start at field 0 but is not");\r
+ }\r
+\r
+ int numberFields = recordHeader.getNumberFields();\r
+\r
+ // these reads are always against the page array\r
+ ArrayInputStream lrdi = rawDataIn;\r
+\r
+ // position after the record header, at 1st column.\r
+ lrdi.setPosition(offset + recordHeader.size());\r
+ \r
+ for (int i = 0; i < numberFields; i++) \r
+ {\r
+ int fieldStatus = StoredFieldHeader.readStatus(lrdi);\r
+ if (StoredFieldHeader.isOverflow(fieldStatus))\r
+ return false;\r
+\r
+ int fieldLength = \r
+ StoredFieldHeader.readFieldDataLength(\r
+ lrdi, fieldStatus, slotFieldSize);\r
+\r
+ if (fieldLength != 0)\r
+ lrdi.setPosition(lrdi.getPosition() + fieldLength);\r
+ }\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));\r
+ }\r
+\r
+ // we have examined all the fields on this page and none overflows\r
+ return true;\r
+ }\r
+\r
+ /**\r
+ * Purge one row on an overflow page. \r
+ * <p>\r
+ * HeadRowHandle is the recordHandle pointing to the head row piece.\r
+ * <p>\r
+ *\r
+ * @param slot slot number of row to purge.\r
+ * @param headRowHandle recordHandle of the head row piece.\r
+ * @param needDataLogged when true data is logged for purges otherwise just headers.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ protected void purgeOverflowAtSlot(\r
+ int slot, \r
+ RecordHandle headRowHandle,\r
+ boolean needDataLogged)\r
+ throws StandardException\r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ SanityManager.ASSERT(isLatched());\r
+ SanityManager.ASSERT(isOverflowPage());\r
+ }\r
+\r
+ if ((slot < 0) || (slot >= slotsInUse))\r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_SLOT_NOT_ON_PAGE);\r
+ }\r
+\r
+ // TODO (mikem) - should a global scratch variable be used?\r
+\r
+ // this is an overflow page purge, no need to lock the head row (it\r
+ // has already been locked, hopefully). No need to check for long rows\r
+ // (they have already been deleted, hopefully).\r
+ RawTransaction t = owner.getTransaction();\r
+ int[] recordId = new int[1];\r
+\r
+ recordId[0] = getHeaderAtSlot(slot).getId();\r
+\r
+ owner.getActionSet().actionPurge(t, this, slot, 1, recordId, needDataLogged);\r
+ }\r
+\r
+ /**\r
+ * Purge the column chain that starts at overflowPageId, overflowRecordId\r
+ * <p>\r
+ * Purge just the column chain that starts at the input address.\r
+ * The long column chain is pointed at by a field in a row. The long\r
+ * column is then chained as a sequence of "rows", the last column then\r
+ * points to the next segment of the chain on each page.\r
+ * Long columns chains currently are only one row per page so the next\r
+ * slot of a row in a long row chain should always be the first slot.\r
+ * <p>\r
+ *\r
+ * @param overflowPageId The page where the long column chain starts.\r
+ * @param overflowRecordId The record id where long column chain starts.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private void purgeOneColumnChain(\r
+ long overflowPageId, \r
+ int overflowRecordId)\r
+ throws StandardException\r
+ {\r
+ StoredPage pageOnColumnChain = null;\r
+ boolean removePageHappened = false;\r
+\r
+ try\r
+ {\r
+ while (overflowPageId != ContainerHandle.INVALID_PAGE_NUMBER) \r
+ {\r
+\r
+ // Now loop over the column chain and get all the column pieces.\r
+ pageOnColumnChain = getOverflowPage(overflowPageId);\r
+ removePageHappened = false;\r
+\r
+ if (pageOnColumnChain == null) \r
+ {\r
+ if (SanityManager.DEBUG)\r
+ SanityManager.THROWASSERT(\r
+ "got null page following long column chain. " +\r
+ "Head column piece at " + getIdentity() + \r
+ " null page at " + overflowPageId);\r
+\r
+ break; // Don't know what to do here, the column chain\r
+ // is broken. Don't bomb, go to the next field.\r
+ }\r
+ \r
+ int overflowSlotId = FIRST_SLOT_NUMBER;\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ int checkSlot = \r
+ pageOnColumnChain.findRecordById(\r
+ overflowRecordId, FIRST_SLOT_NUMBER);\r
+\r
+ if (overflowSlotId != checkSlot)\r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "Long column is not at the expected " +\r
+ FIRST_SLOT_NUMBER + " slot, instead at slot " + \r
+ checkSlot);\r
+ }\r
+\r
+ SanityManager.ASSERT(pageOnColumnChain.recordCount() == 1,\r
+ "long column page has > 1 record");\r
+ }\r
+\r
+ // Hold on to the pointer to next page on the chain before\r
+ // we remove the long column page.\r
+ RecordHandle nextColumnPiece =\r
+ pageOnColumnChain.getNextColumnPiece(overflowSlotId); \r
+\r
+ if (pageOnColumnChain.recordCount() == 1)\r
+ {\r
+ removePageHappened = true;\r
+ owner.removePage(pageOnColumnChain);\r
+ }\r
+ else\r
+ {\r
+ if (SanityManager.DEBUG)\r
+ SanityManager.THROWASSERT(\r
+ "page on column chain has more then one record" +\r
+ pageOnColumnChain.toString()); \r
+\r
+ pageOnColumnChain.unlatch();\r
+ pageOnColumnChain = null;\r
+ }\r
+\r
+ // Chase the column chain pointer.\r
+ if (nextColumnPiece != null) \r
+ {\r
+ overflowPageId = nextColumnPiece.getPageNumber();\r
+ overflowRecordId = nextColumnPiece.getId();\r
+ } \r
+ else\r
+ {\r
+ // terminate the loop\r
+ overflowPageId = ContainerHandle.INVALID_PAGE_NUMBER;\r
+ }\r
+ }\r
+ } \r
+ finally \r
+ {\r
+ // if we raised an exception before the page is removed, make sure\r
+ // we unlatch the page \r
+ \r
+ if (!removePageHappened && pageOnColumnChain != null) \r
+ {\r
+ pageOnColumnChain.unlatch();\r
+ pageOnColumnChain = null;\r
+ }\r
+ }\r
+ }\r
+\r
+ /**\r
+ * purge long columns chains which eminate from this page.\r
+ * <p>\r
+ * Purge all the long column chains emanating from the record on this slot\r
+ * of this page. The headRowHandle is the record handle of the head row\r
+ * piece of this row - if this page is the head row, then headRowHandle is\r
+ * the record handle at the slot. Otherwise, headRowHandle points to a\r
+ * row on a different page, i.e., the head page.\r
+ * <p>\r
+ *\r
+ * @param t The raw transaction doing the purging.\r
+ * @param slot The slot of the row to purge.\r
+ * @param headRowHandle The RecordHandle of the head row.\r
+ *\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private void purgeColumnChains(\r
+ RawTransaction t, \r
+ int slot, \r
+ RecordHandle headRowHandle)\r
+ throws StandardException\r
+ {\r
+ try\r
+ {\r
+ StoredRecordHeader recordHeader = getHeaderAtSlot(slot); \r
+\r
+ int numberFields = recordHeader.getNumberFields();\r
+\r
+ // these reads are always against the page array\r
+ ArrayInputStream lrdi = rawDataIn;\r
+\r
+ // position the stream to just after record header.\r
+ int offset = getRecordOffset(slot) + recordHeader.size();\r
+ lrdi.setPosition(offset);\r
+\r
+ for (int i = 0; i < numberFields; i++) \r
+ {\r
+ int fieldStatus = StoredFieldHeader.readStatus(lrdi);\r
+ int fieldLength = \r
+ StoredFieldHeader.readFieldDataLength(\r
+ lrdi, fieldStatus, slotFieldSize);\r
+\r
+ if (!StoredFieldHeader.isOverflow(fieldStatus)) \r
+ {\r
+ // skip this field, it is not an long column\r
+ if (fieldLength != 0)\r
+ lrdi.setPosition(lrdi.getPosition() + fieldLength);\r
+ continue;\r
+ }\r
+ else\r
+ {\r
+\r
+ // Got an overflow field. The column value is the \r
+ // <pageId, recordId> pair where the next column piece is \r
+ // residing \r
+\r
+ long overflowPageId = \r
+ CompressedNumber.readLong((InputStream)lrdi);\r
+ int overflowRecordId = \r
+ CompressedNumber.readInt((InputStream)lrdi);\r
+\r
+ purgeOneColumnChain(overflowPageId, overflowRecordId);\r
+ }\r
+ }\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Purge all the overflow columns and overflow rows of the record at slot.\r
+ * <p>\r
+ * Purge all the overflow columns and overflow rows of the record at slot.\r
+ * This is called by BasePage.purgeAtSlot, the head row piece is purged\r
+ * there. \r
+ * <p>\r
+ *\r
+ * @param t The raw transaction doing the purging.\r
+ * @param slot The slot of the row to purge.\r
+ * @param headRowHandle The RecordHandle of the head row.\r
+ * @param needDataLogged when true data is logged for purges otherwise just headers.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ protected void purgeRowPieces(\r
+ RawTransaction t, \r
+ int slot, \r
+ RecordHandle headRowHandle,\r
+ boolean needDataLogged) \r
+ throws StandardException\r
+ {\r
+ if (SanityManager.DEBUG)\r
+ SanityManager.ASSERT(isOverflowPage() == false,\r
+ "not expected to call purgeRowPieces on a overflow page");\r
+\r
+ // purge the long columns which start on this page.\r
+ purgeColumnChains(t, slot, headRowHandle);\r
+\r
+ // drive this loop from the head page. Walk each "long" row piece in \r
+ // the row chain.\r
+ StoredRecordHeader recordHeader = getHeaderAtSlot(slot);\r
+\r
+ while (recordHeader.hasOverflow()) \r
+ {\r
+\r
+ // nextPageInRowChain, is the page with the next row piece\r
+ StoredPage nextPageInRowChain = \r
+ getOverflowPage(recordHeader.getOverflowPage());\r
+\r
+ if (nextPageInRowChain == null) \r
+ {\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "got null page following long row chain. " +\r
+ "Head row piece at " + getIdentity() + " slot " +\r
+ slot + " headRecord " + headRowHandle + \r
+ ". Broken row chain at " +\r
+ recordHeader.getOverflowPage() + ", " +\r
+ recordHeader.getOverflowId());\r
+\r
+ }\r
+\r
+ break; // Don't know what to do here, the row chain is\r
+ // broken. Don't bomb, just return.\r
+ }\r
+\r
+ try \r
+ {\r
+\r
+ int nextPageSlot = \r
+ getOverflowSlot(nextPageInRowChain, recordHeader);\r
+\r
+ // First get rid of all long columns from the next row piece.\r
+ nextPageInRowChain.purgeColumnChains(\r
+ t, nextPageSlot, headRowHandle);\r
+\r
+ // Before we purge the next row piece, get the row header to\r
+ // see if we need to continue the loop.\r
+ recordHeader = nextPageInRowChain.getHeaderAtSlot(nextPageSlot);\r
+\r
+ // Lastly, purge the next row piece. If the next row piece is\r
+ // the only thing in the entire page, just deallocate the page.\r
+ // We can do this because the page is deallocated in this\r
+ // transaction. If we defer this to post commit processing,\r
+ // then we have to first purge the row piece and also remember\r
+ // the page time stamp.\r
+\r
+ if (nextPageSlot == 0 && nextPageInRowChain.recordCount() == 1)\r
+ {\r
+ // This is an overflow page and we just purged the last row.\r
+ // Free the page. Cannot do it in post commit because the\r
+ // head row is gone and cannot be locked at post commit to\r
+ // stablelize the row chain.\r
+\r
+ try \r
+ {\r
+ owner.removePage(nextPageInRowChain);\r
+ }\r
+ finally \r
+ {\r
+ // Remove Page guarantees to unlatch the page even\r
+ // if an exception is thrown, need not unlatch it\r
+ // again. \r
+ nextPageInRowChain = null;\r
+ }\r
+ }\r
+ else\r
+ {\r
+ nextPageInRowChain.purgeOverflowAtSlot(\r
+ nextPageSlot, headRowHandle, needDataLogged);\r
+\r
+ nextPageInRowChain.unlatch();\r
+ nextPageInRowChain = null;\r
+ }\r
+ } \r
+ finally \r
+ {\r
+ // Unlatch the next row piece before getting the next page in\r
+ // the row chain.\r
+ if (nextPageInRowChain != null) \r
+ {\r
+ nextPageInRowChain.unlatch();\r
+ nextPageInRowChain = null;\r
+ }\r
+ }\r
+ }\r
+ }\r
+\r
+\r
+ /**\r
+ * Remove a column chain that may have been orphaned by an update. \r
+ * <p>\r
+ * Remove a column chain that may have been orphaned by an update. This\r
+ * is executed as a post commit operation. This page is the head page of\r
+ * the row which used to point to the column chain in question. The\r
+ * location of the orphaned column chain is in the ReclaimSpace record.\r
+ * <BR>\r
+ * MT - latched. No lock will be gotten, the head record must already be\r
+ * locked exclusive with no outstanding changes that can be rolled back.\r
+ * <p>\r
+ *\r
+ * @param work object describing the chain to remove.\r
+ * @param containerHdl open container handle to use to remove chain.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ /* package */ \r
+ void removeOrphanedColumnChain(\r
+ ReclaimSpace work, \r
+ ContainerHandle containerHdl)\r
+ throws StandardException\r
+ {\r
+ // First we need to make sure that this is the first and only time\r
+ // this long column is begin reclaimed, to do this we get the first\r
+ // page on the long column chain and compare its page time stamp.\r
+ // If it is different, don't do anything.\r
+ //\r
+ // Next we need to make sure the update operation commits - we do\r
+ // this by finding the row headed by headRecord, go to the column\r
+ // in question and see if it points to the first page of the long\r
+ // column chain we want to reclaim. If it does then the update\r
+ // operation has rolled back and we don't want to reclaim it.\r
+ //\r
+ // After we do the above 2 checks, we can reclaim the column\r
+ // chain.\r
+ StoredPage headOfChain =\r
+ (StoredPage)containerHdl.getPageNoWait(work.getColumnPageId());\r
+\r
+ // If someone has it latched, not reclaimable\r
+ if (headOfChain == null) \r
+ return;\r
+\r
+ // If the column has been touched, it is not orphaned. Not reclaimable.\r
+ boolean pageUnchanged = \r
+ headOfChain.equalTimeStamp(work.getPageTimeStamp());\r
+\r
+ headOfChain.unlatch(); // unlatch it for now.\r
+\r
+ if (pageUnchanged == false)\r
+ return;\r
+\r
+ // Now get to the column in question and make sure it is no longer\r
+ // pointing to the column chain.\r
+\r
+ RecordHandle headRowHandle = work.getHeadRowHandle();\r
+\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ // System.out.println("Executing in removeOrphanedColumnChain.");\r
+ // System.out.println("work = " + work);\r
+ // System.out.println("head = " + headOfChain);\r
+ // System.out.println("this = " + this);\r
+\r
+ SanityManager.ASSERT(isLatched());\r
+ SanityManager.ASSERT(\r
+ headRowHandle.getPageNumber() == getPageNumber(), \r
+ "got wrong head page");\r
+ } \r
+\r
+ // First get the row.\r
+ int slot = \r
+ findRecordById(\r
+ headRowHandle.getId(), headRowHandle.getSlotNumberHint());\r
+\r
+ // If slot < 0, it means the whole record is gone, the column chain is\r
+ // definitely orphaned.\r
+\r
+ if (slot >= 0) \r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ if (isOverflowPage())\r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "Page " + getPageNumber() + " is overflow " +\r
+ "\nwork = " + work +\r
+ "\nhead = " + headOfChain +\r
+ "\nthis = " + this);\r
+ }\r
+ } \r
+\r
+ // Find the page with the column in question on it.\r
+ StoredPage pageInRowChain = this; // Start with the head page.\r
+\r
+ try \r
+ {\r
+\r
+ int columnId = work.getColumnId();\r
+ StoredRecordHeader recordHeader = getHeaderAtSlot(slot); \r
+\r
+ if (SanityManager.DEBUG)\r
+ SanityManager.ASSERT(recordHeader.getFirstField() == 0,\r
+ "Head row piece should start at field 0 but is not");\r
+\r
+ // See if columnId is on pageInRowChain.\r
+ while ((recordHeader.getNumberFields() +\r
+ recordHeader.getFirstField()) <= columnId) \r
+ {\r
+ // The column in question is not on pageInRowChain.\r
+\r
+ if (pageInRowChain != this) \r
+ {\r
+ // Keep the head page latched.\r
+ pageInRowChain.unlatch();\r
+ pageInRowChain = null;\r
+ }\r
+\r
+ if (recordHeader.hasOverflow()) \r
+ {\r
+ // Go to the next row piece\r
+ pageInRowChain = \r
+ getOverflowPage(recordHeader.getOverflowPage());\r
+ recordHeader = \r
+ pageInRowChain.getHeaderAtSlot(\r
+ getOverflowSlot(pageInRowChain, recordHeader));\r
+ } \r
+ else \r
+ {\r
+ // Don't know why, but this is the last column.\r
+ // Anyway, the column chain is definite orphaned.\r
+ // This can happen if the update, or subsequent\r
+ // updates, shrink the number of columns in the row. \r
+ break;\r
+ }\r
+ }\r
+\r
+ if ((recordHeader.getNumberFields() + \r
+ recordHeader.getFirstField()) > columnId) \r
+ {\r
+ // RecordHeader is the record header of the row piece on\r
+ // pageInRowChain. The column in question exists and is in\r
+ // that row piece.\r
+ if (!pageInRowChain.isColumnOrphaned(\r
+ recordHeader, columnId, \r
+ work.getColumnPageId(), work.getColumnRecordId()))\r
+ {\r
+ // The column is not orphaned, row still points to it.\r
+ if (pageInRowChain != this) \r
+ {\r
+ // Keep the head page latched.\r
+ pageInRowChain.unlatch();\r
+ pageInRowChain = null;\r
+ }\r
+ return;\r
+ }\r
+ }\r
+\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);\r
+ } \r
+ finally \r
+ {\r
+ if (pageInRowChain != this && pageInRowChain != null)\r
+ pageInRowChain.unlatch();\r
+ }\r
+ }\r
+\r
+ // If we get this far, we have verified that the column chain is indeed\r
+ // orphaned. Get rid of the column chain.\r
+\r
+ long nextPageId = work.getColumnPageId();\r
+ int nextRecordId = work.getColumnRecordId();\r
+\r
+ purgeOneColumnChain(nextPageId, nextRecordId);\r
+ }\r
+\r
    /**
     * See if the long column chain this row piece used to reference is
     * orphaned.
     * <p>
     * Helper function for removeOrphanedColumnChain. This page, which may
     * be a head page or an overflow page, contains the column specified in
     * columnId. It used to point to a long column chain at oldPageId and
     * oldRecordId. Returns true if it no longer points to that long column
     * chain (i.e. the chain is orphaned and may be reclaimed).
     * <p>
     *
     * @return true if page no longer points to the long column chain.
     *
     * @param recordHeader record header which used to point at the long column
     * @param columnId     column id of the long column in head.
     * @param oldPageId    the page id where the long column used to be.
     * @param oldRecordId  the record id where the long column used to be.
     *
     * @exception StandardException Standard exception policy.
     **/
    private boolean isColumnOrphaned(
    StoredRecordHeader recordHeader,
    int                columnId,
    long               oldPageId,
    long               oldRecordId)
         throws StandardException, IOException
    {
        int slot = findRecordById(recordHeader.getId(), Page.FIRST_SLOT_NUMBER);

        if (SanityManager.DEBUG)
        {
            // the row piece must still exist on this page ...
            SanityManager.ASSERT(slot >= 0, "overflow row chain truncated");

            // ... and must contain the field we are about to inspect.
            SanityManager.ASSERT(
                columnId >= recordHeader.getFirstField(),
                "first column on page > expected");
        }

        // these reads are always against the page array
        ArrayInputStream lrdi = rawDataIn;

        // set read position to data portion of record to check.
        int offset = getRecordOffset(slot);
        lrdi.setPosition(offset + recordHeader.size());

        // skip fields until positioned at the start of the field in question.
        for (int i = recordHeader.getFirstField(); i < columnId; i++)
            skipField(lrdi);

        // read in the field header of the column we are interested in.
        // NOTE: readFieldDataLength must be called even though fieldLength is
        // never used - the call advances the stream past the length bytes so
        // that any overflow pointer can be read next.
        int fieldStatus = StoredFieldHeader.readStatus(lrdi);
        int fieldLength = StoredFieldHeader.readFieldDataLength
            (lrdi, fieldStatus, slotFieldSize);

        if (StoredFieldHeader.isOverflow(fieldStatus))
        {
            // it is still an overflow field, check if it still points to
            // the overflow column chain in question.

            long ovflowPage = CompressedNumber.readLong((InputStream) lrdi);
            int ovflowRid = CompressedNumber.readInt((InputStream) lrdi);

            if (ovflowPage == oldPageId && ovflowRid == oldRecordId)
            {
                // This field still points to the column chain, the
                // update must have rolled back.
                return false;
            }
        }

        // Else, either the field is no longer a long column, or it doesn't
        // point to oldPageId, oldRecordId. The column chain is orphaned.
        return true;
    }
+\r
    /**
     * Return the next recordHandle in a long column chain.
     * <p>
     * Return a recordHandle pointing to the next piece of the column chain.
     * This page must be an overflow page that is in a column chain. If this
     * is the last piece of the overflow column, return null.
     * <p>
     *
     * @return The next record handle in a long column chain, or null if this
     *         is the last piece.
     *
     * @param slot The slot of the current long column piece.
     *
     * @exception StandardException Standard exception policy.
     **/
    private RecordHandle getNextColumnPiece(int slot)
        throws StandardException
    {
        if (SanityManager.DEBUG)
        {
            SanityManager.ASSERT(isLatched());
            SanityManager.ASSERT(isOverflowPage(),
                "not expected to call getNextColumnPiece on non-overflow page");

            // a long column overflow page holds exactly one record.
            if (recordCount() != 1)
            {
                SanityManager.THROWASSERT(
                    "getNextColumnPiece called on a page with " +
                    recordCount() + " rows");
            }
        }

        try
        {
            StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
            int numberFields =
                recordHeader.getNumberFields();

            if (SanityManager.DEBUG)
            {
                if ((numberFields > 2) || (numberFields < 1))
                {
                    SanityManager.THROWASSERT(
                        "longColumn record header must have 1 or 2 fields." +
                        " numberFields = " + numberFields);
                }
            }

            // 1 field = data only; 2 fields = data plus next-piece pointer.
            if (numberFields != 2) // End of column chain.
                return null;

            // these reads are always against the page array
            ArrayInputStream lrdi = rawDataIn;

            // The 2nd field is the pointer to the next page in column chain.

            int offset = getRecordOffset(slot) + recordHeader.size();
            lrdi.setPosition(offset);

            // skip the first field (the column data itself)
            skipField(lrdi);

            // the 2nd field should be <pageId, recordId> pair; read the field
            // header to skip over the length, then read the pair.
            int fieldStatus = StoredFieldHeader.readStatus(lrdi);
            int fieldLength = StoredFieldHeader.readFieldDataLength
                (lrdi, fieldStatus, slotFieldSize);

            long ovflowPage = CompressedNumber.readLong((InputStream) lrdi);
            int ovflowRid = CompressedNumber.readInt((InputStream) lrdi);

            if (SanityManager.DEBUG)
            {
                if (!StoredFieldHeader.isOverflow(fieldStatus))
                {
                    // In version 1.5, the first field is overflow and the
                    // second is not. In version 2.0 onwards, the first field
                    // is not overflow and the second is overflow (the overflow
                    // bit goes with the overflow pointer). Check first field
                    // to make sure its overflow bit is set on.
                    // Offset still points to the first column.
                    lrdi.setPosition(offset);
                    fieldStatus = StoredFieldHeader.readStatus(lrdi);
                    SanityManager.ASSERT(
                        StoredFieldHeader.isOverflow(fieldStatus));
                }
            }

            // RESOLVE : this new can get expensive if the column chain is very
            // long. The reason we do this is because we need to return the
            // page number and the rid, if we assume that the long column is
            // always at slot 0, we can return only the page.

            return owner.makeRecordHandle(ovflowPage, ovflowRid);

        }
        catch (IOException ioe)
        {
            // an I/O error while reading the page array means the page image
            // itself is bad - mark the database corrupt.
            throw dataFactory.markCorrupt(
                StandardException.newException(
                    SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));
        }
    }
+ \r
+\r
+ /**************************************************************************\r
+ * Page space usage\r
+ **************************************************************************\r
+ */\r
+\r
+ /**\r
+ * initialize the in memory variables associated with space maintenance.\r
+ * <p>\r
+ * Get the total available space on an empty page.\r
+ * initSlotTable() must be called after the page has been read in.\r
+ **/\r
+ private void initSpace()\r
+ {\r
+ // NOTE: subclass may have overwitten it to report less freeSpace,\r
+ // always call getMaxFreeSpace() to get total space.\r
+ totalSpace = getMaxFreeSpace();\r
+\r
+ // estimate RH will be about 16 bytes:\r
+ // (1 - status, 1 - id, 1 - #fields, 1 - 1stField, 12 - overflow ptr)\r
+\r
+ // RESOLVED: track# 3370, 3368\r
+ // In the old code below, spareSpace/100 is integer division. This means\r
+ // that you get a value of 0 for it as long as spareSpace is between 0\r
+ // and 99. But if spareSpace is 100 you get a value of 1. This resulted\r
+ // in a negative value for maxFieldSize. This caused e.g. the isLong \r
+ // method to behave incorrectly when spareSpace is 100.\r
+ //\r
+ // RESOLVED: track# 4385\r
+ // maxFieldSize is a worst case calculation for the size of a record\r
+ // on an empty page, with a single field, but still allow room for \r
+ // an overflow pointer if another field is to be added. If you don't\r
+ // account for the overflow pointer then you can get into the situation\r
+ // where the code puts the field on the page (not making it a long \r
+ // column), then runs out of space on next column but can't fit overflow\r
+ // pointer, so backs up and removes the column from page, and tries\r
+ // again on next overflow page - looping forever.\r
+ //\r
+ // maxFieldSize = \r
+ // totalSpace * (1 - spareSpace/100) - slotEntrySize \r
+ // - 16 - OVERFLOW_POINTER_SIZE;\r
+\r
+ maxFieldSize = totalSpace - slotEntrySize - 16 - OVERFLOW_POINTER_SIZE;\r
+\r
+ if (SanityManager.DEBUG) {\r
+ SanityManager.ASSERT(maxFieldSize >= 0);\r
+ // DERBY-3099: maxFieldSize was calculated before slotFieldSize and\r
+ // slotEntrySize had been initialized.\r
+ int expectedFieldSize = calculateSlotFieldSize(pageData.length);\r
+ SanityManager.ASSERT(slotFieldSize == expectedFieldSize,\r
+ "slotFieldSize uninitialized");\r
+ SanityManager.ASSERT(slotEntrySize == 3 * expectedFieldSize,\r
+ "slotEntrySize uninitialized");\r
+ }\r
+ }\r
+\r
+ /**\r
+ * Initialize the freeSpace count and set the firstFreeByte on page\r
+ **/\r
+ private void clearAllSpace()\r
+ {\r
+ freeSpace = totalSpace;\r
+ firstFreeByte = getPageSize() - totalSpace - CHECKSUM_SIZE;\r
+ }\r
+\r
+ /**\r
+ * Compress out the space specified by startByte and endByte.\r
+ * <p>\r
+ * As part of moving rows, updating rows, purging rows compact the space\r
+ * left between rows.\r
+ * <p>\r
+ *\r
+ * @param startByte compress out space starting at startByte offset\r
+ * @param endByte compress out space ending at endByte offset\r
+ *\r
+ **/\r
+ private void compressPage(\r
+ int startByte, \r
+ int endByte) \r
+ throws IOException\r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ if (((endByte + 1) > firstFreeByte) || (startByte > firstFreeByte))\r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "startByte = " + startByte + " endByte = " + endByte +\r
+ " firstFreeByte = " + firstFreeByte);\r
+ }\r
+ }\r
+\r
+ int lengthToClear = endByte + 1 - startByte;\r
+\r
+ // see if these were not the last occupied record space on the page\r
+ if ((endByte + 1) != firstFreeByte) \r
+ {\r
+ // Shift everything down the page.\r
+ int moveLength = (firstFreeByte - endByte - 1);\r
+\r
+ System.arraycopy(\r
+ pageData, (endByte + 1), pageData, startByte, moveLength);\r
+\r
+ // fix the page offsets of the rows further down the page\r
+ for (int slot = 0; slot < slotsInUse; slot++) \r
+ {\r
+ int offset = getRecordOffset(slot);\r
+\r
+ if (offset >= (endByte + 1)) \r
+ {\r
+ offset -= lengthToClear;\r
+ setRecordOffset(slot, offset);\r
+ }\r
+ }\r
+ }\r
+ \r
+ freeSpace += lengthToClear;\r
+ firstFreeByte -= lengthToClear;\r
+\r
+ clearSection(firstFreeByte, lengthToClear);\r
+ }\r
+\r
+ /**\r
+ * Free up required bytes by shifting rows "down" the page.\r
+ * <p>\r
+ * Expand page, move all the data from start Offset down the page by\r
+ * the amount required to free up the required bytes.\r
+ *\r
+ * @param startOffset offset on page to begin the shift\r
+ * @param requiredBytes the number of bytes that must be freed.\r
+ *\r
+ * @exception IOException If IOException is raised during the page mod.\r
+ **/\r
+ protected void expandPage(\r
+ int startOffset, \r
+ int requiredBytes) \r
+ throws IOException\r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ SanityManager.ASSERT(requiredBytes <= freeSpace);\r
+ SanityManager.ASSERT(startOffset <= firstFreeByte);\r
+ }\r
+\r
+ int totalLength = firstFreeByte - startOffset;\r
+\r
+ if (totalLength > 0)\r
+ {\r
+ System.arraycopy(\r
+ pageData, startOffset, \r
+ pageData, startOffset + requiredBytes, totalLength);\r
+\r
+ // fix the page offsets of the rows further down the page\r
+ for (int slot = 0; slot < slotsInUse; slot++) \r
+ {\r
+ int offset = getRecordOffset(slot);\r
+ if (offset >= startOffset) \r
+ {\r
+ offset += requiredBytes;\r
+ setRecordOffset(slot, offset);\r
+ }\r
+ }\r
+ }\r
+\r
+ freeSpace -= requiredBytes;\r
+ firstFreeByte += requiredBytes;\r
+ }\r
+\r
+ /**\r
+ * Shrink page. \r
+ * <p>\r
+ * move all the data from start Offset up the page by the amount shrunk. \r
+ *\r
+ *\r
+ * @param startOffset offset on page to begin the shift\r
+ * @param shrinkBytes the number of bytes that must be moved.\r
+ *\r
+ * @exception IOException some IOException is raised during the page mod,\r
+ * (unlikely as this is just writing to array).\r
+ **/\r
+ private void shrinkPage(int startOffset, int shrinkBytes) \r
+ throws IOException \r
+ {\r
+ // the number of bytes that needs to be moved up.\r
+ int totalLength = firstFreeByte - startOffset;\r
+\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ SanityManager.DEBUG(\r
+ "shrinkPage", "page " + getIdentity() + \r
+ " shrinking " + shrinkBytes + \r
+ " from offset " + startOffset +\r
+ " to offset " + (startOffset-shrinkBytes) +\r
+ " moving " + totalLength + \r
+ " bytes. FirstFreeByte at " + firstFreeByte);\r
+\r
+ SanityManager.ASSERT(\r
+ totalLength >= 0, "firstFreeByte - startOffset <= 0");\r
+\r
+ SanityManager.ASSERT(\r
+ (startOffset-shrinkBytes) > RECORD_SPACE_OFFSET ,\r
+ "shrinking too much ");\r
+\r
+ if (startOffset != firstFreeByte)\r
+ {\r
+ // make sure startOffset is at the beginning of a record\r
+ boolean foundslot = false;\r
+ for (int slot = 0; slot < slotsInUse; slot++) \r
+ {\r
+ if (getRecordOffset(slot) == startOffset) \r
+ {\r
+ foundslot = true;\r
+ break;\r
+ }\r
+ }\r
+\r
+ if (!foundslot)\r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "startOffset " + startOffset + \r
+ " not at the beginning of a record");\r
+ }\r
+ }\r
+ }\r
+\r
+ if (totalLength > 0) \r
+ {\r
+ System.arraycopy(\r
+ pageData, startOffset,\r
+ pageData, startOffset-shrinkBytes , totalLength);\r
+\r
+ // fix the page offsets of the rows further down the page\r
+ for (int slot = 0; slot < slotsInUse; slot++) \r
+ {\r
+ int offset = getRecordOffset(slot);\r
+ if (offset >= startOffset) \r
+ {\r
+ offset -= shrinkBytes;\r
+ setRecordOffset(slot, offset);\r
+ }\r
+ }\r
+ }\r
+\r
+ freeSpace += shrinkBytes;\r
+ firstFreeByte -= shrinkBytes;\r
+ }\r
+\r
+ public int getRecordLength(int slot) throws IOException\r
+ {\r
+ return getRecordPortionLength(slot);\r
+ }\r
+ protected boolean getIsOverflow(int slot) throws IOException\r
+ {\r
+ return getHeaderAtSlot(slot).hasOverflow();\r
+ }\r
+\r
    /**
     * Log a row into the StoreOutput stream.
     * <p>
     * Write the row in its record format to the stream. Record format is a
     * record header followed by each field with its field header. See this
     * class's description for the specifics of these headers.
     *
     * startColumn is used to specify which column for this logRow to
     * start logging. When realStartColumn is specified, that means part of
     * the row has already been logged. startColumn here indicates that the
     * first column was logged in the logBuffer, need to continue log the rest
     * of the row starting at realStartColumn.
     *
     * This is used when a longColumn is encountered during a long row.
     * After done logging the long column, we need to continue logging the
     * rest of the row.
     * A -1 value for realStartColumn, means that it is not significant.
     *
     * logRow will not throw an noSpaceOnPage exception, if it is an overflow
     * page, and the record we are inserting is the only record on the page.
     * We are supporting rows expanding multiple pages through this mechanism.
     *
     * logRow expects row to be a sparse row.
     * <p>
     *
     * @return the "realStartColumn" value, -1 if not a long row.
     *
     * @param slot            the slot of the row being logged.
     * @param forInsert       this is logging an insert (not update/delete).
     * @param recordId        record id of the row being logged.
     * @param row             actual data of row in object form. If row is
     *                        null then we are logging an overflow pointer.
     * @param validColumns    bit map describing valid columns in row.
     * @param out             stream to log to.
     * @param startColumn     what column to start with (see above for detail)
     * @param insertFlag      flag indicating mode we are in,
     *                        INSERT_DEFAULT - default insert
     *                        INSERT_SPLIT - splitting a row/column
     *                        across pages.
     * @param realStartColumn If -1 ignore variable, else part of row has
     *                        already been logged, and should continue with
     *                        this column.
     * @param realSpaceOnPage Use this as space on page if realStartColumn
     *                        is not -1.
     * @param overflowThreshold How much of the page to use before deciding
     *                        to overflow a row.
     *
     * @exception IOException RESOLVE
     * @exception StandardException Standard exception policy.
     *
     * @see BasePage#logRow
     **/
    public int logRow(
    int                         slot,
    boolean                     forInsert,
    int                         recordId,
    Object[]                    row,
    FormatableBitSet            validColumns,
    DynamicByteArrayOutputStream out,
    int                         startColumn,
    byte                        insertFlag,
    int                         realStartColumn,
    int                         realSpaceOnPage,
    int                         overflowThreshold)
        throws StandardException, IOException
    {
        // Is this an update that just went through handleIncompleteLogRow
        // and handleIncompleteLogRow threw an exception. In this case the
        // buffer is already finished.
        if (!forInsert)
        {
            if ((realStartColumn != -1) && (realSpaceOnPage == -1))
            {
                return realStartColumn;
            }
        }

        int spaceAvailable = freeSpace;
        setOutputStream(out);
        int beginPosition = out.getPosition();

        // if we are inserting in the headPage,
        // we need to make sure that there is enough room
        // on the page for the reserve space.
        userRowSize = 0;
        boolean calcMinimumRecordSize = false;

        if (realStartColumn != (-1))
        {
            // in the middle of logging a long row/column.

            spaceAvailable = realSpaceOnPage;
            beginPosition = out.getBeginPosition();
        }
        else
        {
            // logging row part that is on head page.

            if (!forInsert)
            {
                // an update can use the total space of the record,
                // even if not all of the fields are being updated.
                // If the updated columns will not fit then some
                // columns will move off the page to a new chunk.
                spaceAvailable += getTotalSpace(slot);

            }
            else
            {
                // need to account for the slot table using extra space...
                spaceAvailable -= slotEntrySize;

                if (startColumn == 0)
                    calcMinimumRecordSize = true;
            }

            // <= is ok here as we know we want to write at least one more byte
            if (spaceAvailable <= 0)
                throw new NoSpaceOnPage(isOverflowPage());
        }

        try
        {
            if (row == null)
            {
                // if the row is null, we must be writing an overflow pointer.

                return(logOverflowRecord(slot, spaceAvailable, out));
            }

            int numberFields = 0;
            StoredRecordHeader recordHeader;

            if (forInsert)
            {
                recordHeader = new StoredRecordHeader();
            }
            else
            {
                // Get a full copy of the record header since we might change
                // it, and we can't modify the one on the page
                recordHeader =
                    new StoredRecordHeader(getHeaderAtSlot(slot));

                // an update always starts at the first column on this page
                startColumn = recordHeader.getFirstField();
            }

            if (validColumns == null)
            {
                // all columns in row[] are valid, we will be logging them all.

                numberFields = row.length - startColumn;
            }
            else
            {
                // RESOLVE (mikem) - counting on validColumns.length may be bad
                // for performance.

                for (int i = validColumns.getLength() - 1;
                     i >= startColumn;
                     i--)
                {
                    if (validColumns.isSet(i))
                    {
                        numberFields = i + 1 - startColumn;
                        break;
                    }
                }
            }

            int onPageNumberFields = -1; // only valid for update

            if (forInsert)
            {
                recordHeader.setId(recordId);
                recordHeader.setNumberFields(numberFields);
            }
            else
            {
                // an update

                onPageNumberFields = recordHeader.getNumberFields();

                if (numberFields > onPageNumberFields)
                {
                    // number of fields *might* be increasing
                    if (recordHeader.hasOverflow())
                    {
                        // other fields will be handled in next portion update

                        numberFields = onPageNumberFields;
                    }
                    else
                    {
                        // number of fields is increasing

                        recordHeader.setNumberFields(numberFields);
                    }
                }
                else if (numberFields < onPageNumberFields)
                {
                    if (validColumns == null)
                    {
                        // number of fields is decreasing,
                        // but only allowed when the complete
                        // row is being updated.
                        recordHeader.setNumberFields(numberFields);

                        // RESOLVE -
                        // need some post commit work if row has overflow

                        // if (recordHeader.hasOverflow()) {
                        //     remove overflow portion after commit.
                        // }

                    }
                    else
                    {
                        // we process all the fields, the unchanged ones
                        // at the end will have a single byte written out
                        // indicating they are unchanged (nonexistent)
                        numberFields = onPageNumberFields;
                    }
                }
            }

            int endFieldExclusive = startColumn + numberFields;

            if (realStartColumn >= endFieldExclusive)
            {
                // The realStartColumn is greater than the last column we need
                // to log, so we are done.
                return (-1);
            }

            if ((insertFlag & Page.INSERT_DEFAULT) != Page.INSERT_DEFAULT)
            {
                // if this is not logging the part of the row being inserted
                // on the main page, then use startColumn as first field.
                recordHeader.setFirstField(startColumn);
            }

            // what column to start with?

            int firstColumn = realStartColumn;
            if (realStartColumn == (-1))
            {
                // logging on the head page.

                int recordHeaderLength = recordHeader.write(logicalDataOut);

                spaceAvailable -= recordHeaderLength;
                if (spaceAvailable < 0)
                {
                    // ran out of space just writing the record header.
                    throw new NoSpaceOnPage(isOverflowPage());
                }

                firstColumn = startColumn;
            }


            boolean monitoringOldFields = false;
            int validColumnsSize =
                (validColumns == null) ? 0 : validColumns.getLength();

            if (validColumns != null)
            {
                if (!forInsert)
                {
                    // we monitor the length of the old fields by skipping them
                    // but only on a partial update.
                    if ((validColumns != null) &&
                        (firstColumn < (startColumn + onPageNumberFields)))
                    {
                        rawDataIn.setPosition(
                            getFieldOffset(slot, firstColumn));

                        monitoringOldFields = true;
                    }
                }
            }

            int lastSpaceAvailable = spaceAvailable;
            int recordSize = 0;
            int lastColumnPositionAllowOverflow = out.getPosition();
            int lastColumnAllowOverflow = startColumn;

            if (spaceAvailable > OVERFLOW_POINTER_SIZE)
                lastColumnPositionAllowOverflow = -1;
            int columnFlag = COLUMN_FIRST;

            for (int i = firstColumn; i < endFieldExclusive; i++)
            {
                Object ref = null;
                boolean ignoreColumn = false;


                // should we log this column or not?
                if ((validColumns == null) ||
                    (validColumnsSize > i && validColumns.isSet(i)))
                {
                    if (i < row.length)
                        ref = row[i];
                }
                else if (!forInsert)
                {
                    // field is not supplied, log as non-existent
                    ignoreColumn = true;
                }

                // remember the last position at which an overflow pointer
                // would still fit, in case we must back up to it later.
                if (spaceAvailable > OVERFLOW_POINTER_SIZE)
                {
                    lastColumnPositionAllowOverflow = out.getPosition();
                    lastColumnAllowOverflow = i;
                }

                lastSpaceAvailable = spaceAvailable;

                if (ignoreColumn)
                {
                    if (SanityManager.DEBUG)
                    {
                        SanityManager.ASSERT(
                            ref == null,
                            "ref should be null for an ignored column");

                        SanityManager.ASSERT(
                            validColumns != null,
                            "validColumns should be non-null for ignored col");
                    }

                    if (i < (startColumn + onPageNumberFields))
                    {
                        if (SanityManager.DEBUG)
                        {
                            SanityManager.ASSERT(
                                monitoringOldFields,
                                "monitoringOldFields must be true");
                        }

                        // need to keep track of the old field lengths
                        // as they are remaining in the row.
                        int oldOffset = rawDataIn.getPosition();
                        skipField(rawDataIn);
                        int oldFieldLength =
                            rawDataIn.getPosition() - oldOffset;

                        if (oldFieldLength <= spaceAvailable)
                        {
                            // if field doesn't fit,
                            // spaceAvailable must be left unchanged.

                            logColumn(
                                null, 0, out, Integer.MAX_VALUE,
                                COLUMN_NONE, overflowThreshold);

                            spaceAvailable -= oldFieldLength;
                        }

                    }
                    else
                    {
                        // this is an update that is increasing the number of
                        // columns but not providing any value, strange ...

                        spaceAvailable =
                            logColumn(
                                null, 0, out, spaceAvailable,
                                columnFlag, overflowThreshold);
                    }

                }
                else
                {
                    // ignoreColumn is false, we are logging this column.

                    if (monitoringOldFields &&
                        (i < (startColumn + onPageNumberFields)))
                    {
                        // skip the old version of the field so that
                        // rawDataIn is correctly positioned.
                        skipField(rawDataIn);
                    }


                    try
                    {
                        if (ref == null)
                        {
                            // no new value to provide, use the on page value.
                            spaceAvailable =
                                logColumn(
                                    null, 0, out, spaceAvailable,
                                    columnFlag, overflowThreshold);
                        }
                        else
                        {
                            // log the value provided in the row[i]
                            spaceAvailable =
                                logColumn(
                                    row, i, out, spaceAvailable,
                                    columnFlag, overflowThreshold);
                        }

                    }
                    catch (LongColumnException lce)
                    {
                        // logColumn determined that the column would not fit
                        // and that the column length exceeded the long column
                        // threshold so turn this column into a long column.


                        if ((insertFlag & Page.INSERT_DEFAULT) ==
                                Page.INSERT_DEFAULT)
                        {
                            // if default insert, just throw no space exception.

                            // if the lce has thrown the column as an
                            // InputStream, in the following 2 situations
                            //   1. If column came in 'row[i]' as InputStream
                            //   2. If the object stream of 'row[i]' is not
                            //      null, which means that the object state of
                            //      the column is null.
                            //
                            // we need to set the original InputStream column to
                            // the column that has been thrown by lce. It is a
                            // store formated InputStream which remembers all
                            // the bytes that has been read, but not yet stored.
                            // Therefore, we will not lose any bytes.
                            //
                            // In any other situation, we should not change the
                            // state of the column,
                            // i.e. if 'row[i]' has an object state, it should
                            // not be turned into an InputStream.

                            if ((lce.getColumn() instanceof InputStream)
                                && (row[i] instanceof StreamStorable) )
                            {
                                if ((row[i] instanceof InputStream) ||
                                    (((StreamStorable) row[i]).returnStream()
                                         != null) )
                                {
                                    // change state of stream so that it uses
                                    // the stream just created by the lce -
                                    // which is remembering the bytes it has
                                    // already read from the stream but couldn't
                                    // log as there was not enough room on
                                    // current page.

                                    ((StreamStorable) row[i]).setStream(
                                        (InputStream) lce.getColumn());
                                }
                            }

                            throw new NoSpaceOnPage(isOverflowPage());
                        }

                        // When one of the following two conditions is true,
                        // we will allow the insert of the long column:
                        //
                        //   1. if this is the last field,
                        //      and overflow field header fits on page.
                        //   2. if it is not the last field,
                        //      and overflow field header fits on page (for col)
                        //      and another overflow ptr fits (for row).
                        //
                        //

                        if (((spaceAvailable >= OVERFLOW_PTR_FIELD_SIZE) &&
                             (i == (endFieldExclusive - 1))) ||
                            ((spaceAvailable >= (OVERFLOW_PTR_FIELD_SIZE * 2))&&
                             (i < (endFieldExclusive - 1))))
                        {
                            // If the column is a long column, it must be a
                            // InputStream. We have made the input stream into
                            // a RememberBytesInputStream, have to set the
                            // column to that, in order to preserve the bytes
                            // we already read off the stream.

                            // caught a long column exception,
                            // set the variables, and rethrow the error
                            out.setBeginPosition(beginPosition);
                            lce.setExceptionInfo(out, i, spaceAvailable);
                            throw (lce);
                        }
                    }
                }

                int nextColumn;

                recordSize += (lastSpaceAvailable - spaceAvailable);
                boolean recordIsLong =
                    (overflowThreshold == 100) ?
                        false : isLong(recordSize, overflowThreshold);

                // get the no overflow case out of the way asap
                if ((lastSpaceAvailable == spaceAvailable) || recordIsLong)
                {
                    if ((insertFlag & Page.INSERT_DEFAULT) ==
                            Page.INSERT_DEFAULT)
                    {
                        throw new NoSpaceOnPage(isOverflowPage());
                    }

                    if (recordIsLong)
                    {
                        // if the record is long because of threshold,
                        // then, we need to reset the logicalOut.
                        // set position to the end of the previous field

                        out.setPosition(out.getPosition() - recordSize);
                    }

                    // did not write this column
                    nextColumn = i;
                }
                else
                {
                    // assume that all fields will be written to this page.
                    nextColumn = endFieldExclusive;
                }

                // See if we have enough room to write an overflow field if the
                // row needs to overflow. We need overflow if we need to
                // write another portion or another portion already exists and
                // we will need to point to it.

                if ((lastSpaceAvailable == spaceAvailable) ||
                    ((insertFlag & Page.INSERT_FOR_SPLIT) ==
                         Page.INSERT_FOR_SPLIT))
                {
                    // The current row has filled the page.

                    if (spaceAvailable <= OVERFLOW_POINTER_SIZE)
                    {
                        if ((i == startColumn) ||
                            (lastColumnPositionAllowOverflow < 0))
                        {
                            // not enough room for the overflow recordheader,
                            // and this is the first column on this page so
                            // need to try another page.
                            throw new NoSpaceOnPage(isOverflowPage());
                        }
                        else
                        {
                            // we need to go back to the last column
                            // that left enough room for an overflow pointer.

                            out.setPosition(lastColumnPositionAllowOverflow);
                            nextColumn = lastColumnAllowOverflow;
                        }
                    }
                }

                if (nextColumn < endFieldExclusive)
                {
                    // If the number of cols has been reduced.

                    int actualNumberFields = nextColumn - startColumn;

                    // go back and update that numberFields in recordHeader.
                    // no need to update spaceAvailable here, because if we are
                    // here, we will be returning any way, and spaceAvailable
                    // will be thrown away.

                    int oldSize = recordHeader.size();
                    recordHeader.setNumberFields(actualNumberFields);

                    int newSize = recordHeader.size();

                    // now we are ready to write the new record header.
                    int endPosition = out.getPosition();

                    if (oldSize > newSize)
                    {
                        // if the old size is bigger than the new size, then
                        // leave extra bytes at the beginning of byte stream.

                        int delta = oldSize - newSize;
                        out.setBeginPosition(beginPosition + delta);
                        out.setPosition(beginPosition + delta);
                    }
                    else if (newSize > oldSize)
                    {
                        out.setPosition(beginPosition);

                    }
                    else
                    {
                        out.setBeginPosition(beginPosition);
                        out.setPosition(beginPosition);
                    }

                    int realLen = recordHeader.write(logicalDataOut);
                    if (SanityManager.DEBUG)
                    {
                        if ((realLen + (oldSize - newSize)) != oldSize)
                        {
                            SanityManager.THROWASSERT(
                                "recordHeader size incorrect. realLen = " +
                                realLen + ", delta = " +
                                (oldSize - newSize) + ", oldSize = " + oldSize);
                        }
                    }

                    out.setPosition(endPosition);

                    if (!forInsert)
                    {
                        // The update is incomplete, fields beyond this
                        // point will have to move off the page. For any fields
                        // that are not being updated we have to save their
                        // values from this page to insert into an overflow
                        // portion.
                        //
                        // When the complete row is being updated there is no
                        // need to save any fields so just return.
                        if (validColumns != null)
                        {
                            handleIncompleteLogRow(
                                slot, nextColumn, validColumns, out);
                        }
                    }

                    return (nextColumn);
                }

                columnFlag = COLUMN_NONE;
            }

            out.setBeginPosition(beginPosition);
            startColumn = -1;

            if ((calcMinimumRecordSize) &&
                (spaceAvailable < (minimumRecordSize - userRowSize)))
            {
                throw new NoSpaceOnPage(isOverflowPage());
            }

        }
        finally
        {
            resetOutputStream();
        }

        return (startColumn);
    }
+\r
+ /**\r
+ * Handle an update of a record portion that is incomplete.\r
+ * <p>\r
+ * Handle an update of a record portion that is incomplete.\r
+ * Ie. Columns have expanded that require other columns to move\r
+ * off the page into a new portion.\r
+ * <P> \r
+ * This method works out of the columns that need to be moved which are not\r
+ * being updated and makes a copy of their data. It then throws an \r
+ * exception with this data, much like the long column exception which will\r
+ * then allow the original insert to complete. \r
+ * <P> \r
+ * If no columns need to be saved (ie all the ones that would move are \r
+ * being updated) then no exception is thrown, logRow() will return and the\r
+ * update completes normally.\r
+ * <p>\r
+ *\r
+ * @param slot slot of the current update.\r
+ * @param startColumn column to start at, handles start in middle of row\r
+ * @param columnList bit map indicating which columns are being updated.\r
+ * @param out place to lot to.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private void handleIncompleteLogRow(\r
+ int slot, \r
+ int startColumn, \r
+ FormatableBitSet columnList, \r
+ DynamicByteArrayOutputStream out)\r
+ throws StandardException \r
+ {\r
+ if (SanityManager.DEBUG)\r
+ SanityManager.ASSERT(columnList != null);\r
+\r
+ StoredRecordHeader rh = getHeaderAtSlot(slot);\r
+\r
+ int endFieldExclusive = rh.getFirstField() + rh.getNumberFields();\r
+\r
+ // first see if any fields are not being modified\r
+ boolean needSave = false;\r
+ int columnListSize = columnList.size();\r
+ for (int i = startColumn; i < endFieldExclusive; i++) \r
+ {\r
+ if (!(columnListSize > i && columnList.get(i))) \r
+ {\r
+ needSave = true;\r
+ break;\r
+ }\r
+ }\r
+ if (!needSave)\r
+ return;\r
+\r
+ Object[] savedFields = \r
+ new Object[endFieldExclusive - startColumn];\r
+\r
+ ByteArrayOutputStream fieldStream = null;\r
+\r
+ for (int i = startColumn; i < endFieldExclusive; i++) \r
+ {\r
+ // row is being updated - ignore\r
+ if (columnListSize > i && columnList.get(i))\r
+ continue;\r
+\r
+ // save the data\r
+\r
+ try \r
+ {\r
+ // use the old value - we use logField to ensure that we\r
+ // get the raw contents of the field and don't follow\r
+ // any long columns. In addition we save this as a RawField\r
+ // so that we preserve the state of the field header.\r
+ if (fieldStream == null)\r
+ fieldStream = new ByteArrayOutputStream();\r
+ else\r
+ fieldStream.reset();\r
+\r
+ logField(slot, i, fieldStream);\r
+\r
+ savedFields[i - startColumn] = \r
+ new RawField(fieldStream.toByteArray());\r
+\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, ioe, getPageId()));\r
+ }\r
+ }\r
+\r
+ // Use a long column exception to notify the caller of the need\r
+ // to perform an insert of the columns that need to move\r
+\r
+ LongColumnException lce = new LongColumnException();\r
+ lce.setExceptionInfo(\r
+ out, startColumn, -1 /* indicates not actual long column */);\r
+ lce.setColumn(savedFields);\r
+\r
+ throw lce; \r
+ }\r
+\r
+ /**\r
+\r
+ @param row (IN/OUT) the row that is to be restored (sparse representation)\r
+ @param limitInput the limit input stream\r
+ @param objectInput the object input stream\r
+\r
+ @exception StandardException Standard Derby error policy\r
+ @exception IOException I/O exception in reading meta data.\r
+ */\r
+\r
+ /**\r
+ * Restore a storable row from a LimitInputStream.\r
+ * <p>\r
+ * Restore a storable row from an LimitInputStream - user must supply two \r
+ * streams on top of the same data, one implements ObjectInput interface \r
+ * that knows how to restore the object, the other one implements \r
+ * LimitInputStream.\r
+ * <p>\r
+ * @param in the limit input stream\r
+ * @param row (IN/OUT) row that is to be restored \r
+ * (sparse representation)\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ public void restoreRecordFromStream(\r
+ LimitObjectInput in, \r
+ Object[] row) \r
+ throws StandardException, IOException\r
+ {\r
+\r
+ StoredRecordHeader recordHeader = new StoredRecordHeader();\r
+ recordHeader.read(in);\r
+ readRecordFromStream(\r
+ row, \r
+ row.length - 1, \r
+ (int[]) null,\r
+ (int[]) null, \r
+ in, \r
+ recordHeader,\r
+ null);\r
+ }\r
+\r
+ /**\r
+ * Process the qualifier list on the row, return true if it qualifies.\r
+ * <p>\r
+ * A two dimensional array is to be used to pass around a AND's and OR's in\r
+ * conjunctive normal form. The top slot of the 2 dimensional array is \r
+ * optimized for the more frequent where no OR's are present. The first \r
+ * array slot is always a list of AND's to be treated as described above \r
+ * for single dimensional AND qualifier arrays. The subsequent slots are \r
+ * to be treated as AND'd arrays or OR's. Thus the 2 dimensional array \r
+ * qual[][] argument is to be treated as the following, note if \r
+ * qual.length = 1 then only the first array is valid and it is and an \r
+ * array of and clauses:\r
+ *\r
+ * (qual[0][0] and qual[0][0] ... and qual[0][qual[0].length - 1])\r
+ * and\r
+ * (qual[1][0] or qual[1][1] ... or qual[1][qual[1].length - 1])\r
+ * and\r
+ * (qual[2][0] or qual[2][1] ... or qual[2][qual[2].length - 1])\r
+ * ...\r
+ * and\r
+ * (qual[qual.length - 1][0] or qual[1][1] ... or qual[1][2])\r
+ *\r
+ * \r
+ * @return true if the row qualifies.\r
+ *\r
+ * @param row The row being qualified.\r
+ * @param qual_list 2 dimensional array representing conjunctive\r
+ * normal form of simple qualifiers.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private boolean qualifyRecordFromRow(\r
+ Object[] row, \r
+ Qualifier[][] qual_list)\r
+ throws StandardException\r
+ {\r
+ boolean row_qualifies = true;\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ SanityManager.ASSERT(row != null);\r
+ }\r
+\r
+ // First do the qual[0] which is an array of qualifer terms.\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ // routine should not be called if there is no qualifier\r
+ SanityManager.ASSERT(qual_list != null);\r
+ SanityManager.ASSERT(qual_list.length > 0);\r
+ }\r
+\r
+ for (int i = 0; i < qual_list[0].length; i++)\r
+ {\r
+ // process each AND clause \r
+\r
+ row_qualifies = false;\r
+\r
+ // process each OR clause.\r
+\r
+ Qualifier q = qual_list[0][i];\r
+\r
+ // Get the column from the possibly partial row, of the \r
+ // q.getColumnId()'th column in the full row.\r
+ DataValueDescriptor columnValue = \r
+ (DataValueDescriptor) row[q.getColumnId()];\r
+\r
+ row_qualifies =\r
+ columnValue.compare(\r
+ q.getOperator(),\r
+ q.getOrderable(),\r
+ q.getOrderedNulls(),\r
+ q.getUnknownRV());\r
+\r
+ if (q.negateCompareResult())\r
+ row_qualifies = !row_qualifies;\r
+\r
+ // Once an AND fails the whole Qualification fails - do a return!\r
+ if (!row_qualifies)\r
+ return(false);\r
+ }\r
+\r
+ // all the qual[0] and terms passed, now process the OR clauses\r
+\r
+ for (int and_idx = 1; and_idx < qual_list.length; and_idx++)\r
+ {\r
+ // loop through each of the "and" clause.\r
+\r
+ row_qualifies = false;\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ // Each OR clause must be non-empty.\r
+ SanityManager.ASSERT(qual_list[and_idx].length > 0);\r
+ }\r
+\r
+ for (int or_idx = 0; or_idx < qual_list[and_idx].length; or_idx++)\r
+ {\r
+ // Apply one qualifier to the row.\r
+ Qualifier q = qual_list[and_idx][or_idx];\r
+ int col_id = q.getColumnId();\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ SanityManager.ASSERT(\r
+ (col_id < row.length),\r
+ "Qualifier is referencing a column not in the row.");\r
+ }\r
+\r
+ // Get the column from the possibly partial row, of the \r
+ // q.getColumnId()'th column in the full row.\r
+ DataValueDescriptor columnValue = \r
+ (DataValueDescriptor) row[q.getColumnId()];\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ if (columnValue == null)\r
+ SanityManager.THROWASSERT(\r
+ "1:row = " + RowUtil.toString(row) +\r
+ "row.length = " + row.length +\r
+ ";q.getColumnId() = " + q.getColumnId());\r
+ }\r
+\r
+ // do the compare between the column value and value in the\r
+ // qualifier.\r
+ row_qualifies = \r
+ columnValue.compare(\r
+ q.getOperator(),\r
+ q.getOrderable(),\r
+ q.getOrderedNulls(),\r
+ q.getUnknownRV());\r
+\r
+ if (q.negateCompareResult())\r
+ row_qualifies = !row_qualifies;\r
+\r
+ // SanityManager.DEBUG_PRINT("StoredPage.qual", "processing qual[" + and_idx + "][" + or_idx + "] = " + qual_list[and_idx][or_idx] );\r
+\r
+ // SanityManager.DEBUG_PRINT("StoredPage.qual", "value = " + row_qualifies);\r
+\r
+ // processing "OR" clauses, so as soon as one is true, break\r
+ // to go and process next AND clause.\r
+ if (row_qualifies)\r
+ break;\r
+\r
+ }\r
+\r
+ // The qualifier list represented a set of "AND'd" \r
+ // qualifications so as soon as one is false processing is done.\r
+ if (!row_qualifies)\r
+ break;\r
+ }\r
+\r
+ return(row_qualifies);\r
+ }\r
+\r
    /**
     * Read just one column from the page into row.
     * <p>
     * The routine reads just one column from the row, it is mostly code
     * taken from readRecordFromStream, but highly optimized to just get
     * one column from a non-overflow row.  It can only be called to read
     * a row from the pageData array as it directly accesses the page array
     * to avoid the Stream overhead while processing non-user data which
     * does not need the limit functionality.
     * <p>
     * It is expected that this code will be called to read in a column
     * associated with qualifiers which are applied one column at a time,
     * and has been specialized to provide the greatest performance for
     * processing qualifiers.  This kind of access is done when scanning
     * large datasets while applying qualifiers and thus any performance
     * gain at this low level is multiplied by the large number of rows that
     * may be iterated over.
     * <p>
     * The column is read into the object located in row[colid].
     *
     * @param row                  col is read into object in row[colid].
     * @param colid                the column id to read, colid N is row[N]
     * @param offset_to_field_data offset in bytes from top of page to field
     * @param recordHeader         record header of row to read column from.
     * @param recordToLock         record handle to lock,
     *                             used by overflow column code.
     *
     * @exception StandardException Standard exception policy.
     **/
    private final void readOneColumnFromPage(
    Object[] row, 
    int colid,
    int offset_to_field_data,
    StoredRecordHeader recordHeader,
    RecordHandle recordToLock)
        throws StandardException, IOException
    {
        // Tracks which stream user (column) code is currently reading from,
        // so the catch blocks can report user-level read errors with context.
        ErrorObjectInput inUserCode = null;

        // Reads in this routine are always against the raw data in the 
        // pageData array, thus it can assume array access to page data array.
        ArrayInputStream lrdi = rawDataIn;

        try
        {
            if (SanityManager.DEBUG)
            {
                if (colid >= row.length)
                    SanityManager.THROWASSERT(
                        "colid = " + colid +
                        ";row length = " + row.length);

                // currently this routine will not work on long rows.
                if (recordHeader.getFirstField() != 0)
                {
                    SanityManager.THROWASSERT(
                        "recordHeader.getFirstField() = " +
                        recordHeader.getFirstField());
                }
            }

            Object column = row[colid];

            // if the column id exists on this page.
            if (colid <= (recordHeader.getNumberFields() - 1))
            {
                // Skip the fields before colid; the column in question
                // exists on this page.  Each iteration advances the offset
                // past one complete field (header + data).

                for (int columnId = colid; columnId > 0; columnId--)
                {
                    offset_to_field_data += 
                        StoredFieldHeader.readTotalFieldLength(
                            pageData, offset_to_field_data);
                }

                // read the field header

                // read the status byte.
                int fieldStatus = 
                    StoredFieldHeader.readStatus(
                        pageData, offset_to_field_data);

                // read the field data length, and position on 1st byte of data.
                int fieldDataLength = 
                    StoredFieldHeader.readFieldLengthAndSetStreamPosition(
                        pageData, 
                        offset_to_field_data + 
                        StoredFieldHeader.STORED_FIELD_HEADER_STATUS_SIZE,
                        fieldStatus,
                        slotFieldSize,
                        lrdi);

                if (SanityManager.DEBUG) 
                {
                    SanityManager.ASSERT(
                        !StoredFieldHeader.isExtensible(fieldStatus), 
                        "extensible fields not supported yet");
                }

                // SRW-DJD code assumes non-extensible case ...

                if (!StoredFieldHeader.isNonexistent(fieldStatus))
                {
                    boolean isOverflow = 
                        StoredFieldHeader.isOverflow(fieldStatus);

                    OverflowInputStream overflowIn = null;

                    if (isOverflow) 
                    {
                        // A fetched long column is returned as a stream.
                        // The field data here is an overflow pointer:
                        // (overflow page, overflow id).
                        long overflowPage = 
                            CompressedNumber.readLong((InputStream) lrdi);

                        int overflowId = 
                            CompressedNumber.readInt((InputStream) lrdi);

                        // Prepare the stream for results...
                        // create the byteHolder the size of a page, so, that it 
                        // will fit the field Data that would fit on a page.
                        MemByteHolder byteHolder = 
                            new MemByteHolder(pageData.length);

                        overflowIn = new OverflowInputStream(
                            byteHolder, owner, overflowPage, 
                            overflowId, recordToLock);
                    }

                    // Deal with Storable columns
                    if (column instanceof DataValueDescriptor) 
                    {
                        DataValueDescriptor sColumn = 
                            (DataValueDescriptor) column;

                        // is the column null ?
                        if (StoredFieldHeader.isNull(fieldStatus)) 
                        {
                            sColumn.restoreToNull();
                        }
                        else
                        {
                            // set the limit for the user read
                            if (!isOverflow) 
                            {
                                // normal, non-overflow column case.
                                // Limit the user read to exactly this
                                // field's data; skip any bytes the user
                                // code left unread.

                                lrdi.setLimit(fieldDataLength);
                                inUserCode = lrdi;
                                sColumn.readExternalFromArray(lrdi);
                                inUserCode = null;
                                int unread = lrdi.clearLimit();
                                if (unread != 0)
                                    lrdi.skipBytes(unread);
                            }
                            else
                            {
                                // fetched column is a Storable long column.

                                FormatIdInputStream newIn = 
                                    new FormatIdInputStream(overflowIn);

                                if ((sColumn instanceof StreamStorable)) 
                                {
                                    // hand the stream to the column; it is
                                    // read lazily by the caller.
                                    ((StreamStorable)sColumn).setStream(newIn);
                                } 
                                else 
                                {
                                    inUserCode = newIn;
                                    sColumn.readExternal(newIn);
                                    inUserCode = null;
                                }
                            } 
                        }
                    }
                    else
                    {
                        // At this point only non-Storable columns.

                        if (StoredFieldHeader.isNull(fieldStatus))
                        {
                            // Only Storables can be null ...

                            throw StandardException.newException(
                                SQLState.DATA_NULL_STORABLE_COLUMN,
                                Integer.toString(colid));
                        }

                        // This is a non-extensible field, which means the 
                        // caller must know the correct type and thus the 
                        // element in row is the correct type or null. It must 
                        // be Serializable.
                        //
                        // We do not support Externalizable here.

                        lrdi.setLimit(fieldDataLength);
                        inUserCode = lrdi;
                        // RESOLVE (no non-storables?)
                        row[colid] = (Object) lrdi.readObject();
                        inUserCode = null;
                        int unread = lrdi.clearLimit();
                        if (unread != 0)
                            lrdi.skipBytes(unread);
                    }

                }
                else
                {
                    // column does not exist in the row, return null.

                    // field is non-existent

                    if (column instanceof DataValueDescriptor) 
                    {
                        // RESOLVE - This is in place for 1.2. In the future
                        // we may want to return this column as non-existent
                        // even if it is a storable column, or maybe use a
                        // supplied default.

                        ((DataValueDescriptor) column).restoreToNull();
                    } 
                    else 
                    {
                        row[colid] = null;
                    }
                }
            }
            else
            {
                // field does not exist on this page.

                if (column instanceof DataValueDescriptor) 
                {
                    // RESOLVE - This is in place for 1.2. In the future
                    // we may want to return this column as non-existent
                    // even if it is a storable column, or maybe use a 
                    // supplied default.
                    ((DataValueDescriptor) column).restoreToNull();
                } 
                else 
                {
                    row[colid] = null;
                }
            }
        } 
        catch (IOException ioe) 
        {
            // an exception during the restore of a user column, this doesn't
            // make the database corrupt, just that this field is inaccessible

            if (inUserCode != null) 
            {
                lrdi.clearLimit();

                if (ioe instanceof EOFException) 
                {
                    if (SanityManager.DEBUG)
                    {
                        SanityManager.DEBUG_PRINT("DEBUG_TRACE",
                            "StoredPage.readOneColumnFromPage - EOF while restoring record: " +
                            recordHeader +
                            "Page dump = " + this);
                        SanityManager.showTrace(ioe);
                    }

                    // going beyond the limit in a DataInput class results in
                    // an EOFException when it sees the -1 from a read
                    throw StandardException.newException(
                        SQLState.DATA_STORABLE_READ_MISMATCH,
                        ioe, inUserCode.getErrorInfo());
                }

                // some SQLData error reporting
                Exception ne = inUserCode.getNestedException();
                if (ne != null)
                {
                    if (ne instanceof InstantiationException)
                    {
                        throw StandardException.newException(
                            SQLState.DATA_SQLDATA_READ_INSTANTIATION_EXCEPTION,
                            ne, inUserCode.getErrorInfo());
                    }

                    if (ne instanceof IllegalAccessException)
                    {
                        throw StandardException.newException(
                            SQLState.DATA_SQLDATA_READ_ILLEGAL_ACCESS_EXCEPTION,
                            ne, inUserCode.getErrorInfo());
                    }

                    if (ne instanceof StandardException)
                    {
                        throw (StandardException) ne;
                    }
                }

                throw StandardException.newException(
                    SQLState.DATA_STORABLE_READ_EXCEPTION,
                    ioe, inUserCode.getErrorInfo());
            }

            // re-throw to higher levels so they can put it in correct context.
            throw ioe;

        } 
        catch (ClassNotFoundException cnfe) 
        {
            lrdi.clearLimit();

            // an exception during the restore of a user column, this doesn't
            // make the database corrupt, just that this field is inaccessible
            throw StandardException.newException(
                SQLState.DATA_STORABLE_READ_MISSING_CLASS,
                cnfe, inUserCode.getErrorInfo());

        } 
        catch (LinkageError le)
        {
            // Some error during the link of a user class
            if (inUserCode != null)
            {
                lrdi.clearLimit();

                throw StandardException.newException(
                    SQLState.DATA_STORABLE_READ_EXCEPTION,
                    le, inUserCode.getErrorInfo());
            }
            throw le;
        }

    }
+\r
+\r
+\r
+ /**\r
+ * Process the list of qualifiers on the row in the stream.\r
+ * <p>\r
+ * The rawDataIn stream is expected to be positioned after the record \r
+ * header.\r
+ * <p>\r
+ * Check all qualifiers in the qualifier array against row. Return true\r
+ * if all compares specified by the qualifier array return true, else\r
+ * return false.\r
+ * <p>\r
+ * This routine assumes client caller has already checked if the row\r
+ * is deleted or not. The row that it get's is expected to match\r
+ * the partial column list of the scan. \r
+ * <p>\r
+ * On entering this routine the stream should be positioned to the\r
+ * beginning of the row data, just after the row header. On exit\r
+ * the stream will also be positioned there.\r
+ *\r
+ * A two dimensional array is to be used to pass around a AND's and OR's in\r
+ * conjunctive normal form. The top slot of the 2 dimensional array is \r
+ * optimized for the more frequent where no OR's are present. The first \r
+ * array slot is always a list of AND's to be treated as described above \r
+ * for single dimensional AND qualifier arrays. The subsequent slots are \r
+ * to be treated as AND'd arrays or OR's. Thus the 2 dimensional array \r
+ * qual[][] argument is to be treated as the following, note if \r
+ * qual.length = 1 then only the first array is valid and it is and an \r
+ * array of and clauses:\r
+ *\r
+ * (qual[0][0] and qual[0][0] ... and qual[0][qual[0].length - 1])\r
+ * and\r
+ * (qual[1][0] or qual[1][1] ... or qual[1][qual[1].length - 1])\r
+ * and\r
+ * (qual[2][0] or qual[2][1] ... or qual[2][qual[2].length - 1])\r
+ * ...\r
+ * and\r
+ * (qual[qual.length - 1][0] or qual[1][1] ... or qual[1][2])\r
+ *\r
+ * @return Whether or not the row input qualifies.\r
+ *\r
+ * @param row restore row into this object array.\r
+ * @param offset_to_row_data offset in bytes from top of page to row\r
+ * @param fetchDesc Description of fetch including which cols\r
+ * and qualifiers.\r
+ * @param recordHeader The record header of the row, it was read \r
+ * in from stream and dataIn is positioned \r
+ * after it.\r
+ * @param recordToLock The head row to use for locking, used to \r
+ * lock head row of overflow columns/rows.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private final boolean qualifyRecordFromSlot(\r
+ Object[] row, \r
+ int offset_to_row_data,\r
+ FetchDescriptor fetchDesc,\r
+ StoredRecordHeader recordHeader,\r
+ RecordHandle recordToLock)\r
+ throws StandardException, IOException\r
+ {\r
+ boolean row_qualifies = true;\r
+ Qualifier[][] qual_list = fetchDesc.getQualifierList();\r
+ int[] materializedCols = fetchDesc.getMaterializedColumns();\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ SanityManager.ASSERT(qual_list != null, "Not coded yet!");\r
+ }\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ SanityManager.ASSERT(row != null);\r
+ }\r
+\r
+ // First process the initial list of AND's in the 1st array\r
+\r
+ for (int i = 0; i < qual_list[0].length; i++)\r
+ {\r
+ // process each AND clause \r
+\r
+ row_qualifies = false;\r
+\r
+ // Apply one qualifier to the row.\r
+ Qualifier q = qual_list[0][i];\r
+ int col_id = q.getColumnId();\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ SanityManager.ASSERT(\r
+ (col_id < row.length),\r
+ "Qualifier is referencing a column not in the row.");\r
+ }\r
+\r
+ // materialize the column object if we haven't done it yet.\r
+ if (materializedCols[col_id] == 0)\r
+ {\r
+ // materialize just this column from the row, no qualifiers\r
+ readOneColumnFromPage(\r
+ row, \r
+ col_id,\r
+ offset_to_row_data,\r
+ recordHeader,\r
+ recordToLock);\r
+\r
+ // mark offset, indicating the row has been read in.\r
+ //\r
+ // RESOLVE (mikem) - right now value of entry is useless, it\r
+ // is an int so that in the future we could cache the offset\r
+ // to fields to improve performance of getting to a column \r
+ // after qualifying.\r
+ materializedCols[col_id] = offset_to_row_data;\r
+ }\r
+\r
+ // Get the column from the possibly partial row, of the \r
+ // q.getColumnId()'th column in the full row.\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ if (row[col_id] == null)\r
+ SanityManager.THROWASSERT(\r
+ "1:row = " + RowUtil.toString(row) +\r
+ "row.length = " + row.length +\r
+ ";q.getColumnId() = " + q.getColumnId());\r
+ }\r
+\r
+ // do the compare between the column value and value in the\r
+ // qualifier.\r
+ row_qualifies = \r
+ ((DataValueDescriptor) row[col_id]).compare(\r
+ q.getOperator(),\r
+ q.getOrderable(),\r
+ q.getOrderedNulls(),\r
+ q.getUnknownRV());\r
+\r
+ if (q.negateCompareResult())\r
+ row_qualifies = !row_qualifies;\r
+\r
+ // Once an AND fails the whole Qualification fails - do a return!\r
+ if (!row_qualifies)\r
+ return(false);\r
+ }\r
+\r
+ // Now process the Subsequent OR clause's, beginning with qual_list[1]\r
+\r
+ for (int and_idx = 1; and_idx < qual_list.length; and_idx++)\r
+ {\r
+ // loop through each of the "and" clause.\r
+\r
+ row_qualifies = false;\r
+\r
+ for (int or_idx = 0; or_idx < qual_list[and_idx].length; or_idx++)\r
+ {\r
+ // Apply one qualifier to the row.\r
+ Qualifier q = qual_list[and_idx][or_idx];\r
+ int col_id = q.getColumnId();\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ SanityManager.ASSERT(\r
+ (col_id < row.length),\r
+ "Qualifier is referencing a column not in the row.");\r
+ }\r
+\r
+ // materialize the column object if we haven't done it yet.\r
+ if (materializedCols[col_id] == 0)\r
+ {\r
+ // materialize just this column from the row, no qualifiers\r
+ readOneColumnFromPage(\r
+ row, \r
+ col_id,\r
+ offset_to_row_data,\r
+ recordHeader,\r
+ recordToLock);\r
+\r
+ // mark offset, indicating the row has been read in.\r
+ //\r
+ // RESOLVE (mikem) - right now value of entry is useless, it\r
+ // is an int so that in the future we could cache the offset\r
+ // to fields to improve performance of getting to a column \r
+ // after qualifying.\r
+ materializedCols[col_id] = offset_to_row_data;\r
+ }\r
+\r
+ // Get the column from the possibly partial row, of the \r
+ // q.getColumnId()'th column in the full row.\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ if (row[col_id] == null)\r
+ SanityManager.THROWASSERT(\r
+ "1:row = " + RowUtil.toString(row) +\r
+ "row.length = " + row.length +\r
+ ";q.getColumnId() = " + q.getColumnId());\r
+ }\r
+\r
+ // do the compare between the column value and value in the\r
+ // qualifier.\r
+ row_qualifies = \r
+ ((DataValueDescriptor) row[col_id]).compare(\r
+ q.getOperator(),\r
+ q.getOrderable(),\r
+ q.getOrderedNulls(),\r
+ q.getUnknownRV());\r
+\r
+\r
+ if (q.negateCompareResult())\r
+ row_qualifies = !row_qualifies;\r
+\r
+ // SanityManager.DEBUG_PRINT("StoredPage.qual", "processing qual[" + and_idx + "][" + or_idx + "] = " + qual_list[and_idx][or_idx] );\r
+\r
+ // SanityManager.DEBUG_PRINT("StoredPage.qual", "value = " + row_qualifies);\r
+\r
+ // processing "OR" clauses, so as soon as one is true, break\r
+ // to go and process next AND clause.\r
+ if (row_qualifies)\r
+ break;\r
+\r
+ }\r
+\r
+ // The qualifier list represented a set of "AND'd" \r
+ // qualifications so as soon as one is false processing is done.\r
+ if (!row_qualifies)\r
+ break;\r
+ }\r
+\r
+ return(row_qualifies);\r
+ }\r
+\r
+ /**\r
+ * restore a record from a stream.\r
+ * <p>\r
+ * The rawDataIn stream is expected to be positioned after the record \r
+ * header.\r
+ *\r
+ * @return The identifier to be used to open the conglomerate later.\r
+ *\r
+ * @param row restore row into this object array.\r
+ * @param max_colid The maximum numbered column id that will be \r
+ * requested by caller. It should be:\r
+ * min(row.length - 1, maximum bit set in vCols)\r
+ * It is used to stop the inner most loop from \r
+ * looking at more columns in the row.\r
+ * @param vCols If not null, bit map indicates valid cols.\r
+ * @param mCols If not null, int array indicates columns already\r
+ * read in from the stream. A non-zero entry \r
+ * means the column has already been read in.\r
+ * @param dataIn restore row from this stream.\r
+ * @param recordHeader The record header of the row, it was read in \r
+ * from stream and dataIn is positioned after it.\r
+ * @param recordToLock The head row to use for locking, used to lock \r
+ * head row of overflow columns/rows.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private final boolean readRecordFromStream(\r
+ Object[] row, \r
+ int max_colid,\r
+ int[] vCols,\r
+ int[] mCols,\r
+ LimitObjectInput dataIn, \r
+ StoredRecordHeader recordHeader,\r
+ RecordHandle recordToLock)\r
+ throws StandardException, IOException\r
+ {\r
+ ErrorObjectInput inUserCode = null;\r
+ try\r
+ {\r
+ // Get the number of columns in the row.\r
+ int numberFields = recordHeader.getNumberFields();\r
+\r
+ int startColumn = recordHeader.getFirstField();\r
+\r
+ if (startColumn > max_colid)\r
+ {\r
+ // done if the startColumn is higher than highest column.\r
+ return true;\r
+ }\r
+\r
+ // For each column in the row, restore the column from\r
+ // the corresponding field in the record. If the field\r
+ // is missing or not set, set the column to null.\r
+\r
+ int highestColumnOnPage = numberFields + startColumn;\r
+\r
+ int vColsSize = (vCols == null ) ? 0 : vCols.length;\r
+\r
+ for (int columnId = startColumn; columnId <= max_colid; columnId++) \r
+ {\r
+ // skip any "existing" columns not requested, or requested cols\r
+ // that have already been read.\r
+ if (((vCols != null) && \r
+ (!(vColsSize > columnId && (vCols[columnId] != 0)))) ||\r
+ ((mCols != null) && (mCols[columnId] != 0)))\r
+ {\r
+ if (columnId < highestColumnOnPage)\r
+ {\r
+ // If the field exists in the row on the page, but the\r
+ // partial row being returned does not include it,\r
+ // skip the field ...\r
+ \r
+ skipField(dataIn);\r
+ }\r
+\r
+ continue;\r
+ }\r
+\r
+ // See if the column identifier is beyond the number of fields\r
+ // that this record has\r
+ if (columnId >= highestColumnOnPage) \r
+ {\r
+ // field is non-existent\r
+ Object column = row[columnId];\r
+\r
+ if (column instanceof DataValueDescriptor) \r
+ {\r
+ // RESOLVE - This is in place for 1.2. In the future\r
+ // we may want to return this column as non-existent\r
+ // even if it is a storable column, or maybe use a\r
+ // supplied default.\r
+\r
+ ((DataValueDescriptor) column).restoreToNull();\r
+ } \r
+ else \r
+ {\r
+ row[columnId] = null;\r
+ }\r
+ continue;\r
+ }\r
+\r
+ // read the field header\r
+ int fieldStatus = \r
+ StoredFieldHeader.readStatus(dataIn);\r
+\r
+ int fieldDataLength = \r
+ StoredFieldHeader.readFieldDataLength(\r
+ dataIn, fieldStatus, slotFieldSize);\r
+\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ SanityManager.ASSERT(\r
+ !StoredFieldHeader.isExtensible(fieldStatus), \r
+ "extensible fields not supported yet");\r
+ }\r
+\r
+ Object column = row[columnId];\r
+\r
+ OverflowInputStream overflowIn = null;\r
+\r
+ // SRW-DJD code assumes non-extensible case ...\r
+\r
+ // field is non-existent, return null\r
+ if (StoredFieldHeader.isNonexistent(fieldStatus)) \r
+ {\r
+\r
+ if (column instanceof DataValueDescriptor) \r
+ {\r
+ // RESOLVE - This is in place for 1.2. In the future\r
+ // we may want to return this column as non-existent\r
+ // even if it is a storable column, or maybe use a \r
+ // supplied default.\r
+ ((DataValueDescriptor) column).restoreToNull();\r
+ } \r
+ else \r
+ {\r
+ row[columnId] = null;\r
+ }\r
+ continue;\r
+ }\r
+\r
+ boolean isOverflow = StoredFieldHeader.isOverflow(fieldStatus);\r
+\r
+ if (isOverflow) \r
+ {\r
+\r
+ // A fetched long column needs to be returned as a stream\r
+ //\r
+ long overflowPage = \r
+ CompressedNumber.readLong((InputStream) dataIn);\r
+\r
+ int overflowId = \r
+ CompressedNumber.readInt((InputStream) dataIn);\r
+\r
+ // Prepare the stream for results...\r
+ // create the byteHolder the size of a page, so, that it \r
+ // will fit the field Data that would fit on a page.\r
+ MemByteHolder byteHolder = \r
+ new MemByteHolder(pageData.length);\r
+\r
+ overflowIn = new OverflowInputStream(\r
+ byteHolder, owner, overflowPage, \r
+ overflowId, recordToLock);\r
+ }\r
+\r
+ // Deal with Object columns\r
+ if (column instanceof DataValueDescriptor) \r
+ {\r
+ DataValueDescriptor sColumn = (DataValueDescriptor) column;\r
+\r
+ // is the column null ?\r
+ if (StoredFieldHeader.isNull(fieldStatus)) \r
+ {\r
+ sColumn.restoreToNull();\r
+ continue;\r
+ }\r
+\r
+ // set the limit for the user read\r
+ if (!isOverflow) \r
+ {\r
+ // normal, non-overflow column case.\r
+\r
+ dataIn.setLimit(fieldDataLength);\r
+ inUserCode = dataIn;\r
+ sColumn.readExternal(dataIn);\r
+ inUserCode = null;\r
+ int unread = dataIn.clearLimit();\r
+ if (unread != 0)\r
+ dataIn.skipBytes(unread);\r
+ }\r
+ else\r
+ {\r
+ // column being fetched is a Object long column.\r
+\r
+ FormatIdInputStream newIn = \r
+ new FormatIdInputStream(overflowIn);\r
+\r
+ // if a column is a long column, store recommends user \r
+ // fetch it as a stream.\r
+ boolean fetchStream = true;\r
+\r
+ if (!(sColumn instanceof StreamStorable)) \r
+ {\r
+ fetchStream = false;\r
+ }\r
+\r
+ if (fetchStream) \r
+ {\r
+ ((StreamStorable)sColumn).setStream(newIn);\r
+ } \r
+ else \r
+ {\r
+ inUserCode = newIn;\r
+ sColumn.readExternal(newIn);\r
+ inUserCode = null;\r
+ }\r
+\r
+ } \r
+\r
+ continue;\r
+ }\r
+\r
+ // At this point only non-Storable columns.\r
+\r
+ if (StoredFieldHeader.isNull(fieldStatus))\r
+ {\r
+ // Only Storables can be null ...\r
+\r
+ throw StandardException.newException(\r
+ SQLState.DATA_NULL_STORABLE_COLUMN,\r
+ Integer.toString(columnId));\r
+ }\r
+\r
+ // This is a non-extensible field, which means the caller must \r
+ // know the correct type and thus the element in row is the \r
+ // correct type or null. It must be Serializable.\r
+ //\r
+ // We do not support Externalizable here.\r
+\r
+ dataIn.setLimit(fieldDataLength);\r
+ inUserCode = dataIn;\r
+ row[columnId] = (Object) dataIn.readObject();\r
+ inUserCode = null;\r
+ int unread = dataIn.clearLimit();\r
+ if (unread != 0)\r
+ dataIn.skipBytes(unread);\r
+\r
+ continue;\r
+ }\r
+\r
+ // if the last column on this page is bigger than the highest \r
+ // column we are looking for, then we are done restoring the record.\r
+\r
+ if ((numberFields + startColumn) > max_colid)\r
+ return true;\r
+ else\r
+ return false;\r
+\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ // an exception during the restore of a user column, this doesn't\r
+ // make the database corrupt, just that this field is inaccessable\r
+\r
+ if (inUserCode != null) \r
+ {\r
+ dataIn.clearLimit();\r
+\r
+ if (ioe instanceof EOFException) \r
+ {\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ SanityManager.DEBUG_PRINT("DEBUG_TRACE",\r
+ "StoredPage - EOF while restoring record: " +\r
+ recordHeader +\r
+ "Page dump = " + this);\r
+ }\r
+\r
+ // going beyond the limit in a DataInput class results in\r
+ // an EOFException when it sees the -1 from a read\r
+ throw StandardException.newException(\r
+ SQLState.DATA_STORABLE_READ_MISMATCH,\r
+ ioe, inUserCode.getErrorInfo());\r
+ }\r
+\r
+ // some SQLData error reporting\r
+ Exception ne = inUserCode.getNestedException();\r
+ if (ne != null)\r
+ {\r
+ if (ne instanceof InstantiationException)\r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_SQLDATA_READ_INSTANTIATION_EXCEPTION,\r
+ ne, inUserCode.getErrorInfo());\r
+ }\r
+\r
+ if (ne instanceof IllegalAccessException)\r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_SQLDATA_READ_ILLEGAL_ACCESS_EXCEPTION,\r
+ ne, inUserCode.getErrorInfo());\r
+ }\r
+\r
+ if (ne instanceof StandardException)\r
+ {\r
+ throw (StandardException) ne;\r
+ }\r
+ }\r
+\r
+ throw StandardException.newException(\r
+ SQLState.DATA_STORABLE_READ_EXCEPTION,\r
+ ioe, inUserCode.getErrorInfo());\r
+ }\r
+\r
+ // re-throw to higher levels so they can put it in correct context.\r
+ throw ioe;\r
+\r
+ } \r
+ catch (ClassNotFoundException cnfe) \r
+ {\r
+ dataIn.clearLimit();\r
+\r
+ // an exception during the restore of a user column, this doesn't\r
+ // make the database corrupt, just that this field is inaccessable\r
+ throw StandardException.newException(\r
+ SQLState.DATA_STORABLE_READ_MISSING_CLASS,\r
+ cnfe, inUserCode.getErrorInfo());\r
+\r
+ } \r
+ catch (LinkageError le)\r
+ {\r
+ // Some error during the link of a user class\r
+ if (inUserCode != null)\r
+ {\r
+ dataIn.clearLimit();\r
+\r
+ throw StandardException.newException(\r
+ SQLState.DATA_STORABLE_READ_EXCEPTION,\r
+ le, inUserCode.getErrorInfo());\r
+ }\r
+ throw le;\r
+ }\r
+\r
+ }\r
+\r
+ /**
+  * Restore the fields of a record into the given row, reading field data
+  * directly from the page's byte array (pageData).
+  * <p>
+  * For each requested column between the record's first field and
+  * max_colid: a field present on this page is deserialized into row (or,
+  * for an overflow/long column, wrapped in an OverflowInputStream); a
+  * field not present on this page is restored to null.
+  *
+  * @param row          array of columns to restore into, indexed by column id.
+  * @param max_colid    highest column id the caller wants restored.
+  * @param vCols        if non-null, restore only columns with vCols[i] != 0.
+  * @param mCols        if non-null, skip columns with mCols[i] != 0 (these
+  *                     have already been read from an earlier row portion).
+  * @param dataIn       stream over pageData, positioned at the record's
+  *                     first field.
+  * @param recordHeader header of the record being read.
+  * @param recordToLock record handle used to lock the overflow chain when
+  *                     setting up a long column stream.
+  *
+  * @return true if all columns up to max_colid have been restored, false
+  *         if the caller must continue on an overflow page.
+  *
+  * @exception StandardException Standard exception policy.
+  * @exception IOException       rethrown only for errors outside user
+  *                              deserialization code; errors raised inside
+  *                              user code are converted to
+  *                              StandardExceptions.
+  **/
+ private final boolean readRecordFromArray(
+ Object[] row, 
+ int max_colid,
+ int[] vCols,
+ int[] mCols,
+ ArrayInputStream dataIn, 
+ StoredRecordHeader recordHeader,
+ RecordHandle recordToLock)
+ throws StandardException, IOException
+ {
+ ErrorObjectInput inUserCode = null;
+ try
+ {
+ // Get the number of columns in the row.
+ int numberFields = recordHeader.getNumberFields();
+
+ int startColumn = recordHeader.getFirstField();
+
+ if (startColumn > max_colid)
+ {
+ // done if the startColumn is higher than highest column.
+ return true;
+ }
+
+ // For each column in the row, restore the column from
+ // the corresponding field in the record. If the field
+ // is missing or not set, set the column to null.
+
+ int highestColumnOnPage = numberFields + startColumn;
+
+ int vColsSize = (vCols == null ) ? 0 : vCols.length;
+
+ // byte offset within pageData of the current field's header.
+ int offset_to_field_data = dataIn.getPosition();
+
+ for (int columnId = startColumn; columnId <= max_colid; columnId++) 
+ {
+ // skip any "existing" columns not requested, or requested cols
+ // that have already been read.
+ if (((vCols != null) && 
+ (!(vColsSize > columnId && (vCols[columnId] != 0)))) ||
+ ((mCols != null) && (mCols[columnId] != 0)))
+ {
+ if (columnId < highestColumnOnPage)
+ {
+ // If the field exists in the row on the page, but the
+ // partial row being returned does not include it,
+ // skip the field ...
+ 
+ offset_to_field_data += 
+ StoredFieldHeader.readTotalFieldLength(
+ pageData, offset_to_field_data);
+ }
+
+ continue;
+ }
+ else if (columnId < highestColumnOnPage) 
+ {
+ // the column is on this page.
+
+ // read the field header
+
+ // read the status byte.
+ int fieldStatus = 
+ StoredFieldHeader.readStatus(
+ pageData, offset_to_field_data);
+
+ // read the field data length, position on 1st byte of data
+ int fieldDataLength = 
+ StoredFieldHeader.readFieldLengthAndSetStreamPosition(
+ pageData, 
+ offset_to_field_data + 
+ StoredFieldHeader.STORED_FIELD_HEADER_STATUS_SIZE,
+ fieldStatus,
+ slotFieldSize,
+ dataIn);
+
+
+ if (SanityManager.DEBUG) 
+ {
+ SanityManager.ASSERT(
+ !StoredFieldHeader.isExtensible(fieldStatus), 
+ "extensible fields not supported yet");
+ }
+
+ Object column = row[columnId];
+
+ OverflowInputStream overflowIn = null;
+
+ // SRW-DJD code assumes non-extensible case ...
+
+ if ((fieldStatus & StoredFieldHeader.FIELD_NONEXISTENT) != 
+ StoredFieldHeader.FIELD_NONEXISTENT)
+ {
+ // normal path - field exists.
+
+ boolean isOverflow = 
+ ((fieldStatus & 
+ StoredFieldHeader.FIELD_OVERFLOW) != 0);
+
+ if (isOverflow) 
+ {
+
+ // A fetched long column is returned as a stream
+
+ long overflowPage = 
+ CompressedNumber.readLong((InputStream) dataIn);
+
+ int overflowId = 
+ CompressedNumber.readInt((InputStream) dataIn);
+
+ // Prepare the stream for results...
+ // create the byteHolder the size of a page, so, 
+ // that it will fit the field Data that would fit 
+ // on a page.
+
+ MemByteHolder byteHolder = 
+ new MemByteHolder(pageData.length);
+
+ overflowIn = new OverflowInputStream(
+ byteHolder, owner, overflowPage, 
+ overflowId, recordToLock);
+ }
+
+ // Deal with Object columns
+ if (column instanceof DataValueDescriptor) 
+ {
+ DataValueDescriptor sColumn = 
+ (DataValueDescriptor) column;
+
+ // is the column null ?
+ if ((fieldStatus & 
+ StoredFieldHeader.FIELD_NULL) == 0)
+ {
+ // the field is not null.
+
+ // set the limit for the user read
+ if (!isOverflow) 
+ {
+ // normal, non-overflow column case.
+
+ dataIn.setLimit(fieldDataLength);
+ inUserCode = dataIn;
+ sColumn.readExternalFromArray(dataIn);
+ inUserCode = null;
+ // skip any bytes the column did not consume so the
+ // stream lines up on the next field header.
+ int unread = dataIn.clearLimit();
+ if (unread != 0)
+ dataIn.skipBytes(unread);
+ }
+ else
+ {
+ // column being fetched is a long column.
+
+ FormatIdInputStream newIn = 
+ new FormatIdInputStream(overflowIn);
+
+ // long columns are fetched as a stream.
+
+ boolean fetchStream = true;
+
+ if (!(sColumn instanceof StreamStorable)) 
+ {
+ fetchStream = false;
+ }
+
+ if (fetchStream) 
+ {
+ ((StreamStorable) sColumn).setStream(
+ newIn);
+ } 
+ else 
+ {
+ inUserCode = newIn;
+ sColumn.readExternal(newIn);
+ inUserCode = null;
+ }
+ } 
+ }
+ else
+ {
+ sColumn.restoreToNull();
+ }
+
+ }
+ else
+ {
+
+ // At this point only non-Storable columns.
+
+ if (StoredFieldHeader.isNull(fieldStatus))
+ {
+ // Only Storables can be null ...
+
+ throw StandardException.newException(
+ SQLState.DATA_NULL_STORABLE_COLUMN,
+ Integer.toString(columnId));
+ }
+
+ // This is a non-extensible field, which means the 
+ // caller must know the correct type and thus the 
+ // element in row is the correct type or null. It 
+ // must be Serializable.
+ //
+ // We do not support Externalizable here.
+
+ dataIn.setLimit(fieldDataLength);
+ inUserCode = dataIn;
+ // RESOLVE (no non-storables?)
+ row[columnId] = (Object) dataIn.readObject();
+ inUserCode = null;
+ int unread = dataIn.clearLimit();
+ if (unread != 0)
+ dataIn.skipBytes(unread);
+ }
+ }
+ else
+ {
+ // column is non-existent.
+
+ if (column instanceof DataValueDescriptor) 
+ {
+ // RESOLVE - This is in place for 1.2. In the future
+ // we may want to return this column as non-existent
+ // even if it is a storable column, or maybe use a 
+ // supplied default.
+ ((DataValueDescriptor) column).restoreToNull();
+ } 
+ else 
+ {
+ row[columnId] = null;
+ }
+ }
+
+ // move the counter to point to beginning of next field.
+ offset_to_field_data = dataIn.getPosition();
+ }
+ else
+ {
+ // field is non-existent
+ Object column = row[columnId];
+
+ if (column instanceof DataValueDescriptor) 
+ {
+ // RESOLVE - This is in place for 1.2. In the future
+ // we may want to return this column as non-existent
+ // even if it is a storable column, or maybe use a
+ // supplied default.
+
+ ((DataValueDescriptor) column).restoreToNull();
+ } 
+ else 
+ {
+ row[columnId] = null;
+ }
+ }
+ }
+
+ // if the last column on this page is bigger than the highest 
+ // column we are looking for, then we are done restoring the record.
+
+ if ((numberFields + startColumn) > max_colid)
+ return true;
+ else
+ return false;
+
+ } 
+ catch (IOException ioe) 
+ {
+ // an exception during the restore of a user column, this doesn't
+ // make the database corrupt, just that this field is inaccessible
+
+ if (inUserCode != null) 
+ {
+ dataIn.clearLimit();
+
+ if (ioe instanceof EOFException) 
+ {
+ if (SanityManager.DEBUG)
+ {
+ SanityManager.DEBUG_PRINT("DEBUG_TRACE",
+ "StoredPage - EOF while restoring record: " +
+ recordHeader +
+ "Page dump = " + this);
+ }
+
+ // going beyond the limit in a DataInput class results in
+ // an EOFException when it sees the -1 from a read
+ throw StandardException.newException(
+ SQLState.DATA_STORABLE_READ_MISMATCH,
+ ioe, inUserCode.getErrorInfo());
+ }
+
+ // some SQLData error reporting
+ Exception ne = inUserCode.getNestedException();
+ if (ne != null)
+ {
+ if (ne instanceof InstantiationException)
+ {
+ throw StandardException.newException(
+ SQLState.DATA_SQLDATA_READ_INSTANTIATION_EXCEPTION,
+ ne, inUserCode.getErrorInfo());
+ }
+
+ if (ne instanceof IllegalAccessException)
+ {
+ throw StandardException.newException(
+ SQLState.DATA_SQLDATA_READ_ILLEGAL_ACCESS_EXCEPTION,
+ ne, inUserCode.getErrorInfo());
+ }
+
+ if (ne instanceof StandardException)
+ {
+ throw (StandardException) ne;
+ }
+ }
+
+ throw StandardException.newException(
+ SQLState.DATA_STORABLE_READ_EXCEPTION,
+ ioe, inUserCode.getErrorInfo());
+ }
+
+ // re-throw to higher levels so they can put it in correct context.
+ throw ioe;
+
+ } 
+ catch (ClassNotFoundException cnfe) 
+ {
+ dataIn.clearLimit();
+
+ // an exception during the restore of a user column, this doesn't
+ // make the database corrupt, just that this field is inaccessible
+ // NOTE(review): unlike the IOException path this does not check
+ // inUserCode for null before calling getErrorInfo(); presumably a
+ // ClassNotFoundException can only arise inside readObject(), where
+ // inUserCode is always set -- confirm.
+ throw StandardException.newException(
+ SQLState.DATA_STORABLE_READ_MISSING_CLASS,
+ cnfe, inUserCode.getErrorInfo());
+
+ } 
+ catch (LinkageError le)
+ {
+ // Some error during the link of a user class
+ if (inUserCode != null)
+ {
+ dataIn.clearLimit();
+
+ throw StandardException.newException(
+ SQLState.DATA_STORABLE_READ_EXCEPTION,
+ le, inUserCode.getErrorInfo());
+ }
+ throw le;
+ }
+
+ }
+\r
+ /**
+  * Restore a portion of a long column.
+  * <p>
+  * Restore a portion of a long column - user must supply two streams on top
+  * of the same data, one implements ObjectInput interface that knows how to
+  * restore the object, the other one implements LimitInputStream.
+  * <p>
+  * Copies this overflow page's data segment into the stream's ByteHolder
+  * and then updates the stream's overflow page/id so the next portion (if
+  * any) can be fetched; both are set to -1 when this is the last segment.
+  *
+  * @param fetchStream the stream to read the next portion of long col from
+  *
+  * @exception StandardException Standard exception policy.
+  * @exception IOException       error reading the field from the page.
+  **/
+ public void restorePortionLongColumn(
+ OverflowInputStream fetchStream)
+ throws StandardException, IOException
+ {
+ int slot = 
+ findRecordById(fetchStream.getOverflowId(), FIRST_SLOT_NUMBER);
+
+ StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
+
+ int offset = getRecordOffset(slot);
+ int numberFields = recordHeader.getNumberFields();
+
+ if (SanityManager.DEBUG) 
+ {
+ // 1 field = data only (last segment); 2 fields = data plus an
+ // overflow pointer to the next segment.
+ if ((numberFields > 2) || (numberFields < 1))
+ {
+ SanityManager.THROWASSERT(
+ "longColumn record header must have 1 or 2 fields." +
+ "numberFields = " + numberFields);
+ }
+ }
+
+ // position on the first field header, just past the record header.
+ rawDataIn.setPosition(offset + recordHeader.size());
+
+ int fieldStatus = 
+ StoredFieldHeader.readStatus(rawDataIn);
+ int fieldDataLength = 
+ StoredFieldHeader.readFieldDataLength(
+ rawDataIn, fieldStatus, slotFieldSize);
+
+ // read the data portion of this segment from the stream.
+
+ ByteHolder bh = fetchStream.getByteHolder();
+ bh.write(rawDataIn, fieldDataLength);
+ fetchStream.setByteHolder(bh);
+
+ // set the next overflow pointer in the stream...
+ if (numberFields == 1) 
+ {
+ // this is the last bit of the long column
+ fetchStream.setOverflowPage(-1);
+ fetchStream.setOverflowId(-1);
+ } 
+ else 
+ {
+ int firstFieldStatus = fieldStatus; // for DEBUG check
+
+ // get the field status and data length of the overflow pointer.
+ fieldStatus = 
+ StoredFieldHeader.readStatus(rawDataIn);
+ fieldDataLength = 
+ StoredFieldHeader.readFieldDataLength(
+ rawDataIn, fieldStatus, slotFieldSize);
+
+ if (SanityManager.DEBUG)
+ {
+ if (!StoredFieldHeader.isOverflow(fieldStatus))
+ {
+ // In version 1.5, the first field is overflow and the
+ // second is not. In version 2.0 onwards, the first
+ // field is not overflow and the second is overflow
+ // (the overflow bit goes with the overflow pointer).
+ // Check first field to make sure its overflow bit is
+ // set on.
+ SanityManager.ASSERT(
+ StoredFieldHeader.isOverflow(firstFieldStatus));
+ }
+ }
+
+ long overflowPage = 
+ CompressedNumber.readLong((InputStream) rawDataIn);
+ int overflowId = 
+ CompressedNumber.readInt((InputStream) rawDataIn);
+
+ // there is more after this chunk.
+ fetchStream.setOverflowPage(overflowPage);
+ fetchStream.setOverflowId(overflowId);
+ }
+ }
+\r
+\r
+ /**
+  * Log a Storable to a stream.
+  * <p>
+  * Log a Storable into a stream. This is used by update field operations
+  * <P>
+  * Write the column in its field format to the stream. Field format is a 
+  * field header followed the data of the column as defined by the data 
+  * itself. See this class's description for the specifics of the header.
+  *
+  * @param slot              slot of the record containing the field.
+  * @param fieldId           id of the field within the record to update.
+  * @param column            the new column value to log.
+  * @param out               stream to write the field's log format to.
+  * @param overflowThreshold limit used to decide whether the column must
+  *                          be treated as a long (overflow) column.
+  *
+  * @exception StandardException Standard Derby error policy
+  * @exception IOException RESOLVE
+  **/
+ public void logColumn(
+ int slot, 
+ int fieldId, 
+ Object column, 
+ DynamicByteArrayOutputStream out, 
+ int overflowThreshold)
+ throws StandardException, IOException
+ {
+ // calculate the space available on the page, it includes
+ // the free space
+ // the space the record has reserved but not used
+ // the length of the old field itself
+
+ // free space
+ int bytesAvailable = freeSpace;
+ int beginPosition = -1;
+
+ // space reserved, but not used by the record
+ bytesAvailable += getReservedCount(slot);
+
+ // The size of the old field is also available for the new field
+ rawDataIn.setPosition(getFieldOffset(slot, fieldId));
+
+ int fieldStatus = 
+ StoredFieldHeader.readStatus(rawDataIn);
+ int fieldDataLength = 
+ StoredFieldHeader.readFieldDataLength(
+ rawDataIn, fieldStatus, slotFieldSize);
+
+ bytesAvailable += 
+ StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize) 
+ + fieldDataLength;
+
+ try 
+ {
+ setOutputStream(out);
+ beginPosition = rawDataOut.getPosition();
+
+ Object[] row = new Object[1];
+ row[0] = column;
+ // logColumn returns spaceAvailable unchanged when the column did
+ // not fit; in that case report no space on this page.
+ if (bytesAvailable == logColumn(
+ row, 0, out, bytesAvailable, 
+ COLUMN_NONE, overflowThreshold)) 
+ {
+ throw new NoSpaceOnPage(isOverflowPage());
+ }
+
+ } 
+ finally 
+ {
+ // Only restore the position if it was actually captured. If
+ // setOutputStream()/getPosition() threw, beginPosition is still -1
+ // and calling setPosition(-1) here would raise a second exception
+ // from the finally block, masking the original failure.
+ if (beginPosition != -1)
+ {
+ rawDataOut.setPosition(beginPosition);
+ }
+ resetOutputStream();
+ }
+ }
+\r
+ /**
+  * Log one portion of a long column into a DataOutput.
+  * <p>
+  * Used by insert operations when a long column is spread across overflow
+  * pages. Writes a record header for this portion followed by the column
+  * data in field format (field header plus data); see this class's
+  * description for the specifics of the header layout.
+  *
+  * @param slot     slot of the row with the column
+  * @param recordId record id of the long column portion
+  * @param column   the object form of the column to log
+  * @param out      where to log the column to.
+  *
+  * @return the value returned by logColumn for a COLUMN_LONG column:
+  *         -1 when the column is complete, 1 when more remains.
+  *
+  * @exception StandardException Standard Derby error policy
+  * @exception IOException I/O exception from writing to an array.
+  *
+  * @see BasePage#logColumn
+  **/
+ public int logLongColumn(
+ int slot, 
+ int recordId, 
+ Object column, 
+ DynamicByteArrayOutputStream out)
+ throws StandardException, IOException
+ {
+ // Space left on the page once the extra slot table entry this
+ // portion requires has been accounted for.
+ int available = freeSpace - slotEntrySize;
+
+ // <= is ok here as we know we want to write at least one more byte
+ if (available <= 0)
+ throw new NoSpaceOnPage(isOverflowPage());
+
+ setOutputStream(out);
+ int startPosition = out.getPosition();
+
+ try 
+ {
+ // The long column portion on the new page carries a single field
+ // when it fits here; a second (pointer) field is only needed when
+ // it must continue on yet another page.
+ StoredRecordHeader portionHeader = 
+ new StoredRecordHeader(recordId, 1);
+
+ available -= portionHeader.write(logicalDataOut);
+
+ if (available < 0)
+ {
+ // Not even the record header fits on this page; the caller
+ // handles logging an overflow column portion with a forward
+ // pointer instead.
+ throw new NoSpaceOnPage(isOverflowPage());
+ }
+
+ // The rest of the long column fits on the page!
+ Object[] singleColumnRow = { column };
+ return logColumn(
+ singleColumnRow, 0, out, available, COLUMN_LONG, 100);
+ } 
+ finally 
+ {
+ resetOutputStream();
+ }
+ }
+\r
+ /**
+  * Log column from input row to the given output stream.
+  * <p>
+  * Read data from row[arrayPosition], and write the column data in
+  * raw store page format to the given column. Along the way determine
+  * if the column will fit on the current page.
+  * <p>
+  * Action taken in this routine is determined by the kind of column as
+  * specified in the columnFlag:
+  * COLUMN_NONE - the column is insignificant
+  * COLUMN_FIRST - this is the first column in a logRow() call
+  * COLUMN_LONG - this is a known long column, therefore we will 
+  * store part of the column on the current page and 
+  * overflow the rest if necessary.
+  * <p>
+  * Upon entry to this routine logicalDataOut is tied to the 
+  * DynamicByteArrayOutputStream out.
+  * <BR>
+  * If a column is a long column and it does not totally fit on the current
+  * page, then a LongColumnException is thrown. We package up info about
+  * the current long column in the partially filled in exception so that
+  * callers can take correct action. The column will now be set a as a
+  * stream.
+  *
+  * @return The spaceAvailable after accounting for space for this column,
+  *         or (for COLUMN_LONG) -1 when the column is done and 1 when
+  *         more of it remains to be written.
+  *
+  * @param row array of column from which to read the column from.
+  * @param arrayPosition The array position of column to be reading from row.
+  * @param out The stream to write the raw store page format of the
+  * the column to.
+  * @param spaceAvailable The number of bytes available on the page for
+  * this column, this may differ from current page
+  * as it may include bytes used by previous 
+  * columns.
+  * @param columnFlag one of: COLUMN_NONE, COLUMN_FIRST, or COLUMN_LONG.
+  * @param overflowThreshold used by isLong/getMaxDataLength to decide
+  * when the column must overflow to another page.
+  *
+  * @exception StandardException Standard exception policy.
+  * @exception LongColumnException Thrown if column will not fit on a 
+  * single page. See notes above
+  **/
+ private int logColumn(
+ Object[] row, 
+ int arrayPosition,
+ DynamicByteArrayOutputStream out, 
+ int spaceAvailable,
+ int columnFlag, 
+ int overflowThreshold)
+ throws StandardException, IOException
+ {
+ // RESOLVE (mikem) - why will row be null?
+ Object column = (row != null ? row[arrayPosition] : null);
+
+ // Check to see if the data comes from a page, if it is, then the field
+ // header is already formatted.
+ if (column instanceof RawField)
+ {
+ // field data is raw, no need to set up a field header etc.
+
+ byte[] data = ((RawField) column).getData();
+
+ if (data.length <= spaceAvailable) 
+ {
+ out.write(data);
+ spaceAvailable -= data.length;
+ }
+ return spaceAvailable;
+ }
+
+ // If this is a long column, it may fit in this page or it may not.
+ // Set to false below when a COLUMN_LONG portion could not be finished.
+ boolean longColumnDone = true;
+
+
+ // default field status.
+ int fieldStatus =
+ StoredFieldHeader.setFixed(StoredFieldHeader.setInitial(), true);
+
+ int beginPosition = out.getPosition();
+ int columnBeginPosition = 0;
+ int headerLength;
+ int fieldDataLength = 0;
+
+ // If the column has an associated stream, log from the stream rather
+ // than from the object form.
+ if (column instanceof StreamStorable)
+ {
+ StreamStorable stream_storable_column = (StreamStorable) column;
+
+ if (stream_storable_column.returnStream() != null)
+ {
+ column = 
+ (Object) stream_storable_column.returnStream();
+ }
+ }
+
+ if (column == null)
+ {
+ // null column object: log a non-existent field header, no data.
+ fieldStatus = StoredFieldHeader.setNonexistent(fieldStatus);
+ headerLength =
+ StoredFieldHeader.write(
+ logicalDataOut, fieldStatus, 
+ fieldDataLength, slotFieldSize);
+ }
+ else if (column instanceof InputStream)
+ {
+ RememberBytesInputStream bufferedIn = null;
+ int bufferLen = 0;
+
+ int estimatedMaxDataSize =
+ getMaxDataLength(spaceAvailable, overflowThreshold);
+
+ // if column is already instanceof RememberBytesInputStream, then we
+ // need to find out how many bytes have already been stored in the 
+ // buffer.
+ if (column instanceof RememberBytesInputStream)
+ {
+ // data is already RememberBytesInputStream
+
+ bufferedIn = (RememberBytesInputStream) column;
+ bufferLen = bufferedIn.numBytesSaved();
+
+ } 
+ else 
+ {
+ // data comes in as an inputstream
+ bufferedIn = new RememberBytesInputStream(
+ (InputStream) column, new MemByteHolder(maxFieldSize + 1));
+
+ // always set stream of InputStream to RememberBytesInputStream
+ // so that all future access to this column will be able to
+ // get at the bytes drained from the InputStream, and copied 
+ // into the RememberBytesInputStream.
+ if (row[arrayPosition] instanceof StreamStorable)
+ ((StreamStorable)row[arrayPosition]).setStream(bufferedIn);
+ 
+ // set column to the RememberBytesInputStream so that 
+ // all future access to this column will be able to get 
+ // at bytes that have been already read. This assignment
+ // is needed to ensure that if long column exception is 
+ // thrown, the column is set correctly
+ column = bufferedIn;
+ }
+
+ // read the buffer by reading the max we can read.
+ if (bufferLen < (estimatedMaxDataSize + 1))
+ {
+ bufferLen +=
+ bufferedIn.fillBuf(estimatedMaxDataSize + 1 - bufferLen);
+ }
+
+ if ((bufferLen <= estimatedMaxDataSize))
+ {
+ // we will be able to fit this into the page
+ 
+ fieldDataLength = bufferLen;
+ fieldStatus = StoredFieldHeader.setFixed(fieldStatus, true);
+ headerLength = StoredFieldHeader.write(
+ logicalDataOut, fieldStatus, 
+ fieldDataLength, slotFieldSize);
+ 
+ // if the field is extensible, then we write the serializable 
+ // formatId. if the field is non-extensible, we don't need to 
+ // write the formatId. but at this point, how do we know 
+ // whether the field is extensible or not??? For Plato release,
+ // we do not support InputStream on extensible types, 
+ // therefore, we ignore the formatId for now.
+ bufferedIn.putBuf(logicalDataOut, fieldDataLength);
+ } 
+ else
+ {
+ // current column will not fit into the current page.
+
+ if (columnFlag == COLUMN_LONG)
+ {
+ // column is a long column and the remaining portion does
+ // not fit on the current page.
+ longColumnDone = false;
+ 
+ // it's a portion of a long column, and there is more to 
+ // write reserve enough room for overflow pointer, then 
+ // write as much data as we can leaving an extra 2 bytes
+ // for overflow field header.
+ fieldDataLength =
+ estimatedMaxDataSize - OVERFLOW_POINTER_SIZE - 2;
+ fieldStatus =
+ StoredFieldHeader.setFixed(fieldStatus, true);
+
+ headerLength =
+ StoredFieldHeader.write(
+ logicalDataOut, fieldStatus, 
+ fieldDataLength, slotFieldSize);
+ bufferedIn.putBuf(logicalDataOut, fieldDataLength);
+
+ // now, we need to adjust the buffer, move the unread 
+ // bytes to the beginning position the cursor correctly,
+ // so, next time around, we can read more into the buffer.
+ // NOTE(review): both locals below are unused; the calls
+ // are kept for their side effects on bufferedIn.
+ int remainingBytes = bufferedIn.available();
+
+ // move the unread bytes to the beginning of the byteHolder.
+ int bytesShifted = bufferedIn.shiftToFront();
+
+ } 
+ else
+ {
+ // column not a long column and does not fit on page.
+ int delta = maxFieldSize - bufferLen + 1;
+
+ if (delta > 0)
+ bufferLen += bufferedIn.fillBuf(delta);
+ fieldDataLength = bufferLen;
+
+ // the data will not fit on this page make sure the new 
+ // input stream is passed back to the upper layer...
+ column = (Object) bufferedIn;
+ }
+ }
+ 
+ } 
+ else if (column instanceof DataValueDescriptor)
+ {
+ DataValueDescriptor sColumn = (DataValueDescriptor) column;
+
+ boolean isNull = sColumn.isNull();
+ if (isNull) 
+ {
+ fieldStatus = StoredFieldHeader.setNull(fieldStatus, true);
+ }
+
+ // header is written with 0 length here.
+ headerLength = 
+ StoredFieldHeader.write(
+ logicalDataOut, fieldStatus, 
+ fieldDataLength, slotFieldSize);
+
+ if (!isNull) 
+ {
+ // write the field data to the log 
+ try 
+ {
+ columnBeginPosition = out.getPosition();
+ sColumn.writeExternal(logicalDataOut);
+ }
+ catch (IOException ioe)
+ {
+ // SQLData error reporting
+ if (logicalDataOut != null)
+ {
+ Exception ne = logicalDataOut.getNestedException();
+ if (ne != null)
+ {
+ if (ne instanceof StandardException)
+ {
+ throw (StandardException) ne;
+ }
+ }
+ }
+
+
+ throw StandardException.newException(
+ SQLState.DATA_STORABLE_WRITE_EXCEPTION, ioe);
+ }
+
+ // actual data length is whatever writeExternal produced.
+ fieldDataLength =
+ (out.getPosition() - beginPosition) - headerLength;
+ }
+ }
+ else if (column instanceof RecordHandle)
+ {
+ // we are inserting an overflow pointer for a long column
+
+ // casted reference to column to avoid repeated casting.
+ RecordHandle overflowHandle = (RecordHandle) column;
+
+ fieldStatus = StoredFieldHeader.setOverflow(fieldStatus, true);
+ headerLength = 
+ StoredFieldHeader.write(
+ logicalDataOut, fieldStatus, 
+ fieldDataLength, slotFieldSize);
+
+ fieldDataLength += 
+ CompressedNumber.writeLong(out, overflowHandle.getPageNumber());
+ fieldDataLength += 
+ CompressedNumber.writeInt(out, overflowHandle.getId());
+
+ } 
+ else
+ {
+ // Serializable/Externalizable/Formattable
+ // all look the same at this point.
+
+ // header is written with 0 length here.
+ headerLength = 
+ StoredFieldHeader.write(
+ logicalDataOut, fieldStatus, 
+ fieldDataLength, slotFieldSize);
+
+ logicalDataOut.writeObject(column);
+ fieldDataLength = 
+ (out.getPosition() - beginPosition) - headerLength;
+ }
+
+ // calculate the size of the field on page with compressed field header
+
+ fieldStatus = StoredFieldHeader.setFixed(fieldStatus, false);
+ int fieldSizeOnPage =
+ StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize)
+ + fieldDataLength;
+
+ userRowSize += fieldDataLength;
+
+ boolean fieldIsLong = isLong(fieldSizeOnPage, overflowThreshold);
+ 
+ // Do we have enough space on the page for this field?
+ if (((spaceAvailable < fieldSizeOnPage) || (fieldIsLong)) &&
+ (columnFlag != COLUMN_LONG)) 
+ {
+ // Column was not long before getting here and does not fit.
+
+ if (fieldIsLong) 
+ {
+ // long column, and this first time we have figured out this
+ // column is long.
+
+ if (!(column instanceof InputStream)) 
+ {
+ // Convert already written object to an InputStream.
+ ByteArray fieldData =
+ new ByteArray(
+ ((DynamicByteArrayOutputStream)out).getByteArray(),
+ (columnBeginPosition), fieldDataLength);
+
+ ByteArrayInputStream columnIn =
+ new ByteArrayInputStream(
+ fieldData.getArray(), columnBeginPosition, 
+ fieldDataLength);
+
+ MemByteHolder byteHolder = 
+ new MemByteHolder(fieldDataLength + 1);
+
+ RememberBytesInputStream bufferedIn = 
+ new RememberBytesInputStream(columnIn, byteHolder);
+ 
+ // the data will not fit on this page make sure the new 
+ // input stream is passed back to the upper layer...
+ column = bufferedIn;
+ }
+
+ // discard what was written for this field.
+ out.setPosition(beginPosition);
+
+ // This exception carries the information for the client
+ // routine to continue inserting the long row on multiple
+ // pages.
+ LongColumnException lce = new LongColumnException();
+ lce.setColumn(column);
+ throw lce;
+
+ } 
+ else 
+ {
+ // Column does not fit on this page, but it isn't a long column.
+
+ out.setPosition(beginPosition);
+ return(spaceAvailable);
+ }
+ }
+ 
+ // Now we go back to update the fieldDataLength in the field header
+ out.setPosition(beginPosition);
+
+ // slotFieldSize is set based on the pageSize.
+ // We are borrowing this to set the size of our fieldDataLength.
+ fieldStatus = StoredFieldHeader.setFixed(fieldStatus, true);
+ headerLength = StoredFieldHeader.write(
+ out, fieldStatus, fieldDataLength, slotFieldSize);
+
+ // set position to the end of the field
+ out.setPosition(beginPosition + fieldDataLength + headerLength);
+
+ spaceAvailable -= fieldSizeOnPage;
+
+ // YYZ: revisit
+ if (columnFlag == COLUMN_LONG)
+ {
+ // if we are logging a long column, we don't care how much space 
+ // is left on the page, instead, we care whether we are done with 
+ // the column or not. So, here, we want to return 1. if we are 
+ // not done, and return -1 if we are done.
+ // If logColumn returns -1, that flag is returned all the way to
+ // BasePage.insertLongColumn to signal end of loop.
+ if (longColumnDone)
+ return -1;
+ else
+ return 1;
+ } else
+ {
+ return (spaceAvailable);
+ }
+ }
+\r
+ /**\r
+ * Create and write a long row header to the log stream.\r
+ * <p>\r
+ * Called to log a new overflow record, will check for space available\r
+ * and throw an exception if the record header will not fit on the page.\r
+ * <p>\r
+ *\r
+ * @return -1\r
+ *\r
+ * @param slot slot of record to log.\r
+ * @param spaceAvailable spaceAvaliable on page.\r
+ * @param out stream to log the record to.\r
+ *\r
+ * @exception StandardException Standard exception policy.\r
+ **/\r
+ private int logOverflowRecord(\r
+ int slot, \r
+ int spaceAvailable, \r
+ DynamicByteArrayOutputStream out)\r
+ throws StandardException, IOException\r
+ {\r
+ setOutputStream(out);\r
+ \r
+ StoredRecordHeader pageRecordHeader = getHeaderAtSlot(slot);\r
+ \r
+ StoredRecordHeader overflow_rh = getOverFlowRecordHeader();\r
+ overflow_rh.setOverflowFields(pageRecordHeader);\r
+\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ SanityManager.ASSERT(overflow_rh.getOverflowPage() != 0);\r
+ }\r
+\r
+ /*\r
+ // #1 situation,\r
+ // we want to update the header to just an overflow pointer with no data\r
+ // so, update the recordHeader, and we are done...\r
+ if (!overflow_rh.isPartialOverflow()) {\r
+ // this recordHeader becomes just a overflow pointer,\r
+ // we need to make sure that the number of fields is set to 0.\r
+ overflow_rh.setNumberFields(0);\r
+ \r
+ spaceAvailable -= overflow_rh.write(logicalDataOut);\r
+\r
+ if (spaceAvailable < 0) {\r
+ throw new NoSpaceOnPage(isOverflowPage());\r
+ }\r
+\r
+ resetOutputStream();\r
+\r
+ return (-1);\r
+ }\r
+ */\r
+\r
+ // #2 situation,\r
+ // we want to only update the recordheader of the page, while leaving\r
+ // the data of the record on the page. Just update the header part and\r
+ // then arrange for the data part to move to after the new header.\r
+\r
+ int oldSize = pageRecordHeader.size();\r
+ int newSize = overflow_rh.size();\r
+\r
+ if (oldSize < newSize) \r
+ {\r
+ // need extra room...\r
+ int delta = newSize - oldSize;\r
+ if (spaceAvailable < delta) \r
+ {\r
+ throw new NoSpaceOnPage(isOverflowPage());\r
+ }\r
+ }\r
+\r
+ // write the new overflow_rh for the record.\r
+ overflow_rh.write(logicalDataOut);\r
+\r
+ // now, log the data\r
+ logRecordDataPortion(\r
+ slot, LOG_RECORD_DEFAULT, pageRecordHeader, \r
+ (FormatableBitSet) null, logicalDataOut, (RecordHandle)null);\r
+\r
+ return (-1);\r
+ }\r
+\r
    /**
     * Log an overflow pointer field.
     * <p>
     * Writes a field whose content is just a continuation pointer (compressed
     * overflow page number followed by compressed overflow record id) instead
     * of inline column data, after checking that the pointer fits on the page.
     *
     * @param out            stream to write the pointer's data portion to.
     * @param spaceAvailable bytes currently free on the page.
     * @param overflowPage   page number on which the data continues.
     * @param overflowId     record id of the continuation on that page.
     *
     * @return the bytes still available after this field is accounted for.
     *
     * @exception StandardException Standard exception policy.
     **/
    private int logOverflowField(
        DynamicByteArrayOutputStream out, 
        int spaceAvailable,
        long overflowPage, 
        int overflowId)
        throws StandardException, IOException
    {
        // field header flagged as an overflow pointer.
        int fieldStatus = 
            StoredFieldHeader.setOverflow(
                StoredFieldHeader.setInitial(), true);

        // payload size: compressed page number plus compressed record id.
        int fieldSizeOnPage = 
            CompressedNumber.sizeLong(overflowPage) + 
            CompressedNumber.sizeInt(overflowId);

        int fieldDataLength = fieldSizeOnPage;

        // add the size of the field header itself.
        fieldSizeOnPage += 
            StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize);

        // need to check that we have room on the page for this.
        spaceAvailable -= fieldSizeOnPage;

        // what if there is not enough room for the overflow pointer?
        if (spaceAvailable < 0)
            throw new NoSpaceOnPage(isOverflowPage());

        // write the field to the page:
        // NOTE(review): the header is written to logicalDataOut while the
        // pointer values go to 'out' - presumably both wrap the same
        // underlying stream (see setOutputStream in callers); confirm
        // before changing either target.
        StoredFieldHeader.write(
            logicalDataOut, fieldStatus, fieldDataLength, slotFieldSize);
        CompressedNumber.writeLong(out, overflowPage);
        CompressedNumber.writeInt(out, overflowId);

        // return the available bytes
        return(spaceAvailable);
    }
+\r
+ /**\r
+ * Log a record to the ObjectOutput stream.\r
+ * <p>\r
+ * Write out the complete on-page record to the store stream. Data is \r
+ * preceeded by a compressed int that gives the length of the following \r
+ * data.\r
+ *\r
+ * @exception StandardException Standard Derby error policy\r
+ * @exception IOException on error writing to log stream.\r
+ *\r
+ * @see BasePage#logRecord\r
+ **/\r
+ public void logRecord(\r
+ int slot, \r
+ int flag, \r
+ int recordId, \r
+ FormatableBitSet validColumns, \r
+ OutputStream out, \r
+ RecordHandle headRowHandle) \r
+ throws StandardException, IOException\r
+ {\r
+ StoredRecordHeader recordHeader = getHeaderAtSlot(slot);\r
+\r
+ if (recordId != recordHeader.getId()) \r
+ {\r
+ // the record is being logged under a different identifier,\r
+ // write it out with the correct identifier\r
+ StoredRecordHeader newRecordHeader = \r
+ new StoredRecordHeader(recordHeader);\r
+\r
+ newRecordHeader.setId(recordId);\r
+\r
+ newRecordHeader.write(out);\r
+ newRecordHeader = null;\r
+ } \r
+ else \r
+ {\r
+ // write the original record header\r
+ recordHeader.write(out);\r
+ }\r
+\r
+ logRecordDataPortion(\r
+ slot, flag, recordHeader, validColumns, out, headRowHandle);\r
+\r
+ }\r
+\r
    /**
     * Write the data portion (field headers + field data) of the record at
     * <code>slot</code> to <code>out</code>.
     * <p>
     * Fields excluded by <code>validColumns</code>, and (when logging for a
     * purge) fields that are not overflow pointers, are written out as
     * non-existent fields. When an update is about to orphan a long column
     * chain, a post-commit {@link ReclaimSpace} work unit is queued so the
     * chain's space can be reclaimed after commit.
     *
     * @param slot          slot of the record being logged.
     * @param flag          LOG_RECORD_* flags controlling what is logged.
     * @param recordHeader  header of the record as it exists on the page.
     * @param validColumns  columns to log; null means all columns.
     * @param out           stream the record data is logged to.
     * @param headRowHandle handle of the head row piece; may be null when
     *                      logging a non-head row piece.
     *
     * @exception StandardException Standard Derby error policy
     * @exception IOException on error writing to the stream.
     **/
    private void logRecordDataPortion(
        int slot, 
        int flag,
        StoredRecordHeader recordHeader,
        FormatableBitSet validColumns, 
        OutputStream out,
        RecordHandle headRowHandle) 
        throws StandardException, IOException
    {
        int offset = getRecordOffset(slot);

        // now skip over the original header before writing the data
        int oldHeaderLength = recordHeader.size();
        offset += oldHeaderLength;

        // write out the record data (FH+data+...) from the page data
        int startField = recordHeader.getFirstField();
        int endField = startField + recordHeader.getNumberFields();
        int validColumnsSize = (validColumns == null) ? 0 : validColumns.getLength();

        for (int fieldId = startField; fieldId < endField; fieldId++) {

            rawDataIn.setPosition(offset);

            // get the field header information from the page
            int fieldStatus = StoredFieldHeader.readStatus(rawDataIn);
            int fieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, fieldStatus, slotFieldSize);

            // see if this field needs to be logged
            // no need to write the data portion if the log is getting written
            // for purges unless the field is overflow pointer for a long column.
            if (((validColumns != null) && !(validColumnsSize > fieldId && validColumns.isSet(fieldId))) || 
                ((flag & BasePage.LOG_RECORD_FOR_PURGE)!=0 && !StoredFieldHeader.isOverflow(fieldStatus)))
            {
                // nope, move page offset along
                offset += StoredFieldHeader.size(fieldStatus, fieldDataLength, slotFieldSize);
                offset += fieldDataLength;

                // write a non-existent field
                fieldStatus = StoredFieldHeader.setInitial();
                fieldStatus = StoredFieldHeader.setNonexistent(fieldStatus);
                StoredFieldHeader.write(out, fieldStatus, 0, slotFieldSize);
                continue;
            }

            // If this field is to be updated, and it points to a long column
            // chain, the entire long column chain will be orphaned after the
            // update operation. Therefore, need to queue up a post commit
            // work to reclaim the long column chain. We cannot do any clean
            // up in this transaction now because we are underneath a log
            // action and cannot interrupt the transaction log buffer.
            // HeadRowHandle may be null if updateAtSlot is called to update a
            // non-head row piece. In that case, don't do anything.
            // If temp container, don't do anything.
            if (((flag & BasePage.LOG_RECORD_FOR_UPDATE) != 0) && 
                headRowHandle != null &&
                StoredFieldHeader.isOverflow(fieldStatus) &&
                owner.isTemporaryContainer() == false)
            {

                int saveOffset = rawDataIn.getPosition(); // remember the page offset
                long overflowPage = CompressedNumber.readLong((InputStream) rawDataIn);
                int overflowId = CompressedNumber.readInt((InputStream) rawDataIn);

                // Remember the time stamp on the first page of the column
                // chain. This is to prevent the case where the post commit
                // work gets fired twice, in that case, the second time it is
                // fired, this overflow page may not part of this row chain
                // that is being updated.
                Page firstPageOnColumnChain = getOverflowPage(overflowPage);
                PageTimeStamp ts = firstPageOnColumnChain.currentTimeStamp();
                firstPageOnColumnChain.unlatch();

                RawTransaction rxact = (RawTransaction)owner.getTransaction();

                ReclaimSpace work = 
                    new ReclaimSpace(ReclaimSpace.COLUMN_CHAIN,
                            headRowHandle,
                            fieldId, // long column about to be orphaned by update 
                            overflowPage, // page where the long column starts
                            overflowId, // record Id of the beginning of the long column
                            ts,
                            rxact.getDataFactory(), true);

                rxact.addPostCommitWork(work);

                rawDataIn.setPosition(saveOffset); // Just to be safe, reset data stream
            }


            // write the field header for the log
            offset += StoredFieldHeader.write(out, fieldStatus, fieldDataLength, slotFieldSize);

            if (fieldDataLength != 0) {

                // write the actual data
                out.write(pageData, offset, fieldDataLength);

                offset += fieldDataLength;
            }
        }
    }
+\r
+ /**\r
+ Log a field to the ObjectOutput stream.\r
+ <P>\r
+ Find the field in the record and then write out the complete\r
+ field, i.e. header and data.\r
+\r
+ @exception StandardException Standard Derby error policy\r
+ @exception IOException RESOLVE\r
+\r
+ @see BasePage#logField\r
+ */\r
+\r
+ public void logField(int slot, int fieldNumber, OutputStream out)\r
+ throws StandardException, IOException\r
+ {\r
+ int offset = getFieldOffset(slot, fieldNumber);\r
+\r
+ // these reads are always against the page array\r
+ ArrayInputStream lrdi = rawDataIn;\r
+\r
+ // now write out the field we are interested in ...\r
+ lrdi.setPosition(offset);\r
+ int fieldStatus = StoredFieldHeader.readStatus(lrdi);\r
+ int fieldDataLength = StoredFieldHeader.readFieldDataLength(lrdi, fieldStatus, slotFieldSize);\r
+\r
+ StoredFieldHeader.write(out, fieldStatus, fieldDataLength, slotFieldSize);\r
+ \r
+ if (fieldDataLength != 0) {\r
+ // and then the data\r
+ out.write(pageData, lrdi.getPosition(), fieldDataLength);\r
+ }\r
+ }\r
+\r
+ /*\r
+ ** Overidden methods of BasePage\r
+ */\r
+\r
+ /**\r
+ Override insertAtSlot to provide long row support.\r
+ @exception StandardException Standard Derby error policy\r
+ */\r
+ public RecordHandle insertAtSlot(\r
+ int slot, \r
+ Object[] row, \r
+ FormatableBitSet validColumns,\r
+ LogicalUndo undo, \r
+ byte insertFlag, \r
+ int overflowThreshold)\r
+ throws StandardException\r
+ {\r
+ try {\r
+\r
+ return super.insertAtSlot(slot, row, validColumns, undo, insertFlag, overflowThreshold);\r
+\r
+ } catch (NoSpaceOnPage nsop) {\r
+\r
+ // Super class already handle the case of insert that allows overflow.\r
+ // If we get here, we know that the insert should not allow overflow.\r
+ // Possibles causes:\r
+ // 1. insert to an empty page, row will never fit (ie long row)\r
+ // 2. insert to original page\r
+ // we will do:\r
+ // return a null to indicate the insert cannot be accepted ..\r
+ return null;\r
+\r
+ }\r
+ }\r
+ \r
+\r
+ /**\r
+ Update field at specified slot\r
+ @exception StandardException Standard Derby error policy\r
+ */\r
+ public RecordHandle updateFieldAtSlot(\r
+ int slot, \r
+ int fieldId, \r
+ Object newValue, \r
+ LogicalUndo undo)\r
+ throws StandardException\r
+ {\r
+ try {\r
+\r
+ return super.updateFieldAtSlot(slot, fieldId, newValue, undo);\r
+\r
+ } catch (NoSpaceOnPage nsop) {\r
+\r
+\r
+ // empty page apart from the record\r
+ if (slotsInUse == 1) \r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_NO_SPACE_FOR_RECORD);\r
+ }\r
+ throw StandardException.newException(\r
+ SQLState.DATA_NO_SPACE_FOR_RECORD);\r
+\r
+/*\r
+// djd if (isOverflowPage()) {\r
+ }\r
+\r
+ return XXX;\r
+*/\r
+ }\r
+\r
+ }\r
+\r
+ /**\r
+ Get the number of fields on the row at slot\r
+ @exception StandardException Standard Derby error policy\r
+ */\r
+ public int fetchNumFieldsAtSlot(int slot) throws StandardException\r
+ {\r
+\r
+ StoredRecordHeader recordHeader = getHeaderAtSlot(slot);\r
+\r
+ if (!recordHeader.hasOverflow())\r
+ return super.fetchNumFieldsAtSlot(slot);\r
+\r
+ BasePage overflowPage = getOverflowPage(recordHeader.getOverflowPage());\r
+ int count = overflowPage.fetchNumFieldsAtSlot(getOverflowSlot(overflowPage, recordHeader));\r
+ overflowPage.unlatch();\r
+ return count;\r
+ }\r
+\r
+ /**\r
+ * Move record to a page toward the beginning of the file.\r
+ * <p>\r
+ * As part of compressing the table records need to be moved from the\r
+ * end of the file toward the beginning of the file. Only the \r
+ * contiguous set of free pages at the very end of the file can\r
+ * be given back to the OS. This call is used to purge the row from\r
+ * the current page, insert it into a previous page, and return the\r
+ * new row location \r
+ * Mark the record identified by position as deleted. The record may be \r
+ * undeleted sometime later using undelete() by any transaction that sees \r
+ * the record.\r
+ * <p>\r
+ * The interface is optimized to work on a number of rows at a time, \r
+ * optimally processing all rows on the page at once. The call will \r
+ * process either all rows on the page, or the number of slots in the\r
+ * input arrays - whichever is smaller.\r
+ * <B>Locking Policy</B>\r
+ * <P>\r
+ * MUST be called with table locked, not locks are requested. Because\r
+ * it is called with table locks the call will go ahead and purge any\r
+ * row which is marked deleted. It will also use purge rather than\r
+ * delete to remove the old row after it moves it to a new page. This\r
+ * is ok since the table lock insures that no other transaction will\r
+ * use space on the table before this transaction commits.\r
+ *\r
+ * <BR>\r
+ * A page latch on the new page will be requested and released.\r
+ *\r
+ * @param slot slot of original row to move.\r
+ * @param row a row template to hold all columns of row.\r
+ * @param old_handle An array to be filled in by the call with the \r
+ * old handles of all rows moved.\r
+ * @param new_handle An array to be filled in by the call with the \r
+ * new handles of all rows moved.\r
+ *\r
+ * @return the number of rows processed.\r
+ *\r
+ * @exception StandardException Standard Derby error policy\r
+ *\r
+ **/\r
+ public int moveRecordForCompressAtSlot(\r
+ int slot,\r
+ Object[] row,\r
+ RecordHandle[] old_handle,\r
+ RecordHandle[] new_handle)\r
+ throws StandardException\r
+ {\r
+ long src_pageno = getPageNumber();\r
+\r
+ try\r
+ {\r
+ fetchFromSlot(\r
+ null,\r
+ slot,\r
+ row,\r
+ (FetchDescriptor) null, // all columns retrieved\r
+ false);\r
+\r
+ int row_size = getRecordPortionLength(slot);\r
+\r
+ // first see if row will fit on current page being used to insert\r
+ StoredPage dest_page = \r
+ (StoredPage) owner.getPageForCompress(0, src_pageno);\r
+\r
+ if (dest_page != null)\r
+ {\r
+ if ((dest_page.getPageNumber() >= getPageNumber()) ||\r
+ (!dest_page.spaceForCopy(row_size)))\r
+ {\r
+ // page won't work\r
+ dest_page.unlatch();\r
+ dest_page = null;\r
+ }\r
+ }\r
+\r
+ if (dest_page == null)\r
+ {\r
+ // last page did not work, try unfilled page\r
+ dest_page = (StoredPage) \r
+ owner.getPageForCompress(\r
+ ContainerHandle.GET_PAGE_UNFILLED, src_pageno);\r
+\r
+ if (dest_page != null)\r
+ {\r
+ if ((dest_page.getPageNumber() >= getPageNumber()) ||\r
+ (!dest_page.spaceForCopy(row_size)))\r
+ {\r
+ // page won't work\r
+ dest_page.unlatch();\r
+ dest_page = null;\r
+ }\r
+ }\r
+ }\r
+\r
+ if (dest_page == null)\r
+ {\r
+ // last and unfilled page did not work, try getting a free page\r
+ dest_page = (StoredPage) owner.addPage();\r
+\r
+ if (dest_page.getPageNumber() >= getPageNumber())\r
+ {\r
+ owner.removePage(dest_page);\r
+ dest_page = null;\r
+ }\r
+ }\r
+\r
+ if (dest_page != null)\r
+ {\r
+ int dest_slot = dest_page.recordCount();\r
+\r
+ old_handle[0] = getRecordHandleAtSlot(slot);\r
+\r
+ copyAndPurge(dest_page, slot, 1, dest_slot);\r
+\r
+ new_handle[0] = dest_page.getRecordHandleAtSlot(dest_slot);\r
+\r
+ dest_page.unlatch();\r
+\r
+ return(1);\r
+ }\r
+ else\r
+ {\r
+ return(0);\r
+ }\r
+ }\r
+ catch (IOException ioe)\r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);\r
+ }\r
+ }\r
+\r
+ /*\r
+ * methods that is called underneath a page action\r
+ */\r
+\r
+ /*\r
+ * update page version and instance due to actions by a log record\r
+ */\r
+ public void logAction(LogInstant instant) throws StandardException\r
+ {\r
+ if (SanityManager.DEBUG) {\r
+ SanityManager.ASSERT(isLatched());\r
+ }\r
+\r
+ if (rawDataOut == null)\r
+ createOutStreams();\r
+\r
+ if (!isActuallyDirty()) {\r
+ // if this is not an overflow page and the page is valid, set the\r
+ // initial row count.\r
+ if (!isOverflowPage() && ((getPageStatus() & VALID_PAGE) != 0)) {\r
+ initialRowCount = internalNonDeletedRecordCount();\r
+ } else\r
+ initialRowCount = 0;\r
+ }\r
+\r
+ setDirty();\r
+\r
+ bumpPageVersion();\r
+ updateLastLogInstant(instant);\r
+ }\r
+\r
+\r
+ /* clean the page for first use or reuse */\r
+ private void cleanPage()\r
+ {\r
+ setDirty();\r
+\r
+ // set pageData to all nulls\r
+ clearSection(0, getPageSize());\r
+\r
+ slotsInUse = 0;\r
+ deletedRowCount = 0;\r
+ headerOutOfDate = true; // headerOutOfDate must be set after setDirty\r
+ // because isDirty maybe called unlatched\r
+\r
+ clearAllSpace();\r
+\r
+ }\r
+\r
+ /**\r
+ Initialize the page. \r
+\r
+ If reuse, then \r
+ Clean up any in memory or on disk structure to ready the page for reuse.\r
+ This is not only reusing the page buffer, but reusing a free page \r
+ which may or may not be cleaned up the the client of raw store when it \r
+ was deallocated.\r
+\r
+ @exception StandardException Derby Standard Error Policy\r
+ */\r
+ public void initPage(LogInstant instant, byte status, int recordId, \r
+ boolean overflow, boolean reuse)\r
+ throws StandardException\r
+ {\r
+ // log action at the end after the page is updated with all the\r
+ // pertinent information\r
+ logAction(instant);\r
+\r
+ if (reuse)\r
+ {\r
+ cleanPage();\r
+ super.cleanPageForReuse();\r
+ }\r
+ // if not reuse, createPage already called cleanpage\r
+\r
+ headerOutOfDate = true; // headerOutOfDate must be set after setDirty\r
+ // because isDirty maybe called unlatched\r
+ setPageStatus(status);\r
+ isOverflowPage = overflow;\r
+ nextId = recordId;\r
+\r
+ }\r
+\r
+ /**\r
+ Set page status\r
+ @exception StandardException Derby Standard Error Policy\r
+ */\r
+ public void setPageStatus(LogInstant instant, byte status)\r
+ throws StandardException\r
+ {\r
+ logAction(instant);\r
+ headerOutOfDate = true; // headerOutOfDate must be set after setDirty\r
+ // because isDirty maybe called unlatched\r
+\r
+ setPageStatus(status);\r
+ }\r
+\r
+ /**\r
+ Set the row reserved space.\r
+ @exception StandardException Derby Standard Error Policy\r
+ */\r
+ public void setReservedSpace(LogInstant instant, int slot, int value)\r
+ throws StandardException, IOException\r
+ {\r
+ logAction(instant);\r
+ headerOutOfDate = true; // headerOutOfDate must be set after setDirty\r
+ // because isDirty maybe called unlatched\r
+\r
+ int delta = value - getReservedCount(slot);\r
+\r
+ if (SanityManager.DEBUG) {\r
+ SanityManager.ASSERT(delta <= freeSpace, \r
+ "Cannot grow reserved space because there is not enough free space on the page");\r
+ SanityManager.ASSERT(delta != 0,\r
+ "Set Reserved Space called to set identical value");\r
+\r
+ if (value < 0)\r
+ SanityManager.THROWASSERT(\r
+ "Cannot set reserved space to value " + value);\r
+ }\r
+\r
+ // Find the end of the record that we are about to add or subtract from\r
+ // the reserved space.\r
+ int nextRecordOffset = getRecordOffset(slot) + getTotalSpace(slot);\r
+\r
+ if (delta > 0) {\r
+ // Growing - hopefully during a RRR restore\r
+ expandPage(nextRecordOffset, delta);\r
+ } else {\r
+ // shrinking, delta is < 0\r
+ shrinkPage(nextRecordOffset, -delta);\r
+ }\r
+\r
+ // Lastly, update the reserved space count in the slot.\r
+ rawDataOut.setPosition(getSlotOffset(slot) + (2*slotFieldSize));\r
+ if (slotFieldSize == SMALL_SLOT_SIZE)\r
+ logicalDataOut.writeShort(value);\r
+ else\r
+ logicalDataOut.writeInt(value);\r
+\r
+ }\r
+\r
+\r
+ /**\r
+ Store a record at the given slot.\r
+\r
+ @exception StandardException Standard Derby error policy\r
+ @exception IOException RESOLVE\r
+ */\r
+ public void storeRecord(LogInstant instant, int slot, boolean insert, ObjectInput in)\r
+ throws StandardException, IOException\r
+ {\r
+ logAction(instant);\r
+\r
+ if (insert)\r
+ storeRecordForInsert(slot, in);\r
+ else\r
+ storeRecordForUpdate(slot, in);\r
+ }\r
+\r
    /**
     * Store a newly inserted record at <code>slot</code>, reading the record
     * image (header followed by field headers and data) from <code>in</code>.
     * <p>
     * Writes the record at the current free-space pointer, reserves space up
     * to minimumRecordSize if the user data is smaller, adds a slot-table
     * entry, and updates freeSpace/firstFreeByte/nextId bookkeeping. Marks
     * the page corrupt if the accounting goes inconsistent.
     *
     * @param slot slot at which the record is inserted.
     * @param in   stream carrying the logged record image.
     *
     * @exception StandardException Standard Derby error policy
     * @exception IOException on error reading the record image.
     **/
    private void storeRecordForInsert(int slot, ObjectInput in)
        throws StandardException, IOException
    {

        StoredRecordHeader recordHeader = shiftUp(slot);
        if (recordHeader == null) {
            recordHeader = new StoredRecordHeader();
            setHeaderAtSlot(slot, recordHeader);
        }

        bumpRecordCount(1);

        // recordHeader represents the new version of the record header.
        recordHeader.read(in);

        // the record is already marked delete, we need to bump the deletedRowCount
        if (recordHeader.isDeleted()) {
            deletedRowCount++;
            headerOutOfDate = true;
        }

        // during a rollforward insert, recordId == nextId
        // during a rollback of purge, recordId < nextId
        if (nextId <= recordHeader.getId())
            nextId = recordHeader.getId()+1;

        int recordOffset = firstFreeByte;
        int offset = recordOffset;

        // write each field out to the page
        int numberFields = recordHeader.getNumberFields();

        rawDataOut.setPosition(offset);
        offset += recordHeader.write(rawDataOut);

        // running total of user data bytes (excludes record/field headers).
        int userData = 0;
        for (int i = 0; i < numberFields; i++) {

            // get the field header information, the input stream came from the log 
            int newFieldStatus = StoredFieldHeader.readStatus(in);
            int newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);
            newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);

            rawDataOut.setPosition(offset);
            offset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);

            if (newFieldDataLength != 0) {
                in.readFully(pageData, offset, newFieldDataLength);
                offset += newFieldDataLength;
                userData += newFieldDataLength;
            }
        }

        int dataWritten = offset - firstFreeByte;

        freeSpace -= dataWritten;
        firstFreeByte += dataWritten;

        int reservedSpace = 0;
        if (minimumRecordSize > 0) {

            // make sure we reserve the minimumRecordSize for the user data 
            // portion of the record excluding the space we took on recordHeader 
            // and fieldHeaders.
            if (userData < minimumRecordSize) {
                reservedSpace = minimumRecordSize - userData;
                freeSpace -= reservedSpace;
                firstFreeByte += reservedSpace;
            }
        }

        // update the slot table
        addSlotEntry(slot, recordOffset, dataWritten, reservedSpace);

        if (SanityManager.DEBUG)
        {
            if ((firstFreeByte > getSlotOffset(slot)) ||
                (freeSpace < 0))
            {
                SanityManager.THROWASSERT(
                    " firstFreeByte = " + firstFreeByte + 
                    " dataWritten = " + dataWritten +
                    " getSlotOffset(slot) = " + getSlotOffset(slot) + 
                    " slot = " + slot +
                    " firstFreeByte = " + firstFreeByte + 
                    " freeSpace = " + freeSpace + 
                    " page = " + this);
            }
        }

        // records grow up from the page start, the slot table grows down
        // from the page end; if they have crossed, the page is corrupt.
        if ((firstFreeByte > getSlotOffset(slot)) || (freeSpace < 0))
        {
            throw dataFactory.markCorrupt(
                StandardException.newException(
                    SQLState.DATA_CORRUPT_PAGE, getPageId()));
        }

    }
+\r
+\r
+ private void storeRecordForUpdate(int slot, ObjectInput in)\r
+ throws StandardException, IOException\r
+ {\r
+ // set up to read the in-memory record header back from the record\r
+ StoredRecordHeader recordHeader = getHeaderAtSlot(slot);\r
+ StoredRecordHeader newRecorderHeader = new StoredRecordHeader();\r
+\r
+ // recordHeader represents the new version of the record header.\r
+ newRecorderHeader.read(in);\r
+\r
+ int oldFieldCount = recordHeader.getNumberFields();\r
+ int newFieldCount = newRecorderHeader.getNumberFields();\r
+\r
+ int startField = recordHeader.getFirstField();\r
+ if (SanityManager.DEBUG) {\r
+ if (startField != newRecorderHeader.getFirstField())\r
+ SanityManager.THROWASSERT("First field changed from " + startField + " to " + newRecorderHeader.getFirstField());\r
+ }\r
+\r
+ // See if the number of fields shrunk, if so clear out the old data\r
+ // we do this first to stop shuffling about the fields that are going to\r
+ // be deleted during the update of the earlier fields. This case occurs\r
+ // on an update that changes the row to be overflowed.\r
+ if (newFieldCount < oldFieldCount) {\r
+\r
+ int oldDataStartingOffset = getFieldOffset(slot, startField + newFieldCount);\r
+\r
+ // calculate the length of the to be deleted fields\r
+ int deleteLength = getRecordOffset(slot) + getRecordPortionLength(slot) - oldDataStartingOffset;\r
+\r
+ // we are updateing to zero bytes!\r
+ updateRecordPortionLength(slot, -(deleteLength), deleteLength);\r
+ }\r
+\r
+ // write each field out to the page\r
+\r
+ int startingOffset = getRecordOffset(slot);\r
+ int newOffset = startingOffset;\r
+ int oldOffset = startingOffset;\r
+\r
+ // see which field we get to use the reserve space\r
+ int reservedSpaceFieldId = newFieldCount < oldFieldCount ?\r
+ newFieldCount - 1 : oldFieldCount - 1;\r
+ reservedSpaceFieldId += startField;\r
+\r
+\r
+ // the new data the needs to be written at newOffset but can't until\r
+ // unsedSpace >= newDataToWrite.length (allowing for the header)\r
+ DynamicByteArrayOutputStream newDataToWrite = null;\r
+\r
+ rawDataOut.setPosition(newOffset);\r
+\r
+ // write the record header, which may change in size\r
+ int oldLength = recordHeader.size();\r
+ int newLength = newRecorderHeader.size();\r
+\r
+ int unusedSpace = oldLength; // the unused space at newOffset\r
+\r
+ // no fields, so we can eat into the reserve space\r
+ if (reservedSpaceFieldId < startField) // no fields\r
+ unusedSpace += getReservedCount(slot);\r
+\r
+ if (unusedSpace >= newLength) {\r
+ newRecorderHeader.write(rawDataOut);\r
+ newOffset += newLength;\r
+ unusedSpace -= newLength;\r
+ \r
+ } else { \r
+\r
+ newDataToWrite = new DynamicByteArrayOutputStream(getPageSize());\r
+ newRecorderHeader.write(newDataToWrite);\r
+ }\r
+ oldOffset += oldLength;\r
+ int recordDelta = (newLength - oldLength);\r
+\r
+ int oldFieldStatus = 0;\r
+ int oldFieldDataLength = 0;\r
+ int newFieldStatus = 0;\r
+ int newFieldDataLength = 0;\r
+\r
+ int oldEndFieldExclusive = startField + oldFieldCount;\r
+ int newEndFieldExclusive = startField + newFieldCount;\r
+\r
+ for (int fieldId = startField; fieldId < newEndFieldExclusive; fieldId++) {\r
+\r
+ int oldFieldLength = 0;\r
+ if (fieldId < oldEndFieldExclusive) {\r
+ rawDataIn.setPosition(oldOffset);\r
+ oldFieldStatus = StoredFieldHeader.readStatus(rawDataIn);\r
+ oldFieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, oldFieldStatus, slotFieldSize);\r
+ oldFieldLength = StoredFieldHeader.size(oldFieldStatus, oldFieldDataLength, slotFieldSize)\r
+ + oldFieldDataLength;\r
+ }\r
+\r
+ newFieldStatus = StoredFieldHeader.readStatus(in);\r
+ newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);\r
+\r
+ // if no value was provided on an update of a field then use the old value,\r
+ // unless the old field didn't exist.\r
+ if (StoredFieldHeader.isNonexistent(newFieldStatus) && (fieldId < oldEndFieldExclusive)) {\r
+\r
+ // may need to move this old field ...\r
+ if ((newDataToWrite == null) || (newDataToWrite.getUsed() == 0)) {\r
+ // the is no old data to catch up on, is the data at\r
+ // the correct position already?\r
+ if (newOffset == oldOffset) {\r
+ // yes, nothing to do!!\r
+ if (SanityManager.DEBUG) {\r
+ if (unusedSpace != 0)\r
+ SanityManager.THROWASSERT("Unused space is out of sync, expect 0 got " + unusedSpace);\r
+ }\r
+ } else {\r
+ // need to shift the field left\r
+ if (SanityManager.DEBUG) {\r
+ if (unusedSpace != (oldOffset - newOffset))\r
+ SanityManager.THROWASSERT(\r
+ "Unused space is out of sync expected " + (oldOffset - newOffset) + " got " + unusedSpace);\r
+ }\r
+\r
+ System.arraycopy(pageData, oldOffset, pageData, newOffset, oldFieldLength);\r
+ }\r
+ newOffset += oldFieldLength;\r
+\r
+ // last field to be updated can eat into the reserve space\r
+ if (fieldId == reservedSpaceFieldId)\r
+ unusedSpace += getReservedCount(slot);\r
+\r
+ } else {\r
+ // there is data still to be written, just append this field to the\r
+ // saved data\r
+ int position = newDataToWrite.getPosition();\r
+ newDataToWrite.setPosition(position + oldFieldLength);\r
+ System.arraycopy(pageData, oldOffset,\r
+ newDataToWrite.getByteArray(), position, oldFieldLength);\r
+\r
+ unusedSpace += oldFieldLength;\r
+\r
+ // last field to be updated can eat into the reserve space\r
+ if (fieldId == reservedSpaceFieldId)\r
+ unusedSpace += getReservedCount(slot);\r
+\r
+ // attempt to write out some of what we have in the side buffer now.\r
+ int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);\r
+ newOffset += copyLength;\r
+ unusedSpace -= copyLength;\r
+\r
+ }\r
+ oldOffset += oldFieldLength;\r
+ continue;\r
+ }\r
+\r
+ newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);\r
+\r
+ int newFieldHeaderLength = StoredFieldHeader.size(newFieldStatus, newFieldDataLength, slotFieldSize);\r
+ int newFieldLength = newFieldHeaderLength + newFieldDataLength;\r
+\r
+ recordDelta += (newFieldLength - oldFieldLength);\r
+\r
+ // See if we can write this field now\r
+\r
+ // space available increases by the amount of the old field\r
+ unusedSpace += oldFieldLength;\r
+ oldOffset += oldFieldLength;\r
+\r
+ // last field to be updated can eat into the reserve space\r
+ if (fieldId == reservedSpaceFieldId)\r
+ unusedSpace += getReservedCount(slot);\r
+\r
+ if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {\r
+\r
+ // catch up on the old data if possible\r
+ int copyLength = moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);\r
+ newOffset += copyLength;\r
+ unusedSpace -= copyLength;\r
+ }\r
+\r
+ if (((newDataToWrite == null) || (newDataToWrite.getUsed() == 0))\r
+ && (unusedSpace >= newFieldHeaderLength)) {\r
+\r
+ // can fit the header in\r
+ rawDataOut.setPosition(newOffset);\r
+ newOffset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);\r
+ unusedSpace -= newFieldHeaderLength;\r
+\r
+ if (newFieldDataLength != 0) {\r
+\r
+ // read as much as the field as possible\r
+ int fieldCopy = unusedSpace >= newFieldDataLength ?\r
+ newFieldDataLength : unusedSpace;\r
+\r
+ if (fieldCopy != 0) {\r
+ in.readFully(pageData, newOffset, fieldCopy);\r
+\r
+ newOffset += fieldCopy;\r
+ unusedSpace -= fieldCopy;\r
+ }\r
+\r
+\r
+ fieldCopy = newFieldDataLength - fieldCopy;\r
+ if (fieldCopy != 0) {\r
+ if (newDataToWrite == null)\r
+ newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);\r
+\r
+ // append the remaining portion of the field to the saved data\r
+ int position = newDataToWrite.getPosition();\r
+ newDataToWrite.setPosition(position + fieldCopy);\r
+ in.readFully(newDataToWrite.getByteArray(),\r
+ position, fieldCopy);\r
+\r
+ }\r
+ }\r
+ } else {\r
+ // can't fit these header, or therefore the field, append it\r
+ // to the buffer.\r
+\r
+ if (newDataToWrite == null)\r
+ newDataToWrite = new DynamicByteArrayOutputStream(newFieldLength * 2);\r
+\r
+ StoredFieldHeader.write(newDataToWrite, newFieldStatus, newFieldDataLength, slotFieldSize);\r
+\r
+ // save the new field data\r
+ if (newFieldDataLength != 0) {\r
+ int position = newDataToWrite.getPosition();\r
+ newDataToWrite.setPosition(position + newFieldDataLength);\r
+ in.readFully(newDataToWrite.getByteArray(),\r
+ position, newFieldDataLength);\r
+ }\r
+ }\r
+ }\r
+\r
+ // at this point there may still be data left in the saved buffer\r
+ // but presumably we can't fit it in\r
+\r
+ int reservedDelta;\r
+\r
+ if ((newDataToWrite != null) && (newDataToWrite.getUsed() != 0)) {\r
+\r
+ // need to shift the later records down ...\r
+ int nextRecordOffset = startingOffset + getTotalSpace(slot);\r
+\r
+ int spaceRequiredFromFreeSpace = newDataToWrite.getUsed() - (nextRecordOffset - newOffset);\r
+\r
+ if (SanityManager.DEBUG) {\r
+ if (newOffset > nextRecordOffset)\r
+ SanityManager.THROWASSERT("data has overwritten next record - offset " + newOffset\r
+ + " next record " + nextRecordOffset);\r
+\r
+ if ((spaceRequiredFromFreeSpace <= 0) || (spaceRequiredFromFreeSpace > freeSpace))\r
+ SanityManager.THROWASSERT("invalid space required " + spaceRequiredFromFreeSpace\r
+ + " newDataToWrite.getUsed() " + newDataToWrite.getUsed()\r
+ + " nextRecordOffset " + nextRecordOffset\r
+ + " newOffset " + newOffset\r
+ + " reservedSpaceFieldId " + reservedSpaceFieldId\r
+ + " startField " + startField\r
+ + " newEndFieldExclusive " + newEndFieldExclusive\r
+ + " newFieldCount " + newFieldCount\r
+ + " oldFieldCount " + oldFieldCount\r
+ + " slot " + slot\r
+ + " freeSpace " + freeSpace\r
+ + " unusedSpace " + unusedSpace\r
+ + " page " + getPageId());\r
+\r
+\r
+ if ((getReservedCount(slot) + spaceRequiredFromFreeSpace) != recordDelta)\r
+ SanityManager.THROWASSERT("mismatch on count: reserved " + getReservedCount(slot) +\r
+ "free space take " + spaceRequiredFromFreeSpace +\r
+ "record delta " + recordDelta);\r
+\r
+ }\r
+\r
+ if (spaceRequiredFromFreeSpace > freeSpace) {\r
+ throw dataFactory.markCorrupt(\r
+ StandardException.newException(\r
+ SQLState.DATA_CORRUPT_PAGE, getPageId()));\r
+ }\r
+\r
+ // see if this is the last record on the page, if so a simple\r
+ // shift of the remaining fields will sufice...\r
+ expandPage(nextRecordOffset, spaceRequiredFromFreeSpace);\r
+\r
+ unusedSpace += spaceRequiredFromFreeSpace;\r
+\r
+ moveSavedDataToPage(newDataToWrite, unusedSpace, newOffset);\r
+\r
+ reservedDelta = -1 * getReservedCount(slot);\r
+\r
+ if (SanityManager.DEBUG) {\r
+ if (newDataToWrite.getUsed() != 0)\r
+ SanityManager.THROWASSERT("data is left in save buffer ... " + newDataToWrite.getUsed());\r
+ }\r
+ } else {\r
+ reservedDelta = -1 * recordDelta;\r
+ }\r
+\r
+ // now reset the length in the slot entry\r
+ updateRecordPortionLength(slot, recordDelta, reservedDelta);\r
+\r
+ setHeaderAtSlot(slot, newRecorderHeader);\r
+ }\r
+\r
+ private int moveSavedDataToPage(DynamicByteArrayOutputStream savedData, int unusedSpace, int pageOffset) {\r
+ // catch up on the old data if possible\r
+ if (unusedSpace > (savedData.getUsed() / 2)) {\r
+ // copy onto the page\r
+ int copyLength = unusedSpace <= savedData.getUsed() ?\r
+ unusedSpace : savedData.getUsed();\r
+ System.arraycopy(savedData.getByteArray(), 0,\r
+ pageData, pageOffset, copyLength);\r
+\r
+ // fix up the saved buffer\r
+ savedData.discardLeft(copyLength);\r
+\r
+ return copyLength;\r
+ }\r
+\r
+ return 0;\r
+ }\r
+\r
+\r
    /**
        Create the space to update a portion of a record.
        This method ensures there is enough room to replace the
        old data of length oldLength at the given offset, with the new data of length
        newLength. This method does not put any new data on the page, it moves old data around
        and zeros out any old data when newLength &lt; oldLength. This method does
        update the information in the slot table.

        The passed in offset is the correct place to put the data
        when this method returns, ie. it only moves data that
        has an offset greater than this.

        @param slot      slot of the record being updated
        @param offset    page offset where the new data will start
        @param oldLength length of the data being replaced
        @param newLength length of the data replacing the old data

        @exception StandardException Standard Derby error policy
        @exception IOException RESOLVE
    */
    private void createSpaceForUpdate(int slot, int offset, int oldLength, int newLength)
        throws StandardException, IOException
    {

        // now replace the old data with the new data
        if (newLength <= oldLength) {

            // Shrinking (or same-size) case: shift trailing data down and
            // grow the record's reserved space by the amount freed.
            int diffLength = oldLength - newLength;

            // real easy - exact fit, nothing to move
            if (diffLength == 0)
                return;

            // shift the remaining fields down
            int remainingLength = 
                shiftRemainingData(slot, offset, oldLength, newLength);

            // clear the now unused data on the page
            clearSection(offset + newLength + remainingLength, diffLength);

            if (SanityManager.DEBUG) {

                // sanity: slot-table length must agree with the actual
                // extent of the record data after the shift
                if ((getRecordPortionLength(slot) - diffLength) != 
                        ((offset - getRecordOffset(slot)) + newLength + 
                         remainingLength))
                {
                    SanityManager.THROWASSERT(
                        " Slot table trying to update record length " + 
                        (getRecordPortionLength(slot) - diffLength) +
                        " that is not the same as what it actully is");
                }
            }

            // now reset the length in the slot entry, increase the reserved space
            updateRecordPortionLength(slot, -(diffLength), diffLength);
            return;
        }

        // tough case, the new field is bigger than the old field ... 
        // first attempt, see how much space is in row private reserved space

        int extraLength = newLength - oldLength; 

        // extraLength is always greater than 0.
        if (SanityManager.DEBUG)
            SanityManager.ASSERT(extraLength > 0);

        int recordReservedSpace = getReservedCount(slot);
        int reservedDelta = 0;

        // bytes needed beyond what the row's own reserved space can supply
        int spaceRequiredFromFreeSpace = extraLength - recordReservedSpace;

        if (SanityManager.DEBUG) {
            if (spaceRequiredFromFreeSpace > freeSpace)
                SanityManager.THROWASSERT(
                    "spaceRequiredFromFreeSpace = " +
                    spaceRequiredFromFreeSpace +
                    ";freeSpace = " + freeSpace +
                    ";newLength = " + newLength +
                    ";oldLength = " + oldLength +
                    ";\npage= " + this);
        }

        if (spaceRequiredFromFreeSpace > 0) {
            // The update requires all the reserved space + some from free space

            int nextRecordOffset = getRecordOffset(slot) + getTotalSpace(slot);

            // see if this is the last record on the page, if so a simple
            // shift of the remaining fields will suffice...
            expandPage(nextRecordOffset, spaceRequiredFromFreeSpace);

            // we used all the reserved space we have, set it to 0
            reservedDelta = -(recordReservedSpace);
        } else {
            // the update uses some amount of space from the rows reserved space

            // set reserved Delta to account for amount of reserved space used.
            reservedDelta = -(extraLength);
        }
        
        // just shift all remaining fields up
        int remainingLength = shiftRemainingData(slot, offset, oldLength, newLength);
        
        if (SanityManager.DEBUG) {
            // total space for the record (data + reserved) must never shrink
            if ((extraLength + reservedDelta) < 0)
                SanityManager.THROWASSERT(
                    "total space the record occupies cannot shrink, extraLength = "
                    + extraLength + " reservedDelta = " + reservedDelta
                    + " spacerequired = " + spaceRequiredFromFreeSpace
                    + " recordReservedSpace = " + recordReservedSpace);
        }

        // now reset the length in the slot entry
        updateRecordPortionLength(slot, extraLength, reservedDelta);
    }
+\r
    /**
        storeField

        Replace the stored field at (slot, fieldNumber) with the field read
        from the given input stream (which came from the log). Space on the
        page is adjusted first, then the new field header and data are
        written in place.

        @param instant     log instant of the action being replayed, or null
        @param slot        slot of the record containing the field
        @param fieldNumber absolute field number within the complete record
        @param in          stream positioned at the logged field image

        @exception StandardException Standard Derby error policy
        @exception IOException RESOLVE
    */
    public void storeField(LogInstant instant, int slot, int fieldNumber, ObjectInput in)
        throws StandardException, IOException
    {
        logAction(instant);

        int offset = getFieldOffset(slot, fieldNumber);

        // get the field header information, the input stream came from the log
        ArrayInputStream lrdi = rawDataIn;
        lrdi.setPosition(offset);
        int oldFieldStatus = StoredFieldHeader.readStatus(lrdi);
        int oldFieldDataLength = StoredFieldHeader.readFieldDataLength(lrdi, oldFieldStatus, slotFieldSize);

        int newFieldStatus = StoredFieldHeader.readStatus(in);
        int newFieldDataLength = StoredFieldHeader.readFieldDataLength(in, newFieldStatus, slotFieldSize);
        // on-page fields are never stored in "fixed" form
        newFieldStatus = StoredFieldHeader.setFixed(newFieldStatus, false);

        // total on-page sizes (header + data) of the old and new field images
        int oldFieldLength = StoredFieldHeader.size(oldFieldStatus, oldFieldDataLength, slotFieldSize) + oldFieldDataLength;
        int newFieldLength = StoredFieldHeader.size(newFieldStatus, newFieldDataLength, slotFieldSize) + newFieldDataLength;

        // make room (or reclaim room) before writing anything
        createSpaceForUpdate(slot, offset, oldFieldLength, newFieldLength);
        
        rawDataOut.setPosition(offset);
        offset += StoredFieldHeader.write(rawDataOut, newFieldStatus, newFieldDataLength, slotFieldSize);

        if (newFieldDataLength != 0)
            in.readFully(pageData, offset, newFieldDataLength);
    }
+\r
    /**
        reserveSpaceForSlot
        This method will reserve at least specified "spaceToReserve" bytes for the record
        in the slot.

        If the record already has at least that much reserved this is a
        no-op; otherwise the following records are shifted down and the
        slot entry's reserved count is raised to spaceToReserve.

        @param instant        log instant of the action being replayed, or null
        @param slot           slot of the record to reserve space for
        @param spaceToReserve minimum number of reserved bytes required

        @exception StandardException Standard Derby error policy
        @exception IOException RESOLVE
    */
    public void reserveSpaceForSlot(LogInstant instant, int slot, int spaceToReserve)
        throws StandardException, IOException
    {
        logAction(instant);

        int extraSpace = spaceToReserve - getReservedCount(slot);
        if (extraSpace <= 0)
            return;

        if (freeSpace < extraSpace)
            throw new NoSpaceOnPage(isOverflowPage());

        // need to shift the later records down ...
        int startingOffset = getRecordOffset(slot);
        int nextRecordOffset = startingOffset + getTotalSpace(slot);

        // see if this is the last record on the page, if so a simple
        // shift of the remaining fields will suffice...
        expandPage(nextRecordOffset, extraSpace);

        setSlotEntry(slot, startingOffset, getRecordPortionLength(slot), spaceToReserve);
    }
+\r
+ /**\r
+ Skip a field header and its data on the given stream.\r
+ \r
+ @exception IOException corrupt stream\r
+ */\r
+ public void skipField(ObjectInput in) throws IOException {\r
+\r
+\r
+ int fieldStatus = StoredFieldHeader.readStatus(in);\r
+ int fieldDataLength = StoredFieldHeader.readFieldDataLength(in, fieldStatus, slotFieldSize);\r
+\r
+ if (fieldDataLength != 0) {\r
+ in.skipBytes(fieldDataLength);\r
+ }\r
+ }\r
+\r
+ public void skipRecord(ObjectInput in) throws IOException\r
+ {\r
+\r
+ StoredRecordHeader recordHeader = new StoredRecordHeader();\r
+ recordHeader.read(in);\r
+\r
+ for (int i = recordHeader.getNumberFields(); i > 0; i--) {\r
+ skipField(in); \r
+ }\r
+ }\r
+\r
    /**
        Shift data within a record to account for an update.

        Moves the record data that follows the replaced span so that it
        starts immediately after the new data. Relies on
        System.arraycopy handling overlapping source/destination ranges.

        @param slot      slot of the record being updated
        @param offset    Offset where the update starts, need not be on a field boundry.
        @param oldLength length of the data being replaced
        @param newLength length of the data replacing the old data

        @return the length of the data in the record after the replaced data.

        @exception IOException RESOLVE
    */
    private int shiftRemainingData(int slot, int offset, int oldLength, int newLength) 
        throws IOException
    {

        // length of valid data remaining in the record after the portion that
        // is being replaced.
        int remainingLength = (getRecordOffset(slot) + getRecordPortionLength(slot)) - 
                              (offset + oldLength);

        if (SanityManager.DEBUG) {

            // sanity: replaced span must lie within the record
            if (!(((remainingLength >= 0) && 
                   (getRecordPortionLength(slot) >= oldLength))))
            {
                SanityManager.THROWASSERT(
                    "oldLength = " + oldLength + " newLength = " + newLength + 
                    "remainingLength = " + remainingLength + 
                    " offset = " + offset + 
                    " getRecordOffset(" + slot + ") = " + getRecordOffset(slot)+
                    " getRecordPortionLength(" + slot + ") = " + 
                    getRecordPortionLength(slot));
            }
        }

        if (remainingLength != 0) {
            // arraycopy is safe for overlapping regions within one array
            System.arraycopy(pageData, offset + oldLength,
                             pageData, offset + newLength, remainingLength);
        }

        return remainingLength;

    }
+\r
    /**
        Set the deleted status

        Updates the record header's delete bit both in memory and on the
        page image, and keeps the page's deleted-row count in sync.

        @param instant log instant of the action being replayed, or null
        @param slot    slot of the record to (un)delete
        @param delete  true to mark deleted, false to unmark

        @exception StandardException Standard Derby error policy
        @exception IOException RESOLVE
        @see BasePage#setDeleteStatus
    */
    public void setDeleteStatus(LogInstant instant, int slot, boolean delete)
        throws StandardException, IOException 
    {

        logAction(instant);

        // super.setDeleteStatus returns the delta (+1/-1/0) for the count
        deletedRowCount += super.setDeleteStatus(slot, delete);
        headerOutOfDate = true;

        int offset = getRecordOffset(slot);
        StoredRecordHeader recordHeader = getHeaderAtSlot(slot);

        // rewrite the record header in place so the page image matches
        // the in-memory header
        rawDataOut.setPosition(offset);
        recordHeader.write(logicalDataOut);
    }
+\r
+ /**\r
+ get record count without checking for latch\r
+ */\r
+ protected int internalDeletedRecordCount()\r
+ {\r
+ return deletedRowCount;\r
+ }\r
+\r
    /**
        purgeRecord from page. Move following slots up by one.

        Physically removes the record's bytes from the page, then removes
        its entry from both the on-page and in-memory slot tables.

        @param instant  log instant of the action being replayed, or null
        @param slot     slot of the record to purge
        @param recordId id of the record being purged

        @exception StandardException Standard Derby error policy
        @exception IOException RESOLVE
    */
    public void purgeRecord(LogInstant instant, int slot, int recordId)
        throws StandardException, IOException 
    {

        logAction(instant);

        // if record is marked deleted, reduce deletedRowCount
        if (getHeaderAtSlot(slot).isDeleted())
            deletedRowCount--;

        // byte range occupied by the record, inclusive of reserved space
        int startByte = getRecordOffset(slot);
        int endByte = startByte + getTotalSpace(slot) - 1;

        compressPage(startByte, endByte);
        
        // fix up the on-page slot table
        removeSlotEntry(slot);

        // fix up the in-memory version
        removeAndShiftDown(slot);
    }
+\r
    /**
        Get the offset of the field header of the given field for
        the record in the given slot.

        Field number is the absolute number for the complete record, not just this portion.
        E.g. if this is a record portion that starts at field 3 and has 6 fields
        then the second field on this *page* has field number 4.

        @param slot        slot of the record
        @param fieldNumber absolute field number to locate

        @return page offset of the field's stored header

        @exception IOException corrupt page data
    */
    private int getFieldOffset(int slot, int fieldNumber) throws IOException
    {
        // RESOLVE - overflow, needs to be changed
        int offset = getRecordOffset(slot);

        StoredRecordHeader recordHeader = getHeaderAtSlot(slot);

        // get the number of fields
        int startField = recordHeader.getFirstField();

        if (SanityManager.DEBUG) {
            int numberFields = recordHeader.getNumberFields();

            // sanity: requested field must fall in this record portion
            if ((fieldNumber < startField) || (fieldNumber >= (startField + numberFields)))
                SanityManager.THROWASSERT(
                    "fieldNumber: " + fieldNumber +
                    " start field: " + startField +
                    " number of fields " + numberFields);
        }

        ArrayInputStream lrdi = rawDataIn;

        // skip the record header
        lrdi.setPosition(offset + recordHeader.size());

        // skip any earlier fields ...
        for (int i = startField; i < fieldNumber; i++) {
            skipField(lrdi);
        }

        // lrdi aliases rawDataIn, so its position is the field's offset
        return rawDataIn.getPosition();
    }
+\r
+\r
+ /*\r
+ * Time stamp support - this page supports time stamp\r
+ */\r
+\r
+ /**\r
+ Get a time stamp for this page\r
+ @return page time stamp\r
+ */ \r
+ public PageTimeStamp currentTimeStamp()\r
+ {\r
+ // saving the whole key would be an overkill\r
+ return new PageVersion(getPageNumber(), getPageVersion());\r
+ }\r
+\r
+ /**\r
+ Set given pageVersion to be the as what is on this page\r
+ \r
+ @exception StandardException given time stamp is null or is not a time\r
+ stamp implementation this page knows how to deal with\r
+ */\r
+ public void setTimeStamp(PageTimeStamp ts) throws StandardException\r
+ {\r
+ if (ts == null)\r
+ {\r
+ throw StandardException.newException(SQLState.DATA_TIME_STAMP_NULL);\r
+ }\r
+\r
+ if (!(ts instanceof PageVersion))\r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_TIME_STAMP_ILLEGAL, ts);\r
+ }\r
+\r
+ PageVersion pv = (PageVersion)ts;\r
+\r
+ pv.setPageNumber(getPageNumber());\r
+ pv.setPageVersion(getPageVersion());\r
+ }\r
+\r
+ /**\r
+ compare given PageVersion with pageVersion on page\r
+\r
+ @param ts the page version gotton from this page via a currentTimeStamp\r
+ or setTimeStamp call earlier\r
+\r
+ @return true if the same\r
+ @exception StandardException given time stamp not gotton from this page\r
+ */\r
+ public boolean equalTimeStamp(PageTimeStamp ts) throws StandardException\r
+ {\r
+ if (ts == null)\r
+ return false;\r
+\r
+ if (!(ts instanceof PageVersion))\r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_TIME_STAMP_ILLEGAL, ts);\r
+ }\r
+\r
+ PageVersion pv = (PageVersion)ts;\r
+\r
+ if (pv.getPageNumber() != getPageNumber())\r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_TIME_STAMP_ILLEGAL, ts);\r
+ }\r
+\r
+ return (pv.getPageVersion() == getPageVersion());\r
+ }\r
+\r
+ /** debugging, print this page */\r
+ public String toString()\r
+ {\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ if (SanityManager.DEBUG_ON("DeadlockTrace") || SanityManager.DEBUG_ON("userLockStackTrace"))\r
+ return "page = " + getIdentity();\r
+\r
+ String str = "---------------------------------------------------\n";\r
+ str += pageHeaderToString();\r
+ // str += slotTableToString(); // print in memory slot table\r
+\r
+ // now print each row\r
+ for (int s = 0; s < slotsInUse; s++)\r
+ str += recordToString(s);\r
+ \r
+ //if (SanityManager.DEBUG_ON("dumpPageImage"))\r
+ {\r
+ str += "---------------------------------------------------\n";\r
+ str += pagedataToHexDump(pageData);\r
+ str += "---------------------------------------------------\n";\r
+ }\r
+ return str;\r
+ }\r
+ else\r
+ return null;\r
+ }\r
+\r
+ /**\r
+ * Provide a hex dump of the data in the in memory version of the page.\r
+ * <p>\r
+ * The output looks like:\r
+ *\r
+ * 00000000: 4d5a 9000 0300 0000 0400 0000 ffff 0000 MZ..............\r
+ * 00000010: b800 0000 0000 0000 4000 0000 0000 0000 ........@.......\r
+ * 00000020: 0000 0000 0000 0000 0000 0000 0000 0000 ................\r
+ * 00000030: 0000 0000 0000 0000 0000 0000 8000 0000 ................\r
+ * 00000040: 0e1f ba0e 00b4 09cd 21b8 014c cd21 5468 ........!..L.!Th\r
+ * 00000050: 6973 2070 726f 6772 616d 2063 616e 6e6f is program canno\r
+ * 00000060: 7420 6265 2072 756e 2069 6e20 444f 5320 t be run in DOS \r
+ * 00000070: 6d6f 6465 2e0d 0a24 0000 0000 0000 0050 mode...$.......P\r
+ * 00000080: 4500 004c 0109 008b abfd 3000 0000 0000 E..L......0.....\r
+ * 00000090: 0000 00e0 000e 210b 0102 3700 3405 0000 ......!...7.4...\r
+ * 000000a0: 8401 0000 6400 0000 6004 0000 1000 0000 ....d...`.......\r
+ * 000000b0: 5005 0000 0008 6000 1000 0000 0200 0001 P.....`.........\r
+ * 000000c0: 0000 0000 0000 0004 0000 0000 0000 0000 ................\r
+ * 000000d0: 9007 0000 0400 0009 a207 0002 0000 0000 ................\r
+ * 000000e0: 0010 0000 1000 0000 0010 0000 1000 0000 ................\r
+ * 000000f0: 0000 0010 0000 0000 6006 00ef 8100 0000 ........`.......\r
+ * 00000100: 5006 00e6 0c00 0000 0007 00d0 0400 0000 P...............\r
+ * 00000110: 0000 0000 0000 0000 0000 0000 0000 0000 ................\r
+ * 00000120: 1007 00c8 7100 0000 0000 0000 0000 0000 ....q...........\r
+ * 00000130: 0000 0000 0000 0000 0000 0000 0000 0000 ................\r
+ *\r
+ * <p>\r
+ * RESOLVE - this has been hacked together and is not efficient. There\r
+ * are probably some java utilities to use.\r
+ *\r
+ * @return The string with the hex dump in it.\r
+ *\r
+ * @param data array of bytes to dump.\r
+ **/\r
+ private static String pagedataToHexDump(byte[] data)\r
+ {\r
+ return org.apache.derby.iapi.util.StringUtil.hexDump(data);\r
+ }\r
+\r
+ private String pageHeaderToString()\r
+ {\r
+ if (SanityManager.DEBUG) {\r
+ return "page id " + getIdentity() + \r
+ " Overflow: " + isOverflowPage +\r
+ " PageVersion: " + getPageVersion() +\r
+ " SlotsInUse: " + slotsInUse +\r
+ " DeletedRowCount: " + deletedRowCount +\r
+ " PageStatus: " + getPageStatus() + \r
+ " NextId: " + nextId + \r
+ " firstFreeByte: " + firstFreeByte + \r
+ " freeSpace: " + freeSpace + \r
+ " totalSpace: " + totalSpace + \r
+ " spareSpace: " + spareSpace + \r
+ " PageSize: " + getPageSize() +\r
+ "\n";\r
+ }\r
+ else\r
+ return null;\r
+ }\r
+\r
+ private String recordToString(int slot)\r
+ {\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ String str = new String();\r
+ try \r
+ {\r
+ StoredRecordHeader recordHeader = getHeaderAtSlot(slot);\r
+ int offset = getRecordOffset(slot);\r
+ int numberFields = recordHeader.getNumberFields();\r
+ str = "\nslot " + slot + " offset " + offset + " " +\r
+ " recordlen " + getTotalSpace(slot) +\r
+ " (" + getRecordPortionLength(slot) +\r
+ "," + getReservedCount(slot) + ")"+\r
+ recordHeader.toString();\r
+\r
+ rawDataIn.setPosition(offset + recordHeader.size());\r
+\r
+ for (int i = 0; i < numberFields; i++)\r
+ {\r
+ int fieldStatus = StoredFieldHeader.readStatus(rawDataIn);\r
+ int fieldDataLength = StoredFieldHeader.readFieldDataLength(rawDataIn, fieldStatus, slotFieldSize);\r
+ if (fieldDataLength < 0)\r
+ {\r
+ str += "\n\tField " + i + ": offset=" + offset + " null " + \r
+ StoredFieldHeader.toDebugString(fieldStatus);\r
+ }\r
+ else \r
+ {\r
+ str += "\n\tField " + i + ": offset=" + offset + \r
+ " len=" + fieldDataLength + " " + \r
+ StoredFieldHeader.toDebugString(fieldStatus);\r
+\r
+ if (StoredFieldHeader.isOverflow(fieldStatus))\r
+ {\r
+ // not likely to be a real pointer, this is most\r
+ // likely an old column chain where the first field\r
+ // is set to overflow even though the second field\r
+ // is the overflow pointer\r
+ if (i == 0 && fieldDataLength != 3) \r
+ {\r
+ // figure out where we should go next\r
+ offset = rawDataIn.getPosition() + fieldDataLength;\r
+ long overflowPage = CompressedNumber.readLong((InputStream) rawDataIn);\r
+ int overflowId = CompressedNumber.readInt((InputStream) rawDataIn);\r
+\r
+ str += "Questionable long column at (" +\r
+ overflowPage + "," + overflowId + ")";\r
+ rawDataIn.setPosition(offset);\r
+ }\r
+ else\r
+ {\r
+ // print the overflow pointer\r
+ long overflowPage = CompressedNumber.readLong((InputStream) rawDataIn);\r
+ int overflowId = CompressedNumber.readInt((InputStream) rawDataIn);\r
+ str += "long column at (" + overflowPage + "," + overflowId + ")";\r
+ }\r
+ }\r
+ else\r
+ {\r
+ // go to next field\r
+ offset = rawDataIn.getPosition() + fieldDataLength;\r
+ rawDataIn.setPosition(offset);\r
+ }\r
+ }\r
+ }\r
+ str += "\n";\r
+\r
+ }\r
+ catch (IOException ioe)\r
+ {\r
+ str += "\n ======= ERROR IOException =============\n";\r
+ str += ioe.toString();\r
+ }\r
+ catch (StandardException se)\r
+ {\r
+ str += "\n ======= ERROR StandardException =============\n";\r
+ str += se.toString();\r
+ }\r
+\r
+ return str;\r
+ }\r
+ else\r
+ return null;\r
+ }\r
+\r
+ /*\r
+ ** Overflow related methods\r
+ */\r
+\r
+ /**\r
+ Get the overflow page for a record that has already overflowed.\r
+ @exception StandardException Standard Derby error policy\r
+ */\r
+ protected StoredPage getOverflowPage(long pageNumber) throws StandardException\r
+ {\r
+\r
+ StoredPage overflowPage = (StoredPage) owner.getPage(pageNumber);\r
+ if (overflowPage == null) {\r
+ }\r
+\r
+ // RESOLVE-LR\r
+ //if (!overflowPage.isOverflow()) {\r
+ // overflowPage.unlatch();\r
+ //}\r
+\r
+ return overflowPage;\r
+ }\r
+\r
+ /**\r
+ Get an empty overflow page.\r
+ @exception StandardException Standard Derby error policy\r
+ */\r
+ protected BasePage getNewOverflowPage() throws StandardException\r
+ {\r
+\r
+ FileContainer myContainer = (FileContainer) containerCache.find(identity.getContainerId());\r
+\r
+ try {\r
+ // add an overflow page\r
+ return (BasePage) myContainer.addPage(owner, true);\r
+ } finally {\r
+ containerCache.release(myContainer);\r
+ }\r
+ }\r
+\r
+ /**\r
+ Get the overflow slot for a record that has already overflowed.\r
+ @exception StandardException Standard Derby error policy\r
+ */\r
+ protected static int getOverflowSlot(BasePage overflowPage, StoredRecordHeader recordHeader)\r
+ throws StandardException\r
+ {\r
+\r
+ int slot = overflowPage.findRecordById(\r
+ recordHeader.getOverflowId(), Page.FIRST_SLOT_NUMBER);\r
+\r
+ if (slot < 0)\r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_SLOT_NOT_ON_PAGE);\r
+ }\r
+\r
+ return slot;\r
+ }\r
+\r
+ /**\r
+ Get a overflow page that potentially can handle a new overflowed record.\r
+ @exception StandardException Standard Derby error policy\r
+ */\r
+ public BasePage getOverflowPageForInsert(\r
+ int currentSlot, \r
+ Object[] row, \r
+ FormatableBitSet validColumns)\r
+ throws StandardException\r
+ {\r
+ return getOverflowPageForInsert(currentSlot, row, validColumns, 0);\r
+ }\r
+\r
    /**
        Get an overflow page that potentially can handle a new overflowed
        record portion. Candidate pages are the overflow pages already
        referenced by other records on this page (up to 5); if none of
        them has room, a brand new overflow page is allocated.

        @param currentSlot  slot of the record being overflowed
        @param row          the row (or row portion) to place
        @param validColumns columns of row being stored, null means all
        @param startColumn  first column of the row portion

        @exception StandardException Standard Derby error policy
    */
    public BasePage getOverflowPageForInsert(
        int currentSlot, 
        Object[] row, 
        FormatableBitSet validColumns, 
        int startColumn)
        throws StandardException
    {
        // System.out.println("Top of getOverflowPageForInsert");

        // look at all the overflow pages that are in use on this page, up
        // to a maximum of 5.
        long[] pageList = new long[5];
        int pageCount = 0;

        long currentOverflowPageNumber = 0;

slotScan:
        for (int slot = 0; (slot < slotsInUse) && (pageCount < pageList.length); slot++) {

            StoredRecordHeader recordHeader = getHeaderAtSlot(slot);
            if (!recordHeader.hasOverflow())
                continue;

            long overflowPageNumber = recordHeader.getOverflowPage();

            // remember the current slot's own overflow page separately;
            // it is excluded from the candidates below
            if (slot == currentSlot) {
                currentOverflowPageNumber = overflowPageNumber;
                continue;
            }

            // de-duplicate: skip pages already collected
            for (int i = 0; i < pageCount; i++) {
                if (pageList[i] == overflowPageNumber)
                    continue slotScan;
            }

            pageList[pageCount++] = overflowPageNumber; 
        }


        for (int i = 0; i < pageCount; i++) {

            long pageNumber = pageList[i];

            // don't look at the current overflow page
            // used by this slot, because it the record is already
            // overflowed then we reached here because the overflow
            // page is full.
            if (pageNumber == currentOverflowPageNumber)
                continue;
            StoredPage overflowPage = null;
            int spaceNeeded = 0;
            try {
                overflowPage = getOverflowPage(pageNumber);
                if ( overflowPage.spaceForInsert(row, validColumns,
                        spaceNeeded, startColumn, 100))
                {
                    // System.out.println("returning used page: " + pageNumber);
                    // found a candidate with room; caller gets it latched
                    return overflowPage;
                }

                // NOTE(review): spaceNeeded is updated here but only used by
                // the spaceForInsert call of the NEXT candidate page —
                // presumably a lower bound carried forward; confirm intent.
                spaceNeeded = ((StoredPage) overflowPage).getCurrentFreeSpace();
                overflowPage.unlatch();
                overflowPage = null;
                
            } catch (StandardException se) {
                // best-effort probe: a failure on one candidate page is
                // deliberately ignored; we just try the next candidate
                // (and fall back to a new page below).
                if (overflowPage != null) {
                    overflowPage.unlatch();
                    overflowPage = null;
                }

            }
        }

        // if we get here then we have to allocate a new overflow page
        // System.out.println("returning new page: ");
        return getNewOverflowPage();
    }
+ \r
    /**
        Update an already overflowed record.

        Latches the overflow page named by the record header, finds the
        continuation slot there, and applies the update on that page. The
        overflow page is always unlatched, even on error.

        @param t            transaction performing the update
        @param slot         Slot of the original record on its original page
        @param row          new version of the data
        @param validColumns columns of row being updated, null means all
        @param recordHeader header of the original record, carrying the
                            overflow page/id pointer

        @exception StandardException Standard Derby error policy
    */
    protected void updateOverflowed(
        RawTransaction t, 
        int slot,
        Object[] row, 
        FormatableBitSet validColumns,
        StoredRecordHeader recordHeader)
        throws StandardException
    {

        BasePage overflowPage = getOverflowPage(recordHeader.getOverflowPage());

        try {

            int overflowSlot = getOverflowSlot(overflowPage, recordHeader);

            overflowPage.doUpdateAtSlot(t, overflowSlot, recordHeader.getOverflowId(), row, validColumns);
            overflowPage.unlatch();
            // null the reference so the finally block does not unlatch twice
            overflowPage = null;

            return;

        } finally {
            // on any exception path, make sure the latch is released
            if (overflowPage != null) {
                overflowPage.unlatch();
                overflowPage = null;
            }
        }
    }
+\r
+\r
+ /**\r
+ Update a record handle to point to an overflowed record portion.\r
+ Note that the record handle need not be the current page.\r
+ @exception StandardException Standard Derby error policy\r
+ */\r
+ public void updateOverflowDetails(RecordHandle handle, RecordHandle overflowHandle)\r
+ throws StandardException\r
+ {\r
+ long handlePageNumber = handle.getPageNumber();\r
+ if (handlePageNumber == getPageNumber()) {\r
+ updateOverflowDetails(this, handle, overflowHandle);\r
+ return;\r
+ }\r
+ \r
+ StoredPage handlePage = (StoredPage) owner.getPage(handlePageNumber);\r
+\r
+ updateOverflowDetails(handlePage, handle, overflowHandle); \r
+ handlePage.unlatch();\r
+ }\r
+\r
    /**
        Turn the record identified by handle on handlePage into an
        overflow record pointing at overflowHandle.

        @param handlePage     page holding the record (already latched)
        @param handle         handle of the record being updated
        @param overflowHandle handle of the overflow portion to point to

        @exception StandardException Standard Derby error policy
    */
    private void updateOverflowDetails(StoredPage handlePage, RecordHandle handle, RecordHandle overflowHandle)
        throws StandardException {
        // update the temp record header, this will be used in the log row ..
        handlePage.getOverFlowRecordHeader().setOverflowDetails(overflowHandle);

        // Use the slot interface as we don't need a lock since
        // the initial insert/update holds the lock on the first
        // portion of the record.
        int slot = handlePage.getSlotNumber(handle);

        // use doUpdateAtSlot as it avoids unnecessary work in updateAtSlot the
        // null indicates to this page that the record should become an 
        // overflow record
        handlePage.doUpdateAtSlot(
            owner.getTransaction(), slot, handle.getId(), 
            (Object[]) null, (FormatableBitSet) null);
    }
+\r
+ /**\r
+ @exception StandardException Standard Derby error policy\r
+ */\r
+ public void updateFieldOverflowDetails(RecordHandle handle, RecordHandle overflowHandle)\r
+ throws StandardException\r
+ {\r
+ // add an overflow field at the end of the previous record\r
+ // uses sparse rows\r
+ Object[] row = new Object[2];\r
+ row[1] = overflowHandle;\r
+\r
+ // we are expanding the record to have 2 fields, the second field is the overflow pointer.\r
+ FormatableBitSet validColumns = new FormatableBitSet(2);\r
+ validColumns.set(1);\r
+\r
+ // Use the slot interface as we don't need a lock since\r
+ // the initial insert/update holds the lock on the first\r
+ // portion of the record.\r
+ int slot = getSlotNumber(handle);\r
+\r
+ // use doUpdateAtSlot as it avoids unnecessary work in updateAtSlot\r
+ doUpdateAtSlot(owner.getTransaction(), slot, handle.getId(), row, validColumns);\r
+ }\r
+\r
+ /**\r
+ @exception StandardException Standard Derby error policy\r
+ */\r
+ public int appendOverflowFieldHeader(DynamicByteArrayOutputStream logBuffer, RecordHandle overflowHandle)\r
+ throws StandardException, IOException\r
+ {\r
+ int fieldStatus = StoredFieldHeader.setInitial();\r
+ fieldStatus = StoredFieldHeader.setOverflow(fieldStatus, true);\r
+\r
+ long overflowPage = overflowHandle.getPageNumber();\r
+ int overflowId = overflowHandle.getId();\r
+ int fieldDataLength = CompressedNumber.sizeLong(overflowPage)\r
+ + CompressedNumber.sizeInt(overflowId);\r
+\r
+ // write the field header to the log buffer\r
+ int lenWritten = StoredFieldHeader.write(logBuffer, fieldStatus, fieldDataLength, slotFieldSize);\r
+\r
+ // write the overflow details to the log buffer\r
+ lenWritten += CompressedNumber.writeLong(logBuffer, overflowPage);\r
+ lenWritten += CompressedNumber.writeInt(logBuffer, overflowId);\r
+\r
+ // this length is the same on page as in the log\r
+ return (lenWritten);\r
+ }\r
+\r
+ protected int getSlotsInUse()\r
+ {\r
+ return(slotsInUse);\r
+ }\r
+\r
+\r
+ /**\r
+ return the max datalength allowed with the space available\r
+ */\r
+ private int getMaxDataLength(int spaceAvailable, int overflowThreshold) {\r
+\r
+ if (SanityManager.DEBUG) {\r
+ if (overflowThreshold == 0) \r
+ SanityManager.THROWASSERT("overflowThreshold cannot be 0");\r
+ }\r
+\r
+ // we need to take into considering of the overflowThreshold\r
+ // the overflowThreshold limits the max data length,\r
+ // whatever space we have left, we will not allow max data length\r
+ // to exceed the overflow threshold.\r
+ int maxThresholdSpace = totalSpace * overflowThreshold / 100;\r
+ int maxAvailable = 0;\r
+\r
+ if (spaceAvailable < (64 - 2))\r
+ maxAvailable = spaceAvailable - 2;\r
+ else if (spaceAvailable < (16383 - 3))\r
+ maxAvailable = spaceAvailable - 3;\r
+ else\r
+ maxAvailable = spaceAvailable - 5;\r
+\r
+ return (maxAvailable > maxThresholdSpace ? maxThresholdSpace : maxAvailable);\r
+\r
+ }\r
+\r
+ /**\r
+ return whether the field has exceeded the max threshold for this page\r
+ it compares the fieldSize with the largest possible field for this page\r
+ */\r
+ private boolean isLong(int fieldSize, int overflowThreshold) {\r
+\r
+ if (SanityManager.DEBUG) {\r
+ if (overflowThreshold == 0) \r
+ SanityManager.THROWASSERT("overflowThreshold cannot be 0");\r
+ }\r
+\r
+ // if a field size is over the threshold, then it becomes a long column\r
+ int maxThresholdSize = maxFieldSize * overflowThreshold / 100;\r
+ return (fieldSize > maxThresholdSize);\r
+ }\r
+\r
+ /**\r
+ Perform an update.\r
+\r
+ @exception StandardException Standard Derby policy\r
+ */\r
+ public void doUpdateAtSlot(\r
+ RawTransaction t, \r
+ int slot, \r
+ int id, \r
+ Object[] row, \r
+ FormatableBitSet validColumns)\r
+ throws StandardException\r
+ {\r
+ // If this is a head page, the recordHandle is the head row handle.\r
+ // If this is not a head page, we are calling updateAtSlot inside some\r
+ // convoluted loop that updates an overflow chain. There is nothing we\r
+ // can doing about it anyway.\r
+ RecordHandle headRowHandle = \r
+ isOverflowPage() ? null : getRecordHandleAtSlot(slot); \r
+ \r
+ // RESOLVE: djd/yyz what does a null row means? (sku)\r
+ if (row == null) \r
+ {\r
+ owner.getActionSet().actionUpdate(\r
+ t, this, slot, id, row, validColumns, -1, \r
+ (DynamicByteArrayOutputStream) null, -1, headRowHandle);\r
+\r
+ return;\r
+ }\r
+\r
+ // startColumn is the first column to be updated.\r
+ int startColumn = RowUtil.nextColumn(row, validColumns, 0);\r
+ if (startColumn == -1)\r
+ return;\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ // make sure that if N bits are set in the validColumns that\r
+ // exactly N columns are passed in via the row array.\r
+ if (!isOverflowPage() && validColumns != null)\r
+ {\r
+ if (RowUtil.getNumberOfColumns(-1, validColumns) > row.length)\r
+ SanityManager.THROWASSERT("updating slot " + slot + \r
+ " on page " + getIdentity() + " " +\r
+ RowUtil.getNumberOfColumns(-1, validColumns) + \r
+ " bits are set in validColumns but only " +\r
+ row.length + " columns in row[]");\r
+ }\r
+ }\r
+\r
+\r
+ // Keep track of row shrinkage in the head row piece. If any row piece\r
+ // shrinks, file a post commit work to clear all reserved space for the\r
+ // entire row chain.\r
+ boolean rowHasReservedSpace = false; \r
+\r
+ StoredPage curPage = this;\r
+ for (;;) \r
+ {\r
+ StoredRecordHeader rh = curPage.getHeaderAtSlot(slot);\r
+\r
+ int startField = rh.getFirstField(); \r
+ int endFieldExclusive = startField + rh.getNumberFields();\r
+\r
+ // curPage contains column[startField] to column[endFieldExclusive-1]\r
+\r
+ // Need to cope with an update that is increasing the number of \r
+ // columns. If this occurs we want to make sure that we perform a \r
+ // single update to the last portion of a record, and not an update\r
+ // of the current columns and then an update to append a column.\r
+\r
+ long nextPage = -1;\r
+ int realStartColumn = -1;\r
+ int realSpaceOnPage = -1;\r
+\r
+ if (!rh.hasOverflow() || \r
+ ((startColumn >= startField) && \r
+ (startColumn < endFieldExclusive))) \r
+ {\r
+ boolean hitLongColumn;\r
+ int nextColumn = -1;\r
+ Object[] savedFields = null;\r
+ DynamicByteArrayOutputStream logBuffer = null;\r
+\r
+ do \r
+ {\r
+ try \r
+ {\r
+ // Update this portion of the record.\r
+ // Pass in headRowHandle in case we are to update any\r
+ // long column and they need to be cleaned up by post\r
+ // commit processing. We don't want to purge the\r
+ // columns right now because in order to reclaim the\r
+ // page, we need to remove them. But it would be bad\r
+ // to remove them now because the transaction may not\r
+ // commit for a long time. We can do both purging of\r
+ // the long column and page removal together in the\r
+ // post commit.\r
+ nextColumn = \r
+ owner.getActionSet().actionUpdate(\r
+ t, curPage, slot, id, row, validColumns, \r
+ realStartColumn, logBuffer, \r
+ realSpaceOnPage, headRowHandle);\r
+\r
+ hitLongColumn = false;\r
+\r
+ } \r
+ catch (LongColumnException lce) \r
+ {\r
+ \r
+ if (lce.getRealSpaceOnPage() == -1) \r
+ {\r
+ // an update that has caused the row to increase \r
+ // in size *and* push some fields off the page \r
+ // that need to be inserted in an overflow page\r
+\r
+ // no need to make a copy as we are going to use \r
+ // this buffer right away\r
+ logBuffer = lce.getLogBuffer();\r
+\r
+ savedFields = \r
+ (Object[]) lce.getColumn();\r
+ \r
+ realStartColumn = lce.getNextColumn();\r
+ realSpaceOnPage = -1;\r
+\r
+ hitLongColumn = true;\r
+\r
+ continue;\r
+ }\r
+\r
+ \r
+ // we caught a real long column exception\r
+ // three things should happen here:\r
+ // 1. insert the long column into overflow pages.\r
+ // 2. append the overflow field header in the main chain.\r
+ // 3. continue the update in the main data chain.\r
+ logBuffer = \r
+ new DynamicByteArrayOutputStream(lce.getLogBuffer());\r
+\r
+ // step 1: insert the long column ... if this update \r
+ // operation rolls back, purge the after image column \r
+ // chain and reclaim the overflow page because the \r
+ // whole chain will be orphaned anyway. \r
+ RecordHandle longColumnHandle =\r
+ insertLongColumn(\r
+ curPage, lce, Page.INSERT_UNDO_WITH_PURGE);\r
+\r
+ // step 2: append overflow field header to log buffer\r
+ int overflowFieldLen = 0;\r
+ try \r
+ {\r
+ overflowFieldLen +=\r
+ appendOverflowFieldHeader(\r
+ logBuffer, longColumnHandle);\r
+\r
+ } \r
+ catch (IOException ioe) \r
+ {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);\r
+ }\r
+\r
+ // step 3: continue the insert in the main data chain\r
+ // need to pass the log buffer, and start column to the\r
+ // next insert.\r
+ realStartColumn = lce.getNextColumn() + 1;\r
+ realSpaceOnPage = lce.getRealSpaceOnPage() - overflowFieldLen;\r
+ hitLongColumn = true;\r
+\r
+ }\r
+\r
+ } while (hitLongColumn);\r
+\r
+\r
+ // See if we completed all the columns that are on this page.\r
+ int validColumnsSize = \r
+ (validColumns == null) ? 0 : validColumns.getLength();\r
+\r
+ if (nextColumn != -1) \r
+ {\r
+\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ // note nextColumn might be less than the the first \r
+ // column we started updating. This is because the \r
+ // update might force the record header to grow and \r
+ // push fields before the one we are updating off the \r
+ // page and into this insert.\r
+\r
+ if ((nextColumn < startField) || \r
+ (rh.hasOverflow() && (nextColumn >= endFieldExclusive)))\r
+ {\r
+ SanityManager.THROWASSERT(\r
+ "nextColumn out of range = " + nextColumn +\r
+ " expected between " + \r
+ startField + " and " + endFieldExclusive);\r
+ }\r
+ }\r
+\r
+ // Need to insert rows from nextColumn to endFieldExclusive \r
+ // onto a new overflow page.\r
+ // If the column is not being updated we\r
+ // pick it up from the current page. If it is being updated\r
+ // we take it from the new value.\r
+ int possibleLastFieldExclusive = endFieldExclusive;\r
+ \r
+ if (!rh.hasOverflow()) \r
+ {\r
+ // we might be adding a field here\r
+ if (validColumns == null) \r
+ {\r
+ if (row.length > possibleLastFieldExclusive)\r
+ possibleLastFieldExclusive = row.length;\r
+ } \r
+ else \r
+ {\r
+ if (validColumnsSize > possibleLastFieldExclusive)\r
+ possibleLastFieldExclusive = validColumnsSize;\r
+ }\r
+ }\r
+\r
+\r
+ // use a sparse row\r
+ Object[] newRow = \r
+ new Object[possibleLastFieldExclusive];\r
+\r
+ FormatableBitSet newColumnList = \r
+ new FormatableBitSet(possibleLastFieldExclusive);\r
+\r
+ ByteArrayOutputStream fieldStream = null;\r
+\r
+ for (int i = nextColumn; i < possibleLastFieldExclusive; i++) \r
+ {\r
+ if ((validColumns == null) || \r
+ (validColumnsSize > i && validColumns.isSet(i))) \r
+ {\r
+ newColumnList.set(i);\r
+ // use the new value\r
+ newRow[i] = RowUtil.getColumn(row, validColumns, i);\r
+\r
+ }\r
+ else if (i < endFieldExclusive) \r
+ {\r
+ newColumnList.set(i);\r
+\r
+ // use the old value\r
+ newRow[i] = savedFields[i - nextColumn];\r
+ }\r
+ }\r
+\r
+ RecordHandle handle = curPage.getRecordHandleAtSlot(slot);\r
+\r
+ // If the portion we just updated is the last portion then \r
+ // there cannot be any updates to do.\r
+ if (rh.hasOverflow()) \r
+ {\r
+ // We have to carry across the overflow information\r
+ // from the current record, if any.\r
+ nextPage = rh.getOverflowPage();\r
+ id = rh.getOverflowId();\r
+\r
+ // find the next starting column before unlatching page\r
+ startColumn = \r
+ RowUtil.nextColumn(\r
+ row, validColumns, endFieldExclusive);\r
+ } \r
+ else \r
+ {\r
+ startColumn = -1;\r
+ nextPage = 0;\r
+ }\r
+\r
+\r
+ // After the update is done, see if this row piece has\r
+ // shrunk in curPage if no other row pieces have shrunk so\r
+ // far. In head page, need to respect minimumRecordSize.\r
+ // In overflow page, only need to respect\r
+ // RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT\r
+ // Don't bother with temp container.\r
+ if (!rowHasReservedSpace && headRowHandle != null &&\r
+ curPage != null && !owner.isTemporaryContainer())\r
+ {\r
+ rowHasReservedSpace = \r
+ curPage.checkRowReservedSpace(slot);\r
+ }\r
+\r
+\r
+ // insert the record portion on a new overflow page at slot\r
+ // 0 this will automatically handle any overflows in\r
+ // this new portion\r
+\r
+ // BasePage op = getNewOverflowPage();\r
+\r
+ BasePage op = \r
+ curPage.getOverflowPageForInsert(\r
+ slot,\r
+ newRow,\r
+ newColumnList,\r
+ nextColumn);\r
+\r
+ // We have all the information from this page so unlatch it\r
+ if (curPage != this) \r
+ {\r
+ curPage.unlatch();\r
+ curPage = null;\r
+ }\r
+\r
+ byte mode = Page.INSERT_OVERFLOW;\r
+ if (nextPage != 0)\r
+ mode |= Page.INSERT_FOR_SPLIT;\r
+\r
+ RecordHandle nextPortionHandle =\r
+ nextPage == 0 ? null :\r
+ owner.makeRecordHandle(nextPage, id);\r
+\r
+ // RESOLVED (sku): even though we would like to roll back \r
+ // these inserts with PURGE rather than with delete, \r
+ // we have to delete because if we purge the last row\r
+ // from an overflow page, the purge will queue a post \r
+ // commit to remove the page.\r
+ // While this is OK with long columns, we cannot do this \r
+ // for long rows because long row overflow pages can be \r
+ // shared by more than one long rows, and thus it is unsafe\r
+ // to remove the page without first latching the head page.\r
+ // However, the insert log record do not have the head \r
+ // row's page number so the rollback cannot put that\r
+ // information into the post commit work.\r
+ RecordHandle portionHandle =\r
+ op.insertAllowOverflow(\r
+ 0, newRow, newColumnList, nextColumn, mode, 100, \r
+ nextPortionHandle);\r
+\r
+ // Update the previous record header to point to new portion\r
+ if (curPage == this)\r
+ updateOverflowDetails(this, handle, portionHandle);\r
+ else\r
+ updateOverflowDetails(handle, portionHandle);\r
+ op.unlatch();\r
+ } \r
+ else \r
+ {\r
+\r
+ // See earlier comments on checking row reserved space.\r
+ if (!rowHasReservedSpace && \r
+ headRowHandle != null &&\r
+ curPage != null && \r
+ !owner.isTemporaryContainer()) \r
+ {\r
+ rowHasReservedSpace = \r
+ curPage.checkRowReservedSpace(slot);\r
+ }\r
+\r
+\r
+ // find the next starting column before we unlatch the page\r
+ startColumn = \r
+ rh.hasOverflow() ? \r
+ RowUtil.nextColumn(\r
+ row, validColumns, endFieldExclusive) : -1;\r
+ }\r
+\r
+ // have we completed this update?\r
+ if (startColumn == -1) {\r
+\r
+ if ((curPage != this) && (curPage != null))\r
+ curPage.unlatch();\r
+ break; // break out of the for loop\r
+ }\r
+ }\r
+\r
+ if (nextPage == -1) \r
+ {\r
+ if (SanityManager.DEBUG) \r
+ {\r
+ SanityManager.ASSERT(\r
+ curPage != null, \r
+ "Current page is null be no overflow information has been obtained");\r
+ }\r
+\r
+ // Get the next page info while we still have the page\r
+ // latched.\r
+ nextPage = rh.getOverflowPage();\r
+ id = rh.getOverflowId();\r
+ }\r
+ \r
+ if ((curPage != this) && (curPage != null))\r
+ curPage.unlatch();\r
+\r
+ // get the next portion page and find the correct slot\r
+ curPage = (StoredPage) owner.getPage(nextPage);\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ SanityManager.ASSERT(\r
+ curPage.isOverflowPage(), \r
+ "following row chain gets a non-overflow page");\r
+ }\r
+\r
+ slot = curPage.findRecordById(id, FIRST_SLOT_NUMBER);\r
+ }\r
+\r
+ // Back to the head page. Get rid of all reserved space in the entire\r
+ // row post commit.\r
+ if (rowHasReservedSpace)\r
+ {\r
+ RawTransaction rxact = (RawTransaction)owner.getTransaction();\r
+\r
+ ReclaimSpace work = \r
+ new ReclaimSpace(ReclaimSpace.ROW_RESERVE,\r
+ headRowHandle, \r
+ rxact.getDataFactory(), true);\r
+ rxact.addPostCommitWork(work);\r
+ }\r
+ }\r
+\r
+ /**\r
+ See if the row on this page has reserved space that can be shrunk once\r
+ the update commits.\r
+ */\r
+ private boolean checkRowReservedSpace(int slot) throws StandardException\r
+ {\r
+ boolean rowHasReservedSpace = false;\r
+ try {\r
+ int shrinkage = getReservedCount(slot);\r
+\r
+ // Only reclaim reserved space if it is\r
+ // "reasonably" sized, i.e., we can reclaim at\r
+ // least MININUM_RECORD_SIZE_DEFAULT\r
+ int reclaimThreshold = RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT;\r
+ \r
+ if (shrinkage > reclaimThreshold) {\r
+ int totalSpace = getRecordPortionLength(slot) + shrinkage; \r
+\r
+ if (isOverflowPage()) {\r
+ if (totalSpace >\r
+ RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT+reclaimThreshold)\r
+ rowHasReservedSpace = true;\r
+\r
+ // Otherwise, I can at most reclaim less than\r
+ // MINIMUM_RECORD_SIZE_DEFAULT, forget about that.\r
+ } else {\r
+ // this is a head page\r
+ if (totalSpace > (minimumRecordSize +\r
+ RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT)) \r
+ rowHasReservedSpace = true;\r
+\r
+ // Otherwise, I can at most reclaim less than\r
+ // MINIMUM_RECORD_SIZE_DEFAULT, forget about that.\r
+ }\r
+ }\r
+ } catch (IOException ioe) {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);\r
+ }\r
+\r
+ return rowHasReservedSpace;\r
+ }\r
+\r
+ /**\r
+ @see BasePage#compactRecord\r
+ @exception StandardException Standard Derby error policy\r
+ */\r
+ protected void compactRecord(RawTransaction t, int slot, int id) \r
+ throws StandardException \r
+ {\r
+ // If this is a head row piece, first take care of the entire overflow\r
+ // row chain. Don't need to worry about long column because they are\r
+ // not in place updatable.\r
+ if (isOverflowPage() == false) {\r
+ StoredRecordHeader recordHeader = getHeaderAtSlot(slot);\r
+\r
+ while (recordHeader.hasOverflow()) {\r
+ StoredPage nextPageInRowChain =\r
+ getOverflowPage(recordHeader.getOverflowPage());\r
+\r
+ if (SanityManager.DEBUG)\r
+ SanityManager.ASSERT(nextPageInRowChain != null);\r
+\r
+ try {\r
+ int nextId = recordHeader.getOverflowId();\r
+ int nextSlot = getOverflowSlot(nextPageInRowChain, recordHeader);\r
+\r
+ nextPageInRowChain.compactRecord(t, nextSlot, nextId);\r
+\r
+ // Follow the next long row pointer.\r
+ recordHeader = nextPageInRowChain.getHeaderAtSlot(nextSlot);\r
+ } finally {\r
+ nextPageInRowChain.unlatch();\r
+ }\r
+ }\r
+ }\r
+\r
+ // Lastly, see if this row has anything sizable that can be freed.\r
+ // Try to only reclaim space larger than MINIMUM_RECORD_SIZE_DEFAULT\r
+ // because otherwise it is probably not worth the effort.\r
+ int reclaimThreshold = RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT;\r
+ try\r
+ {\r
+ int reserve = getReservedCount(slot);\r
+ if (reserve > reclaimThreshold) {\r
+ int recordLength = getRecordPortionLength(slot);\r
+ int correctReservedSpace = reserve;\r
+\r
+ if (isOverflowPage()) {\r
+ if ((reserve + recordLength) > \r
+ (RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT+reclaimThreshold))\r
+ { \r
+ // calculate what the correct reserved space is\r
+ if (recordLength >= RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT)\r
+ correctReservedSpace = 0;\r
+ else // make sure record takes up minimum_record_size \r
+ correctReservedSpace = \r
+ RawStoreFactory.MINIMUM_RECORD_SIZE_DEFAULT - recordLength; \r
+ }\r
+ } else {\r
+ // this is a head page\r
+ if ((reserve + recordLength) > \r
+ (minimumRecordSize+reclaimThreshold)) {\r
+ // calculate what the correct reserved space is\r
+ if (recordLength >= minimumRecordSize)\r
+ correctReservedSpace = 0;\r
+ else\r
+ correctReservedSpace = minimumRecordSize - recordLength;\r
+ }\r
+ }\r
+\r
+ if (SanityManager.DEBUG)\r
+ {\r
+ SanityManager.ASSERT(correctReservedSpace <= reserve,\r
+ "correct reserve > reserve");\r
+ }\r
+\r
+ // A shrinkage has occured.\r
+ if (correctReservedSpace < reserve) {\r
+ owner.getActionSet().\r
+ actionShrinkReservedSpace(t, this, slot, id,\r
+ correctReservedSpace, reserve);\r
+ }\r
+ }\r
+ } catch (IOException ioe) {\r
+ throw StandardException.newException(\r
+ SQLState.DATA_UNEXPECTED_EXCEPTION, ioe);\r
+ }\r
+ }\r
+}\r
+\r