Adding JMCR-Stable version
diff --git a/JMCR-Stable/real-world application/MyDerby-10.3/java/engine/org/apache/derby/impl/sql/execute/DeleteCascadeResultSet.java b/JMCR-Stable/real-world application/MyDerby-10.3/java/engine/org/apache/derby/impl/sql/execute/DeleteCascadeResultSet.java
new file mode 100644 (file)
index 0000000..d0c729a
--- /dev/null
+++ b/JMCR-Stable/real-world application/MyDerby-10.3/java/engine/org/apache/derby/impl/sql/execute/DeleteCascadeResultSet.java
@@ -0,0 +1,488 @@
+/*\r
+\r
+   Derby - Class org.apache.derby.impl.sql.execute.DeleteCascadeResultSet\r
+\r
+   Licensed to the Apache Software Foundation (ASF) under one or more\r
+   contributor license agreements.  See the NOTICE file distributed with\r
+   this work for additional information regarding copyright ownership.\r
+   The ASF licenses this file to you under the Apache License, Version 2.0\r
+   (the "License"); you may not use this file except in compliance with\r
+   the License.  You may obtain a copy of the License at\r
+\r
+      http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+   Unless required by applicable law or agreed to in writing, software\r
+   distributed under the License is distributed on an "AS IS" BASIS,\r
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+   See the License for the specific language governing permissions and\r
+   limitations under the License.\r
+\r
+ */\r
+\r
+package org.apache.derby.impl.sql.execute;\r
+\r
+import org.apache.derby.iapi.services.sanity.SanityManager;\r
+import org.apache.derby.iapi.error.StandardException;\r
+import org.apache.derby.iapi.sql.execute.ConstantAction;\r
+import org.apache.derby.iapi.sql.execute.CursorResultSet;\r
+import org.apache.derby.iapi.sql.execute.RowChanger;\r
+import org.apache.derby.iapi.sql.execute.NoPutResultSet;\r
+import org.apache.derby.iapi.sql.Activation;\r
+import org.apache.derby.iapi.sql.ResultDescription;\r
+import org.apache.derby.iapi.types.DataValueDescriptor;\r
+import org.apache.derby.iapi.sql.ResultSet;\r
+import org.apache.derby.iapi.store.access.ConglomerateController;\r
+import org.apache.derby.iapi.store.access.TransactionController;\r
+import org.apache.derby.iapi.sql.execute.ExecRow;\r
+import org.apache.derby.iapi.sql.execute.TemporaryRowHolder;\r
+\r
+import org.apache.derby.iapi.reference.SQLState;\r
+\r
+import java.util.Vector;\r
+import java.util.Hashtable;\r
+import java.util.Enumeration;\r
+\r
+/**\r
+ * Deletes the rows from the specified base table and executes delete/update\r
+ * actions on dependent tables according to the referential actions specified.\r
+ * Note (beetle 5197): a dependent result set of a DeleteCascadeResultSet can\r
+ * be any one of the multiple result sets generated for the same table when\r
+ * there are multiple foreign key relationships to that table. At bind time,\r
+ * dependents are bound only once per table.\r
+ * We cannot rely on the mainNodeForTable flag alone to fire actions on the\r
+ * dependents; that decision must be based on whether the result set has\r
+ * dependent result sets or not.\r
+ *\r
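+ * For example (an illustrative schema, not taken from this file): if a child\r
+ * table has two foreign keys that both reference the same parent table with\r
+ * ON DELETE CASCADE, two dependent result sets are generated for that child\r
+ * table, but the child table's own dependents are bound only once.\r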
+ */\r
+public class DeleteCascadeResultSet extends DeleteResultSet\r
+{\r
+\r
+\r
+       public ResultSet[] dependentResultSets;\r
+       private int noDependents =0;\r
+       private CursorResultSet parentSource;\r
+       private FKInfo parentFKInfo;\r
+       private long fkIndexConglomNumber;\r
+       private String resultSetId;\r
+       private boolean mainNodeForTable = true;  //true if this node fires the triggers/checks for its table\r
+       private boolean affectedRows = false;     //true if this result set found rows to delete\r
+       private int tempRowHolderId; //this result set's temporary row holder id\r
+\r
+    /*\r
+     * class interface\r
+        * @exception StandardException         Thrown on error\r
+     */\r
+    public DeleteCascadeResultSet\r
+       (\r
+               NoPutResultSet          source,\r
+               Activation                      activation,\r
+               int                             constantActionItem,\r
+               ResultSet[]                     dependentResultSets,\r
+               String                  resultSetId\r
+       )\r
+               throws StandardException\r
+    {\r
+\r
+               super(source,\r
+                         ((constantActionItem == -1) ?activation.getConstantAction() :\r
+                         (ConstantAction)activation.getPreparedStatement().getSavedObject(constantActionItem)),\r
+                         activation);\r
+\r
+               ConstantAction passedInConstantAction;\r
+               if(constantActionItem == -1)\r
+                       passedInConstantAction = activation.getConstantAction(); //root table\r
+               else\r
+               {\r
+                       passedInConstantAction = \r
+                               (ConstantAction) activation.getPreparedStatement().getSavedObject(constantActionItem);\r
+                       resultDescription = constants.resultDescription;\r
+               }\r
+               cascadeDelete = true;\r
+               this.resultSetId = resultSetId;\r
+               \r
+               if(dependentResultSets != null)\r
+               {\r
+                       noDependents = dependentResultSets.length;\r
+                       this.dependentResultSets = dependentResultSets;\r
+               }\r
+\r
+       }\r
+\r
+\r
+\r
+       /**\r
+               @exception StandardException Standard Derby error policy\r
+       */\r
+       public void open() throws StandardException\r
+       {\r
+\r
+\r
+               try{\r
+                       setup();\r
+                       if(isMultipleDeletePathsExist())\r
+                       {\r
+                               setRowHoldersTypeToUniqueStream();\r
+                               //collect until there are no more rows to be found\r
+                               while(collectAffectedRows(false));\r
+                       }else\r
+                       {\r
+                               collectAffectedRows(false);\r
+                       }\r
+                       if (! affectedRows)\r
+                       {\r
+                               activation.addWarning(\r
+                                                       StandardException.newWarning(\r
+                                                               SQLState.LANG_NO_ROW_FOUND));\r
+                       }\r
+\r
+                       runFkChecker(true); //check only for RESTRICT referential action rule violations\r
+                       Hashtable mntHashTable = new Hashtable(); //hash table to identify multiple-nodes-for-the-same-table cases\r
+                       mergeRowHolders(mntHashTable);\r
+                       fireBeforeTriggers(mntHashTable);\r
+                       deleteDeferredRows();\r
+                       runFkChecker(false); //check for all constraint violations\r
+                       rowChangerFinish();\r
+                       fireAfterTriggers();\r
+               }finally\r
+               {\r
+                       cleanUp();\r
+\r
+                       //clear the parent result sets hash table\r
+                       activation.clearParentResultSets();\r
+               }\r
+\r
+               endTime = getCurrentTimeMillis();\r
+\r
+    }\r
+       \r
+\r
+       /**\r
+        * Gathers the rows that need to be deleted/updated\r
+        * and creates the temporary result sets that are passed\r
+        * as sources to its dependent result sets.\r
+        */\r
+       void  setup() throws StandardException\r
+       {\r
+\r
+               /* Cache query plan text for source, before it gets blown away */\r
+               if (lcc.getRunTimeStatisticsMode())\r
+               {\r
+                       /* savedSource nulled after run time statistics generation */\r
+                       savedSource = source;\r
+               }\r
+\r
+               super.setup();\r
+               activation.setParentResultSet(rowHolder, resultSetId);\r
+               Vector sVector = (Vector) activation.getParentResultSet(resultSetId);\r
+               tempRowHolderId = sVector.size() -1;\r
+               for(int i =0 ; i < noDependents; i++)\r
+               {\r
+                       if(dependentResultSets[i] instanceof UpdateResultSet)\r
+                       {\r
+                               ((UpdateResultSet) dependentResultSets[i]).setup();\r
+                       }else\r
+                       {\r
+                               ((DeleteCascadeResultSet) dependentResultSets[i]).setup();\r
+                       }\r
+               }\r
+\r
+       }\r
+\r
+\r
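+       /**\r
+        * Collects the rows affected by this result set and, recursively, by its\r
+        * dependent result sets. Returns true if this node, any of its dependents,\r
+        * or the caller (via the rowsFound argument) found qualifying rows.\r
+        */\r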
+       boolean  collectAffectedRows(boolean rowsFound) throws StandardException\r
+       {\r
+               if(super.collectAffectedRows())\r
+               {\r
+                       affectedRows = true;\r
+                       rowsFound = true;\r
+               }\r
+\r
+               for(int i =0 ; i < noDependents; i++)\r
+               {\r
+                       if(dependentResultSets[i] instanceof UpdateResultSet)\r
+                       {\r
+                               if(((UpdateResultSet)dependentResultSets[i]).collectAffectedRows())\r
+                                       rowsFound = true;\r
+                       }else\r
+                       {\r
+                               if(((DeleteCascadeResultSet)\r
+                                       dependentResultSets[i]).collectAffectedRows(rowsFound))\r
+                                       rowsFound = true;\r
+                       }\r
+               }\r
+\r
+               return rowsFound;\r
+       }\r
+\r
+\r
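+       /**\r
+        * Fires the BEFORE triggers on the dependent tables and then, if this\r
+        * result set is the main node for its table and the delete is deferred,\r
+        * on this table as well.\r
+        */\r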
+       void fireBeforeTriggers(Hashtable msht) throws StandardException\r
+       {\r
+               if(!mainNodeForTable) \r
+               {\r
+                       /* Handle the case where no node for the table had qualified rows, in\r
+                        * which case no node for the table got marked as the main node for the\r
+                        * table. One way to identify such a case is to look at the multi-node\r
+                        * hash table and see whether the result set id exists; if it does not,\r
+                        * none of the table's result set nodes was marked as the main node. In\r
+                        * that case we mark this result set as the main node for the table and\r
+                        * put an entry in the hash table.\r
+                        */\r
+                       if(!msht.containsKey(resultSetId))\r
+                       {\r
+                               mainNodeForTable = true;\r
+                               msht.put(resultSetId, resultSetId);\r
+                       }\r
+               }\r
+               \r
+               //execute the before triggers on the dependents\r
+               //Defect 5743: Before enabling BEFORE triggers, check DB2 behavior.\r
+               for(int i =0 ; i < noDependents; i++)\r
+               {\r
+                       if(dependentResultSets[i] instanceof UpdateResultSet)\r
+                       {\r
+                               ((UpdateResultSet) dependentResultSets[i]).fireBeforeTriggers();\r
+                       }\r
+                       else{\r
+                               ((DeleteCascadeResultSet)dependentResultSets[i]).fireBeforeTriggers(msht);\r
+                       }\r
+               }\r
+\r
+               //If there is more than one node for the same table\r
+               //only one node fires the triggers\r
+               if(mainNodeForTable && constants.deferred)\r
+                       super.fireBeforeTriggers();\r
+       }\r
+\r
+    void fireAfterTriggers() throws StandardException\r
+       {\r
+               //fire the After Triggers on the dependent tables, if any rows changed\r
+               for(int i=0 ; i<noDependents && affectedRows; i++){\r
+                       if(dependentResultSets[i] instanceof UpdateResultSet)\r
+                       {\r
+                               ((UpdateResultSet) dependentResultSets[i]).fireAfterTriggers();\r
+                       }\r
+                       else{\r
+\r
+                               ((DeleteCascadeResultSet)dependentResultSets[i]).fireAfterTriggers();\r
+                       }\r
+               }\r
+\r
+               //If there is more than one node for the same table,\r
+               //we let only one node fire the triggers.\r
+               if(mainNodeForTable && constants.deferred)\r
+                       super.fireAfterTriggers();\r
+       }\r
+\r
+       void deleteDeferredRows() throws StandardException\r
+       {\r
+               \r
+               //delete the rows in the dependent tables\r
+               for(int i =0 ; i < noDependents; i++)\r
+               {\r
+                       if(dependentResultSets[i] instanceof UpdateResultSet)\r
+                       {\r
+                               ((UpdateResultSet) dependentResultSets[i]).updateDeferredRows();\r
+                       }\r
+                       else{\r
+                               ((DeleteCascadeResultSet)dependentResultSets[i]).deleteDeferredRows();\r
+                       }\r
+               }\r
+\r
+                       \r
+               //If there is more than one node for the same table\r
+               //only one node deletes all the rows.\r
+               if(mainNodeForTable)\r
+                       super.deleteDeferredRows();\r
+       }\r
+\r
+       \r
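+       /**\r
+        * Runs the foreign key/primary key checkers on the dependent result sets\r
+        * and, if this result set is the main node for its table, on this table as\r
+        * well. When restrictCheckOnly is true, only RESTRICT referential action\r
+        * rule violations are checked.\r
+        */\r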
+       void runFkChecker(boolean restrictCheckOnly) throws StandardException\r
+       {\r
+\r
+               //run the Foreign key or primary key Checker on the dependent tables\r
+               for(int i =0 ; i < noDependents; i++)\r
+               {               \r
+                       if(dependentResultSets[i] instanceof UpdateResultSet)\r
+                       {\r
+                               ((UpdateResultSet) dependentResultSets[i]).runChecker(restrictCheckOnly);\r
+                       }\r
+                       else{\r
+                               ((DeleteCascadeResultSet)dependentResultSets[i]).runFkChecker(restrictCheckOnly);\r
+                       }\r
+               }\r
+\r
+               //If there  is more than one node for the same table\r
+               //only one node does all foreign key checks.\r
+               if(mainNodeForTable)\r
+                       super.runFkChecker(restrictCheckOnly);\r
+       }\r
+\r
+\r
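+       /**\r
+        * Cleans up this result set and, recursively, its dependent result sets.\r
+        *\r
+        * @exception StandardException Standard Derby error policy\r
+        */\r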
+       public void cleanUp() throws StandardException\r
+       {\r
+\r
+               super.cleanUp();\r
+               for(int i =0 ; i < noDependents; i++)\r
+               {\r
+                       if(dependentResultSets[i] instanceof UpdateResultSet)\r
+                       {\r
+                               ((UpdateResultSet) dependentResultSets[i]).cleanUp();\r
+                       }else\r
+                       {\r
+                               ((DeleteCascadeResultSet) dependentResultSets[i]).cleanUp();\r
+                       }\r
+               }\r
+               \r
+               endTime = getCurrentTimeMillis();\r
+       }\r
+\r
+\r
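+       //finish the row changer for this result set and, recursively, for\r
+       //all of its dependent result sets\r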
+       private void rowChangerFinish() throws StandardException\r
+       {\r
+\r
+               rc.finish();\r
+               for(int i =0 ; i < noDependents; i++)\r
+               {\r
+                       if(dependentResultSets[i] instanceof UpdateResultSet)\r
+                       {\r
+                               ((UpdateResultSet) dependentResultSets[i]).rowChangerFinish();\r
+                       }else\r
+                       {\r
+                               ((DeleteCascadeResultSet) dependentResultSets[i]).rowChangerFinish();\r
+                       }\r
+               }\r
+       }\r
+\r
+\r
+\r
+       //If there is more than one node for the same table, copy the rows\r
+       //into one node, so that we don't fire triggers more than once.\r
+       private void mergeRowHolders(Hashtable msht) throws StandardException\r
+       {\r
+               if(msht.containsKey(resultSetId) || rowCount ==0)\r
+               {\r
+                       //There is already another result set node that is marked as the main\r
+                       //node for this table, or this result set has no qualified rows.\r
+                       //When none of the result set nodes for the table has any rows,\r
+                       //one of them is marked as the main node in fireBeforeTriggers().\r
+                       mainNodeForTable = false;\r
+               }else\r
+               {\r
+                       mergeResultSets();\r
+                       mainNodeForTable = true;\r
+                       msht.put(resultSetId, resultSetId);\r
+               }\r
+               \r
+               for(int i =0 ; i < noDependents; i++)\r
+               {               \r
+                       if(dependentResultSets[i] instanceof UpdateResultSet)\r
+                       {\r
+                               return; \r
+                       }\r
+                       else{\r
+                               ((DeleteCascadeResultSet)dependentResultSets[i]).mergeRowHolders(msht);\r
+                       }\r
+               }\r
+       }\r
+\r
+\r
+\r
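+       //copy the rows from all the other temporary row holders registered under\r
+       //this result set id into this result set's own row holder\r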
+       private void mergeResultSets() throws StandardException\r
+       {\r
+               Vector sVector = (Vector) activation.getParentResultSet(resultSetId);\r
+               int size = sVector.size();\r
+               // if there is more than one source, we need to merge them into one\r
+               // temporary result set.\r
+               if(size > 1)\r
+               {\r
+                       ExecRow         row = null;\r
+                       int rowHolderId = 0 ;\r
+                               //copy all the values from the other row holders into the current result set's row holder\r
+                       while(rowHolderId <  size)\r
+                       {\r
+                               if(rowHolderId == tempRowHolderId )\r
+                               {\r
+                                       //skipping the row holder that  we are copying the rows into.\r
+                                       rowHolderId++;\r
+                                       continue;\r
+                               }\r
+                               TemporaryRowHolder currentRowHolder = (TemporaryRowHolder)sVector.elementAt(rowHolderId);       \r
+                               CursorResultSet rs = currentRowHolder.getResultSet();\r
+                               rs.open();\r
+                               while ((row = rs.getNextRow()) != null)\r
+                               {\r
+                                       rowHolder.insert(row);\r
+                               }\r
+                               rs.close();\r
+                               rowHolderId++;\r
+                       }\r
+                       \r
+               }\r
+       }\r
+\r
+\r
+       public void finish() throws StandardException {\r
+               super.finish();\r
+               \r
+               //clear the parent result sets hash table\r
+               //This is necessary in case we hit any error conditions.\r
+               activation.clearParentResultSets();\r
+       }\r
+\r
+\r
+       /* Check whether we have a multiple-path delete scenario; if\r
+       ** we find any, return true. Multiple delete paths exist if we find more\r
+       ** than one parent source result set for a table involved in the delete cascade.\r
+       **/\r
+       private boolean isMultipleDeletePathsExist()\r
+       {\r
+               Hashtable parentResultSets = activation.getParentResultSets();\r
+               for (Enumeration e = parentResultSets.keys() ; e.hasMoreElements() ;) \r
+               {\r
+                       String rsId  = (String) e.nextElement();\r
+                       Vector sVector = (Vector) activation.getParentResultSet(rsId);\r
+                       int size = sVector.size();\r
+                       if(size > 1)\r
+                       {\r
+                               return true;\r
+                       }\r
+               }\r
+               return false;\r
+       }\r
+\r
+       /*\r
+       ** In cases where we have multiple paths, we could get the same\r
+       ** rows to be deleted multiple times, and in the case of cycles\r
+       ** new rows might get added to the row holders through\r
+       ** multiple iterations. To handle these cases we set the temporary\r
+       ** row holders to be of the 'uniqStream' type.\r
+       **/\r
+       private void setRowHoldersTypeToUniqueStream()\r
+       {\r
+               Hashtable parentResultSets = activation.getParentResultSets();\r
+               for (Enumeration e = parentResultSets.keys() ; e.hasMoreElements() ;) \r
+               {\r
+                       String rsId  = (String) e.nextElement();\r
+                       Vector sVector = (Vector) activation.getParentResultSet(rsId);\r
+                       int size = sVector.size();\r
+                       int rowHolderId = 0 ;\r
+                       while(rowHolderId <  size)\r
+                       {\r
+                               TemporaryRowHolder currentRowHolder = (TemporaryRowHolder)sVector.elementAt(rowHolderId);       \r
+                               currentRowHolder.setRowHolderTypeToUniqueStream();\r
+                               rowHolderId++;\r
+                       }\r
+               }\r
+       }\r
+\r
+}\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r
+\r