--- /dev/null
+/*\r
+\r
+ Derby - Class org.apache.derby.impl.services.cache.Clock\r
+\r
+ Licensed to the Apache Software Foundation (ASF) under one or more\r
+ contributor license agreements. See the NOTICE file distributed with\r
+ this work for additional information regarding copyright ownership.\r
+ The ASF licenses this file to you under the Apache License, Version 2.0\r
+ (the "License"); you may not use this file except in compliance with\r
+ the License. You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+\r
+ */\r
+\r
+package org.apache.derby.impl.services.cache;\r
+\r
+import org.apache.derby.iapi.services.cache.CacheManager;\r
+import org.apache.derby.iapi.services.cache.Cacheable;\r
+import org.apache.derby.iapi.services.cache.CacheableFactory;\r
+import org.apache.derby.iapi.services.cache.SizedCacheable;\r
+import org.apache.derby.iapi.services.context.ContextManager;\r
+import org.apache.derby.iapi.services.daemon.DaemonService;\r
+import org.apache.derby.iapi.services.daemon.Serviceable;\r
+\r
+import org.apache.derby.iapi.error.StandardException;\r
+\r
+import org.apache.derby.iapi.services.monitor.Monitor;\r
+\r
+import org.apache.derby.iapi.services.sanity.SanityManager;\r
+\r
+import org.apache.derby.iapi.services.cache.ClassSize;\r
+import org.apache.derby.iapi.util.Matchable;\r
+import org.apache.derby.iapi.util.Operator;\r
+import org.apache.derby.iapi.reference.SQLState;\r
+\r
+import java.util.ArrayList;\r
+import java.util.HashMap;\r
+import java.util.Iterator;\r
+import java.util.Collection;\r
+import java.util.Properties;\r
+\r
+\r
+/**\r
+ A cache manager that uses a HashMap and an ArrayList. The ArrayList holds\r
+ CachedItem objects, each with a holder object. The HashMap is keyed\r
+ by the identity of the holder object (Cacheable.getIdentity()) and\r
+ the data portion is a pointer to the CachedItem. CachedItems that have\r
+ holder objects with no identity do not have entries in the hash map.\r
+ <P>\r
	CachedItems can be in various states.
+ <UL>\r
+ <LI>isValid - the entry has a valid identity\r
+ <LI>inCreate - the entry is being created or being faulted in from persistent store\r
+ <LI>inClean - the entry is being written out to persistent store\r
+ <LI>isKept - the entry is currently being looked at/updated, do not remove or\r
+ clean it.\r
	</UL>
+\r
+ <P>Multithreading considerations:<BR>\r
+ A clock cache manager must be MT-safe.\r
+ All member variables are accessed single threaded (synchronized on this) or\r
+ set once or readonly. Assumptions: holders size() and addElement must be\r
+ synchronized.\r
+ <BR>\r
+ CachedItem is never passed out of the clock cache manager, only the\r
+ Cacheable object is. The cachedItem is responsible for the setting and\r
+ clearing of its own member fields (RESOLVE: now they are done in cache\r
	manager, need to be moved to the cachedItem). The cache manager
	follows these rules while accessing a cachedItem:
+ <UL>\r
+ <LI>invalid item is never returned from the cache\r
+ <LI>setValidState and isValid() is only called single threaded through the cache manager.\r
+ <LI>keep() and isKept() is only called single threaded through the cache\r
+ manager once the item has been added to the holders array\r
+ <LI>item that isKept() won't be cleaned or removed or invalidated from the cache.\r
+ <LI>item that is inClean() or inCreate(), the cache manager\r
+ will wait on the cachedItem to finish cleaning or creating before it\r
+ returns the cached item outside of the cache.\r
+ </UL>\r
+ <BR>\r
+ The cacheable must be cleaned thru the cache if it is managed by a cache.\r
	On CachedItem, an inClean state is maintained to stabilize the content of the
+ cacheable while it is being cleaned. Only unkept items are cleaned. If an\r
+ item is found to be inClean, it will wait until it exits the inClean state.\r
	If a cached item calls its own clean method without notifying the cache, it
	has to stabilize its content for the duration of the clean.
+ <BR>\r
+ It is assumed that the cacheable object maintain its own MT-safeness.<BR>\r
+\r
+ @see CachedItem\r
+ @see Cacheable\r
+\r
+*/\r
+\r
+final class Clock implements CacheManager, Serviceable {\r
+\r
    /*
    ** Fields
    */

    /** Hit/miss/create/eviction counters for this cache. */
    private final CacheStat stat;

    /**
     * Lookup table mapping a Cacheable identity (Cacheable.getIdentity())
     * to its CachedItem. Holders with no identity have no entry here.
     * Guarded by synchronized (this).
     */
    private final HashMap cache_;

    private DaemonService cleaner;  // the background worker thread who is going to
                                    // do pre-flush for this cache. 

    /** Circular list of CachedItem slots that the clock hand sweeps over. */
    private final ArrayList holders;

    /** Count of items in holders that currently hold a valid identity. */
    private int validItemCount = 0;

    /** Capacity limit: an entry count, or a byte count when useByteCount is set. */
    private long maximumSize;

    private boolean useByteCount; // regulate the total byte count or the entry count

    private long currentByteCount = 0;
    /* currentByteCount should be the sum of entry.getSize() for all entries in the cache.
     * That is, it should be the sum of getItemSize( item, false) for each item in the holders
     * vector.
     */

    // Estimated fixed bookkeeping cost per cached item; used when the cache
    // is regulated by byte count rather than entry count.
    private static final int ITEM_OVERHEAD = ClassSize.estimateBaseFromCatalog( CachedItem.class)
    + ClassSize.getRefSize() // one ref per item in the holder ArrayList
    + ClassSize.estimateHashEntrySize();

    /** Factory used to manufacture new Cacheable holder objects. */
    private final CacheableFactory holderFactory;

    private boolean active; // true if active for find/create
    private String name;    // name of the cache, mainly for debugging purposes.
    private int clockHand;  // the sweep of the clock hand


    private int myClientNumber;   // use this number to talk to cleaner service
    private boolean wokenToClean; // true if the client was woken to clean, false if to shrink
    private boolean cleanerRunning;
    private boolean needService;
+\r
    /**
        Construct a new clock cache manager.

        <P>MT - not needed for constructor.

        @param holderFactory factory used to manufacture the Cacheable
        holder objects this cache manages
        @param name the name of the cache
        @param initialSize the initial number of cacheable objects this cache
        holds.
        @param maximumSize the maximum size of the cache. The cache may grow
        from initialSize to maximumSize if the cache policy notices that there
        are not enough free buffers available. Once the cache hits maximumSize
        it will not grow. If the cache is full, an exception will be thrown.
        @param useByteCount if true, maximumSize is interpreted as a total
        byte count; otherwise it is an entry count.
    */
    Clock(CacheableFactory holderFactory, String name,
          int initialSize, long maximumSize, boolean useByteCount) {
        // 0.95 load factor: favor a dense hash table over fewer collisions.
        cache_ = new HashMap(initialSize, (float) 0.95);
        this.maximumSize = maximumSize;
        this.holderFactory = holderFactory;
        this.useByteCount = useByteCount;

        if (SanityManager.DEBUG) {
            if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace)) {
                SanityManager.DEBUG(ClockFactory.CacheTrace, "initializing " + name + " cache to size " + initialSize);
            }
        }

        //int delta = initialSize / 2;
        //if (delta < 5)
        //	delta = 5;

        holders = new ArrayList(initialSize);
        this.name = name;
        active = true;

        this.stat = new CacheStat();
        stat.initialSize = initialSize;
        stat.maxSize = maximumSize;
    }
+\r
    /**
        Find the object or materialize one in the cache. If it has not been
        created in the persistent store yet, return null.

        <P>MT - must be MT-safe. The cache is single threaded through finding
        the item in cache and finding a free item if it is not in cache, thus
        preventing another thread from creating the same item while it is being
        faulted in. (RESOLVE - this is really low performance if the cache
        cleaner cannot keep a steady supply of free items and we have to do an
        I/O while blocking the cache). If it needs to be faulted in, the
        inCreate bit is set. The item is kept before it exits the sync block. 
        <BR>
        If the item is in cache but in the middle of being faulted in or
        cleaned, it needs to wait until this is done before returning.
        <BR>
        The keep status prevents other threads from removing this item. 
        The inCreate status prevents other threads from looking at or writing
        out this item while it is being faulted in.
        (RESOLVE: need to handle the case where the object is marked for
        removal and being waited on)

        @param key the key to the object
        @return a cacheable object that is kept in the cache.
        @exception StandardException Standard Derby error policy
    */
    public Cacheable find(Object key) throws StandardException {
        CachedItem item;
        boolean add;

        /*
        ** We will only loop if someone else tried to add the
        ** same key as we did and they failed. In this case
        ** we start all over. An example of this would be an
        ** attempt to cache an object that failed due to a 
        ** transient error (e.g. deadlock), which should not
        ** prevent another thread from trying to add the 
        ** key to the cache (e.g. it might be the one holding
        ** the lock that caused the other thread to deadlock).
        */
        while (true)
        {
            add = false;

            synchronized (this) {

                // a shut-down cache returns nothing
                if (!active)
                    return null;

                item = (CachedItem) cache_.get(key);

                if (item != null) {
                    // Pin the item before leaving the monitor so it cannot
                    // be evicted between here and item.use() below.
                    item.keepAfterSearch();

                    stat.findHit++;

                    if (SanityManager.DEBUG) {
                        if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace)) 
                        {
                            SanityManager.DEBUG(ClockFactory.CacheTrace, name + ": Found key " +
                                key + " already in cache, item " + item);
                        }
                    }
                }
            } // synchronized(this)

            // no entry was found, need to add one
            if (item == null) {

                // get a free item; this may block and do I/O, so it is done
                // outside the monitor
                item = findFreeItem();

                stat.findMiss++;

                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace)) 
                    {
                        SanityManager.DEBUG(ClockFactory.CacheTrace, name + ": Find key " +
                            key + " Not in cache, get free item " + item);
                    }
                }


                if (SanityManager.DEBUG)
                    SanityManager.ASSERT(item != null, "found null item");

                synchronized (this) {
                    // re-check: another thread may have inserted the key
                    // while we were hunting for a free item
                    CachedItem inCacheItem = (CachedItem) cache_.get(key);

                    if (inCacheItem != null) {
                        // some-one beat us to adding an item into the cache,
                        // just use that one
                        item.unkeepForCreate();

                        item = inCacheItem;
                        item.keepAfterSearch();
                    } else {
                        // yes, we really are the ones to add it
                        cache_.put(key, item);
                        add = true;
                        if (SanityManager.DEBUG) {

                            if (SanityManager.DEBUG_ON("memoryLeakTrace")) {

                                // warn when the table is >10% over capacity
                                if (cache_.size() > ((11 * maximumSize) / 10))
                                    System.out.println
                                        ("memoryLeakTrace:Cache:" + name +
                                         " " + cache_.size());
                            }
                        }
                    }
                } // synchronized(this)
            } // if (item == null)

            if (add) {

                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace))
                    {
                        SanityManager.DEBUG(ClockFactory.CacheTrace, name + " Added " + 
                            key + " to cache, item " + item);
                    }
                }

                stat.findFault++;

                // fault the object in (forCreate == false); addEntry also
                // handles failure by removing the key from the map
                return addEntry(item, key, false, (Object) null);
            }

            // May block until another thread finishes faulting in/cleaning
            // the item.
            Cacheable entry = item.use();
            if (entry == null) {
                // item was not added by the other user successfully ...
                synchronized (this) {
                    item.unkeep();
                }

                // try to hash the key again (see
                // comment at head of loop)
                continue;
            }

            return entry;
        }
    }
+\r
+\r
+ /**\r
+ Find an object in the cache. Do not fault in or create the object if\r
+ is is not found in the cache.\r
+\r
+ <P>MT - must be MT-safe. The cache is single threaded through finding\r
+ the item in cache. If it needs to wait for it to be faulted in or\r
+ cleaned it is synchronized/waited on the cached item itself.\r
+\r
+ @param key the key to the object\r
+ @return a cacheable object that is kept in the cache.\r
+ */\r
+\r
+ public Cacheable findCached(Object key) throws StandardException {\r
+\r
+\r
+ CachedItem item;\r
+\r
+ synchronized (this) {\r
+\r
+ if (!active)\r
+ return null;\r
+ \r
+ item = (CachedItem) cache_.get(key);\r
+\r
+ if (item == null) {\r
+ stat.findCachedMiss++;\r
+ return null;\r
+ } else\r
+ stat.findCachedHit++;\r
+ \r
+ item.keepAfterSearch();\r
+ } // synchronized(this)\r
+\r
+ Cacheable entry = item.use();\r
+ if (entry == null) {\r
+ // item was not added by the other user successfully ...\r
+ synchronized (this) {\r
+ item.unkeep();\r
+ }\r
+ }\r
+\r
+ return entry;\r
+ }\r
+\r
+\r
+ /**\r
+ * Mark a set of entries as having been used. Normally this is done as a side effect\r
+ * of find() or findCached. Entries that are no longer in the cache are ignored.\r
+ *\r
+ * @param keys the key of the used entry.\r
+ */\r
+ public void setUsed( Object[] keys)\r
+ {\r
+ CachedItem item;\r
+\r
+ for( int i = 0; i < keys.length;)\r
+ {\r
+ // Do not hold the synchronization lock for too long.\r
+ synchronized (this)\r
+ {\r
+ if (!active)\r
+ return;\r
+\r
+ int endIdx = i + 32;\r
+ if( endIdx > keys.length)\r
+ endIdx = keys.length;\r
+ for( ; i < endIdx; i++)\r
+ {\r
+ if( keys[i] == null)\r
+ return;\r
+ \r
+ item = (CachedItem) cache_.get(keys[i]);\r
+ if( null != item)\r
+ item.setUsed( true);\r
+ }\r
+ } // synchronized(this)\r
+ }\r
+ } // end of setUsed\r
+\r
    /**
        Create a new object with the said key.

        <P>MT - must be MT-safe. Single thread thru verifying no such item
        exists in cache and finding a free item, keep the item and set inCreate
        state. The actual creating of the object is done outside
        the sync block and is protected by the isKept and inCreate bits.

        @param key the key to the object
        @return a cacheable object that is kept in the cache. 

        @exception StandardException Standard Derby error policy; in
        particular OBJECT_EXISTS_IN_CACHE if the key is already present.
    */
    public Cacheable create(Object key, Object createParameter) throws StandardException {

        // assume the item is not already in the cache
        CachedItem item = findFreeItem();

        stat.create++;

        synchronized (this) {

            // a shut-down cache creates nothing
            if (!active)
                return null;

            if (cache_.get(key) != null) {

                // someone already created this key: give back the free
                // item we grabbed before failing
                item.unkeepForCreate();

                throw StandardException.newException(SQLState.OBJECT_EXISTS_IN_CACHE, this.name, key);
            }

            cache_.put(key, item);

            if (SanityManager.DEBUG) {

                if (SanityManager.DEBUG_ON("memoryLeakTrace")) {

                    // warn when the table is >10% over capacity
                    if (cache_.size() > ((11 * maximumSize) / 10))
                        System.out.println
                            ("memoryLeakTrace:Cache:" + name + " " +
                             cache_.size());
                }
            }
        } // synchronized(this)

        // materialize the object outside the monitor (forCreate == true)
        Cacheable entry = addEntry(item, key, true, createParameter);

        if (SanityManager.DEBUG) {
            if (entry != null)
                SanityManager.ASSERT(item.getEntry() == entry);
        }

        return entry;
    }
+\r
+\r
    /**
        The caller is no longer looking at or updating the entry. Since there
        could be more than one piece of code looking at this entry, release
        does not mean nobody is looking at or updating the entry, just one
        less. If the cacheable is marked for remove (someone is waiting to
        remove the persistent object once nobody is looking at it), then notify
        the waiter if this is the last one looking at it.
        <BR>
        Unless there is a good reason to do otherwise, release should be used
        to release a cachable and not directly call cachedItem unkeep, since
        unkeep does not handle the case of remove.


        <P>MT - must be MT-safe. Getting and deleting item from the hash map
        is in the same synchronized block. If the cacheable object is waiting
        to be removed, that is synchronized thru the cachedItem itself
        (RESOLVE: need to move this sync block to cachedItem instead)

        @param entry the cached entry

    */
    public void release(Cacheable entry) {
        boolean removeItem;
        CachedItem item;
        long toShrink = 0;

        synchronized (this) {

            item = (CachedItem) cache_.get(entry.getIdentity());

            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(item != null, "item null");
                SanityManager.ASSERT(item.getEntry() == entry, "entry not equals keyed entry");
                SanityManager.ASSERT(item.isKept(), "item is not kept in release(Cachable)");
            }

            // unkeep() returns true only when the item is marked for remove
            // and we were the last keeper
            removeItem = item.unkeep();

            if (removeItem) {

                cache_.remove(entry.getIdentity());

                // we keep the item here to stop another thread trying to evict it
                // while we are destroying it.
                item.keepForClean();
            }

            if (cleaner == null) {
                // no background cleaner: try to shrink the cache on a release
                toShrink = shrinkSize(getCurrentSizeNoSync());
            }
        } // synchronized(this)

        if (removeItem) {

            // wake the thread waiting in remove() for the last keeper to go
            item.notifyRemover();
        }

        // shrink outside the monitor so we don't block other cache users
        if (toShrink > 0)
            performWork(true /* shrink only */);
    }
+\r
+ private void release(CachedItem item) {\r
+\r
+ boolean removeItem;\r
+\r
+ synchronized (this) {\r
+\r
+ if (SanityManager.DEBUG) {\r
+ SanityManager.ASSERT(item.isKept(), "item is not kept in released(CachedItem)");\r
+ }\r
+\r
+ removeItem = item.unkeep();\r
+\r
+ if (removeItem) {\r
+ \r
+ cache_.remove(item.getEntry().getIdentity());\r
+\r
+ // we keep the item here to stop another thread trying to evict it\r
+ // while we are destroying it.\r
+ item.keepForClean();\r
+ }\r
+ } // synchronized(this)\r
+\r
+ if (removeItem) {\r
+\r
+ item.notifyRemover();\r
+ }\r
+ }\r
+\r
    /**
        Remove an object from the cache. The item will be placed into the NoIdentity
        state through clean() (if required) and clearIdentity(). The removal of the
        object will be delayed until it is not kept by anyone.

        After this call the caller must throw away the reference to item.

        <P>MT - must be MT-safe. Single thread thru finding and setting the
        remove state of the item, the actual removal of the cacheable is
        synchronized on the cachedItem itself.

        @exception StandardException Standard Derby error policy.
    */
    public void remove(Cacheable entry) throws StandardException {

        boolean removeNow;
        CachedItem item;
        long origItemSize = 0;

        stat.remove++;

        synchronized (this) {



            item = (CachedItem) cache_.get(entry.getIdentity());

            if (SanityManager.DEBUG) {
                SanityManager.ASSERT(item != null);
                SanityManager.ASSERT(item.getEntry() == entry);
                SanityManager.ASSERT(item.isKept());
            }
            // remember the pre-removal size so the byte count can be
            // adjusted once the identity has been cleared
            if( useByteCount)
                origItemSize = getItemSize( item);

            item.setRemoveState();
            // removeNow is true when the caller held the only keep; otherwise
            // the last releaser will do the removal handshake
            removeNow = item.unkeep(); 

            if (removeNow) {
                cache_.remove(entry.getIdentity());
                item.keepForClean();
            }
        } // synchronized(this)

        try {
            // if removeNow is false then this thread may sleep
            item.remove(removeNow);

        } finally {

            synchronized (this)
            {
                // in the case where this thread didn't call keepForClean() the thread
                // that woke us would have called keepForClean.
                item.unkeep();
                item.setValidState(false);
                validItemCount--;
                item.getEntry().clearIdentity();
                // account for the size change caused by clearing the identity
                if( useByteCount)
                    currentByteCount += getItemSize( item) - origItemSize;
            } // synchronized(this)
        }

    }
+\r
+ /**\r
+ Clean all objects in the cache.\r
+ */\r
+ public void cleanAll() throws StandardException {\r
+ stat.cleanAll++;\r
+ cleanCache((Matchable) null);\r
+ }\r
+\r
+ /**\r
+ Clean all objects that match a partial key.\r
+ */\r
+ public void clean(Matchable partialKey) throws StandardException {\r
+\r
+ cleanCache(partialKey);\r
+ }\r
+\r
    /**
        Age as many objects as possible out of the cache.
        Every valid item that is neither kept nor dirty has its identity
        cleared; the shrink bookkeeping below only decides whether the
        holders list is trimmed afterwards.

        <BR>MT - thread safe

        @see CacheManager#ageOut
    */
    public void ageOut() {

        stat.ageOut++;
        synchronized (this) {

            int size = holders.size();
            // how many bytes/entries over target we currently are
            long toShrink = shrinkSize(getCurrentSizeNoSync());
            boolean shrunk = false;

            for (int position = 0; position < size; position++) {
                CachedItem item = (CachedItem) holders.get(position);

                // skip items that are in use
                if (item.isKept())
                    continue;
                // skip items that are already free
                if (!item.isValid())
                    continue;

                // dirty items must be cleaned first; ageOut never writes
                if (item.getEntry().isDirty()) {
                    continue;
                }

                // clear the identity; returns the item's size for the
                // shrink accounting below
                long itemSize = removeIdentity(item);

                if (toShrink > 0) {

                    if (SanityManager.DEBUG) {
                        if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace)) {
                            SanityManager.DEBUG(ClockFactory.CacheTrace, name + 
                                " shrinking item " + item + " at position " + position);
                        }
                    }

                    toShrink -= itemSize;
                    shrunk = true;
                }

            } // end of for loop

            // release the now-free slots at the end of the holders list
            if (shrunk)
                trimToSize();

        } // synchronized(this)
    } // end of ageOut
+\r
+ /**\r
+ MT - synchronization provided by caller\r
+\r
+ @exception StandardException Standard Derby error policy.\r
+ */\r
+ public void shutdown() throws StandardException {\r
+\r
+ if (cleaner != null) {\r
+ cleaner.unsubscribe(myClientNumber);\r
+ cleaner = null;\r
+ }\r
+\r
+ synchronized (this) {\r
+ active = false;\r
+ }\r
+\r
+ ageOut();\r
+ cleanAll();\r
+ ageOut();\r
+ }\r
+\r
    /**
        MT - synchronization provided by caller

        Use this daemon service as the background cleaner for this cache.
        If a cleaner was already in use, it is unsubscribed first.

        @param daemon the daemon service to subscribe to
    */
    public void useDaemonService(DaemonService daemon)
    {
        // if we were using another cleaner, unsubscribe first
        if (cleaner != null)
            cleaner.unsubscribe(myClientNumber);

        cleaner = daemon;
        // onDemandOnly: the cleaner runs only when explicitly asked to
        myClientNumber = cleaner.subscribe(this, true /* onDemandOnly */);
    }
    /**
        Discard all objects that match the partial key.
        Dirty entries are discarded without being written out; kept entries
        are skipped and cause a false return value.

        <BR>MT - thread safe

        @param partialKey the partial key to match; null matches everything
        @return true if every matching entry was discarded, false if any
        matching entry had to be skipped because it was kept
    */
    public boolean discard(Matchable partialKey) {

        // we miss something because it was kept
        boolean noMisses = true;

        synchronized (this) {

            int size = holders.size();
            // how many bytes/entries over target we currently are
            long toShrink = shrinkSize(getCurrentSizeNoSync());
            boolean shrunk = false;

            for (int position = 0; position < size; position++) {
                CachedItem item = (CachedItem) holders.get(position);

                // free slots have nothing to discard
                if (!item.isValid())
                    continue;

                Object key = item.getEntry().getIdentity();

                if (partialKey != null && !partialKey.match(key))
                    continue;

                if (item.isKept())
                {
                    noMisses = false;
                    continue;
                }

                // clear the identity; returns the item's size for the
                // shrink accounting below
                long itemSize = removeIdentity(item);

                if (toShrink > 0) {

                    if (SanityManager.DEBUG) {
                        if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace)) {
                            SanityManager.DEBUG(ClockFactory.CacheTrace, name + 
                                " shrinking item " + item + " at position " + position);
                        }
                    }

                    // and we shrunk one item
                    toShrink -= itemSize;
                    shrunk = true;
                }
            } // for (int position = 0;...

            // release the now-free slots at the end of the holders list
            if (shrunk)
                trimToSize();
        } // synchronized(this)

        return noMisses;
    }
+\r
    /**
        Add a new CachedItem and a holder object to the cache. The holder object
        is returned kept.
        On failure (takeOnIdentity throws or returns null) the key mapping is
        removed and any threads waiting on the item are notified.

        <P>MT - need to be MT-safe. The insertion of the key into the hash
        table is synchronized on this.

        @param item the free, kept item that will take on the identity
        @param key the identity to take on
        @param forCreate true when called from create(), false from find()
        @param createParameter passed through to the Cacheable on create
        @return the kept Cacheable, or null if the object does not exist
        @exception StandardException Standard Derby error policy
    */
    private Cacheable addEntry(CachedItem item, Object key, boolean forCreate, Object createParameter)
        throws StandardException {

        Cacheable entry = null;
        long origEntrySize = 0;
        if( useByteCount)
            origEntrySize = getItemSize( item);

        try
        {
            if (SanityManager.DEBUG) {
                if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace))
                {
                    SanityManager.DEBUG(ClockFactory.CacheTrace, name + 
                        " item " + item + " take on identity " + key);
                }
            }

            // tell the object it needs to create itself
            entry = item.takeOnIdentity(this, holderFactory, key, forCreate, createParameter);
        }
        finally
        {
            boolean notifyWaiters;
            synchronized (this) {

                // drop the provisional mapping made under the caller's key
                Object removed = cache_.remove(key);
                if (SanityManager.DEBUG) {
                    SanityManager.ASSERT(removed == item);
                }

                if (entry != null) {
                    // put the actual key into the hash table, not the one that was passed in
                    // for the find or create. This is because the caller may re-use the key
                    // for another cache operation, which would corrupt our hashtable
                    cache_.put(entry.getIdentity(), item);
                    if( useByteCount)
                        currentByteCount += ((SizedCacheable) entry).getSize() - origEntrySize;
                    item.setValidState(true);
                    validItemCount++;
                    notifyWaiters = true;
                } else {
                    // creation failed or object does not exist: free the item
                    item.unkeep();
                    notifyWaiters = item.isKept();
                }
            } // synchronized(this)

            // whatever the outcome, we have to notify waiters ...
            if (notifyWaiters)
                item.settingIdentityComplete();
        }

        return entry;
    }
+\r
+ \r
    /**
        Return a free, kept CachedItem: by evicting via the clock sweep when
        the cache is at capacity, by reusing an invalid (free) slot, or by
        growing the cache as a last resort.

        @return a kept, invalid CachedItem ready to take on an identity
        @exception StandardException Standard Derby error policy
    */
    private CachedItem findFreeItem() throws StandardException {

        // Need to avoid thrashing the cache when we start out
        // so if the cache is smaller than its maximum size
        // then that's a good indication we should grow.

        long currentSize = getCurrentSize();


        if (currentSize >= maximumSize) {
            // look at 20%
            CachedItem item = rotateClock(0.2f);
            if (item != null)
                return item;
        }

        // However, if the cache contains a large number of invalid
        // items then we should see if we can avoid growing.
        // This avoids simple use of Derby looking like
        // a memory leak, as the page cache fills the holders array
        // with page objects including the 4k (or 32k) pages.
        // size() is the number of valid entries in the hash table


        // no need to sync on getting the sizes since if they are
        // wrong we will discover it in the loop.
        if (validItemCount < holders.size()) {

            synchronized (this) {

                // 1) find out how many invalid items there are in the
                //    cache
                // 2) search for a free invalid item
                // 3) stop searching when there are no more invalid
                //    items to find

                int invalidItems = holders.size() - validItemCount;

                // Invalid items might occur in the cache when
                //   a) a new item is created in growCache(), but it
                //      is not in use yet, or
                //   b) an item is deleted (usually when a table is
                //      dropped)

                // It is critical to break out of the loop as soon as
                // possible since we are blocking others trying to
                // access the page cache. New items are added to the
                // end of the page cache, so the search for invalid
                // items should start from the end.

                for (int i = holders.size() - 1; (invalidItems > 0) && (i >= 0) ; i--) {
                    CachedItem item = (CachedItem) holders.get(i);

                    if (item.isKept()) {
                        // a kept invalid item still counts against the
                        // number of invalid items left to find
                        if (!item.isValid()) invalidItems--;
                        continue;
                    }

                    // found a free item, just use it
                    if (!item.isValid()) {
                        item.keepForCreate();
                        return item;
                    }
                }
            } // synchronized(this)
        }

        // nothing reusable: add a brand-new item to the holders list
        return growCache();
    }
+\r
    /**
        Go through the list of holder objects and find a free one.
        <P>MT - must be MT-safe. The moving of the clockHand and finding of an
        eviction candidate is synchronized. The cleaning of the cachable is
        handled by the cacheable itself.

        @param percentOfClock fraction of the holders list to examine per
        sweep (doubled to 2x the item count for very small caches)
        @return a kept, invalid item, or null if none could be found within
        the items examined
        @exception StandardException Standard Derby error policy
    */
    private CachedItem rotateClock(float percentOfClock) throws StandardException
    {
        // statistics -- only used in debug
        int evictions = 0;
        int cleaned = 0;
        int resetUsed = 0;
        int iskept = 0;

        // When we are managing the entry count (useByteCount == false) this method just
        // has to find or manufacture an available item (a cache slot). When we are managing
        // the total byte count then this method must find both available space and an
        // available item.
        CachedItem availableItem = null;

        boolean kickCleaner = false;

        try {


            // this can be approximate
            int itemCount = holders.size();
            int itemsToCheck;
            if (itemCount < 20)
                itemsToCheck = 2 * itemCount;
            else
                itemsToCheck = (int) (((float) itemCount) * percentOfClock);


            // if we can grow then shrinking is OK too, if we can't grow
            // then shrinking the cache won't help us find an item.
            long toShrink = shrinkSize(getCurrentSize());

restartClock:
            for (; itemsToCheck > 0;) {

                CachedItem item = null;

                synchronized (this) {

                    if (SanityManager.DEBUG) {
                        if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace))
                        {
                            SanityManager.DEBUG(ClockFactory.CacheTrace, name + " rotateClock starting " +
                                clockHand + " itemsToCheck " + itemsToCheck);
                        }
                    }

                    // size of holders cannot change while in the synchronized block.
                    int size = holders.size();
                    for (; itemsToCheck > 0; item = null, itemsToCheck--, incrClockHand())
                    {
                        //
                        // This uses a very simple clock algorithm.
                        //
                        // The cache consist of a circular list of cachedItems. Each cached item
                        // has a 'recentlyUsed' bit which is set every time that item is kept.
                        // Each clock cache manager keeps a global variable clockHand which
                        // refers to the item that is most recently replaced.
                        //
                        // to find a free item, the clock Hand moves to the next cached Item.
                        // If it is kept, or in the middle of being created, the clock hand
                        // moves on. 
                        // If it is recentlyUsed, clear the recently used bit and moves on. 
                        // If it is not recentlyUsed, clean the item and use
                        //
                        // If all the cached item is kept, then create a new entry.
                        // So it is possible, although very unlikely, that, in time, the cache
                        // will grow beyond the maximum size.



                        // wrap the clock hand back to the start of the list
                        if (clockHand >= size) {
                            if (size == 0)
                                break;
                            clockHand = 0;
                        }

                        item = (CachedItem) holders.get(clockHand);

                        if (item.isKept())
                        {
                            if (SanityManager.DEBUG) // stats only in debug mode
                                iskept++;
                            continue;
                        }

                        if (!item.isValid()) // found a free item, just use it
                        {
                            if( null != availableItem)
                                // We have found an available item, now we are looking for bytes
                                continue;
                            if (SanityManager.DEBUG) {
                                if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace))
                                {
                                    SanityManager.DEBUG(ClockFactory.CacheTrace,
                                        name + " found free item at " + clockHand + " item " + item);
                                }
                            }

                            item.keepForCreate();
                            // in byte-count mode, having a free slot is not
                            // enough: keep sweeping until under maximumSize
                            if (useByteCount &&
                                getCurrentSizeNoSync() > maximumSize)
                            {
                                availableItem = item;
                                // now look for bytes.
                                continue;
                            }
                            // since we are using this item, move the clock past it.
                            incrClockHand();

                            return item;
                        }

                        if (item.recentlyUsed())
                        {

                            if (SanityManager.DEBUG) // stats only in debug mode
                                resetUsed++;
                            // second-chance: clear the bit and move on
                            item.setUsed(false);
                            continue;
                        }


                        if (toShrink > 0) {
                            if (!cleanerRunning) {

                                // try an get the cleaner to shrink the cache
                                kickCleaner = true;
                                cleanerRunning = true;
                                needService = true;
                            }
                        }

                        // we are seeing valid, not recently used buffers. Evict this.
                        if (SanityManager.DEBUG) {
                            evictions++;

                            if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace))
                            {
                                SanityManager.DEBUG(ClockFactory.CacheTrace,
                                    name + " evicting item at " +
                                    clockHand + " item " + item);
                            }
                        }

                        if (!item.getEntry().isDirty()) {

                            if (SanityManager.DEBUG) {
                                if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace)) 
                                {
                                    SanityManager.DEBUG(ClockFactory.CacheTrace,
                                        name + " Evicting Item " +
                                        item + ", not dirty");
                                }
                            }

                            // a valid, unkept, clean item, clear its identity
                            // and use it.
                            long itemSize = removeIdentity(item);

                            if( useByteCount)
                            {
                                toShrink -= itemSize;
                                // still over budget and still shrinking:
                                // remember this slot and keep sweeping
                                if (getCurrentSizeNoSync() > maximumSize &&
                                    0 < toShrink)
                                {
                                    if( null == availableItem)
                                    {
                                        item.keepForCreate();
                                        availableItem = item;
                                    }
                                    continue;
                                }
                            }
                            // since we are using it move the clock past it
                            incrClockHand();

                            if( null != availableItem)
                                return availableItem;

                            // item is kept but not valid when returned
                            item.keepForCreate();
                            return item;
                        }
                        // item is valid, unkept, and dirty. clean it.
                        if ((cleaner != null) && !cleanerRunning) {
                            kickCleaner = true;
                            wokenToClean = true;
                            cleanerRunning = true; // at least it soon will be
                        }
                        item.keepForClean();

                        // leave the clock hand where it is so that we will pick it
                        // up if no-one else uses the cache. Other hunters will
                        // skip over it as it is kept, and thus move the clock
                        // hand past it.
                        break;
                    }
                    // inner loop exhausted its quota (or holders was empty)
                    if (item == null) {
                        return availableItem;
                    }

                } // synchronized(this)

                // clean the entry outside of a sync block 
                try 
                {
                    if ( SanityManager.DEBUG) {
                        if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace)) {
                            SanityManager.DEBUG(ClockFactory.CacheTrace,name + " cleaning item " + item);
                        }
                    }

                    item.clean(false);

                    if (SanityManager.DEBUG) // stats only in debug mode
                    {
                        cleaned++;
                    }
                }
                finally {
                    release(item);
                    item = null;
                }

                // at this point the item we cleaned could be in any state
                // so we can't just re-use it. Continue searching
                continue restartClock;
            }
            return availableItem;
        } finally {


            if (SanityManager.DEBUG)
            {
                // report statistics
                if (
                    SanityManager.DEBUG_ON(ClockFactory.CacheTrace))
                    SanityManager.DEBUG(ClockFactory.CacheTrace, name + " evictions " + evictions +
                                        ", cleaned " + cleaned + 
                                        ", resetUsed " + resetUsed +
                                        ", isKept " + iskept +
                                        ", size " + holders.size());
            }

            // wake the background cleaner outside all monitors
            if (kickCleaner && (cleaner != null))
            {
                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(DaemonService.DaemonTrace)) {
                        SanityManager.DEBUG(DaemonService.DaemonTrace, name + " client # " + myClientNumber + " calling cleaner ");
                    }
                }

                cleaner.serviceNow(myClientNumber);

                if (SanityManager.DEBUG) {
                    if (SanityManager.DEBUG_ON(DaemonService.DaemonTrace)) {
                        SanityManager.DEBUG(DaemonService.DaemonTrace, name + Thread.currentThread().getName() + " cleaner called");
                    }
                }
            }
        }
    } // end of rotateClock
+\r
+ /**\r
+ Synchronously increment clock hand position\r
+ */\r
+ private int incrClockHand()\r
+ {\r
+ if (++clockHand >= holders.size())\r
+ clockHand = 0;\r
+ return clockHand;\r
+ }\r
+\r
+ /*\r
+ * Serviceable methods\r
+ */\r
+\r
+ public int performWork(ContextManager contextMgr /* ignored */) {\r
+\r
+ int ret = performWork(false);\r
+ synchronized (this) {\r
+ cleanerRunning = false;\r
+ }\r
+ return ret;\r
+ }\r
+\r
+ \r
+ /**\r
+ <P>MT - read only. \r
+ */\r
+ public boolean serviceASAP()\r
+ {\r
+ return needService;\r
+ } \r
+\r
+\r
+ // @return true, if this work needs to be done on a user thread immediately\r
+ public boolean serviceImmediately()\r
+ {\r
+ return false;\r
+ } \r
+\r
+\r
+ public synchronized int getNumberInUse() {\r
+\r
+ int size = holders.size();\r
+ int inUse = 0;\r
+\r
+ for (int position = 0; position < size; position++) {\r
+\r
+ CachedItem item = (CachedItem) holders.get(position);\r
+\r
+ if (item.isValid()) {\r
+ inUse++;\r
+ }\r
+\r
+ }\r
+ return inUse;\r
+ }\r
+/*\r
+ private int getNumberKept() {\r
+\r
+ synchronized (this) {\r
+\r
+ int size = holders.size();\r
+ int inUse = 0;\r
+\r
+ for (int position = 0; position < size; position++) {\r
+\r
+ CachedItem item = (CachedItem) holders.get(position);\r
+\r
+ if (item.isValid() && item.isKept()) {\r
+ inUse++;\r
+ }\r
+\r
+ }\r
+ return inUse;\r
+ }\r
+ }\r
+*/\r
+\r
+ /**\r
+ Grow the cache and return a unused, kept item.\r
+\r
+ @exception StandardException Thrown if the cache cannot be grown.\r
+ */\r
+\r
+ private CachedItem growCache() {\r
+\r
+ CachedItem item = new CachedItem();\r
+ item.keepForCreate();\r
+\r
+ // if we run out of memory below here we don't\r
+ // know what state the holders could be in\r
+ // so don't trap it\r
+ synchronized (this) {\r
+ holders.add(item);\r
+ // Do not adjust currentByteCount until we put the entry into the CachedItem.\r
+ }\r
+\r
+ return item;\r
+ }\r
+\r
+\r
+ /**\r
+ Clear an item's identity. Item must be \r
+ unkept and valid. This is called for\r
+ dirty items from the discard code.\r
+\r
+ Caller must hold the cache synchronization.\r
+\r
+ @return the amount by which this shrinks the cache.\r
+ */\r
+ private long removeIdentity(CachedItem item) {\r
+\r
+ long shrink = 1;\r
+ \r
+ if (SanityManager.DEBUG) {\r
+ SanityManager.ASSERT(!item.isKept(), "item is kept");\r
+ SanityManager.ASSERT(item.isValid(), "item is not valid");\r
+\r
+ }\r
+\r
+ if( useByteCount)\r
+ shrink = ((SizedCacheable) item.getEntry()).getSize();\r
+ cache_.remove(item.getEntry().getIdentity());\r
+ item.setValidState(false);\r
+ validItemCount--;\r
+ item.getEntry().clearIdentity();\r
+ if( useByteCount)\r
+ {\r
+ shrink -= ((SizedCacheable) item.getEntry()).getSize();\r
+ currentByteCount -= shrink;\r
+ }\r
+ return shrink;\r
+ }\r
+\r
+ /**\r
+ Write out all dirty buffers.\r
+\r
+ <P>MT - must be MT safe.\r
+ Single thread on the part that finds the next dirty buffer to write\r
+ out, the synchronization of cleaning of the individual cachable is\r
+ provided by the cacheable itself.\r
+ */\r
+ private void cleanCache(Matchable partialKey) throws StandardException {\r
+ \r
+ int position;\r
+\r
+ synchronized(this)\r
+ {\r
+ // this is at many dirty buffers as the cleaner is ever going to\r
+ // see \r
+ position = holders.size() - 1;\r
+ }\r
+\r
+\r
+outerscan:\r
+ for (;;) {\r
+\r
+ CachedItem item = null;\r
+\r
+ synchronized (this) {\r
+\r
+ // the cache may have shrunk by quite a bit since we last came\r
+ // in here\r
+ int size = holders.size();\r
+ if (position >= size)\r
+ position = size - 1;\r
+\r
+innerscan:\r
+ // go from position (the last cached item in the holder array\r
+ // to 0 (the first). Otherwise, if we go from 0 to\r
+ // position, some other thread may come in and shrink items\r
+ // which are between 0 and position. Since a shrink moves all\r
+ // items up, we may skip some items without cleaning.\r
+ for ( ; position >= 0; position--, item = null) {\r
+\r
+ item = (CachedItem) holders.get(position);\r
+\r
+ if (!item.isValid())\r
+ continue innerscan;\r
+\r
+ if (!item.getEntry().isDirty())\r
+ continue innerscan;\r
+\r
+ if (partialKey != null) {\r
+\r
+ Object key = item.getEntry().getIdentity();\r
+\r
+ if (!partialKey.match(key))\r
+ continue;\r
+ }\r
+\r
+ item.keepForClean();\r
+ break innerscan;\r
+ }\r
+ } // synchronized(this)\r
+\r
+ if (position < 0)\r
+ {\r
+ return;\r
+ }\r
+\r
+ try {\r
+\r
+ item.clean(false);\r
+ } finally {\r
+ release(item);\r
+ }\r
+ position--;\r
+ \r
+ } // for (;;)\r
+ }\r
+\r
+\r
+ private long shrinkSize(long currentSize) {\r
+\r
+ long maxSize = getMaximumSize();\r
+\r
+ long toShrink = currentSize - maxSize;\r
+ if (toShrink <= 0)\r
+ return 0;\r
+\r
+ // only shrink 10% of the maximum size\r
+ long shrinkLimit = maxSize / 10;\r
+ if (shrinkLimit == 0)\r
+ shrinkLimit = 2;\r
+\r
+ if (toShrink < shrinkLimit)\r
+ return toShrink;\r
+ else\r
+ return shrinkLimit;\r
+ }\r
+\r
+ /**\r
+ The background cleaner tries to make sure that there are serveral\r
+ cleaned or invalied buffers ahead of the clock hand so that when they\r
+ are evicted, they don't need to be cleaned.\r
+\r
+ The way this routine work is as follows, starting at the current clock\r
+ hand position, go forward around the cache buffers, moving the same\r
+ route that the clock hand moves. It keep tracks of the number of\r
+ invalid or not recently used buffers it sees along the way. If it sees\r
+ a not recently used buffer, it will clean it. After it has seen N\r
+ invalid or not recently used buffers, or it has gone around and visited\r
+ all buffers in the cache, it finished.\r
+\r
+ It does not clean recently used buffers.\r
+\r
+ <P>MT - must be MT-safe. It takes a snapshot of the current clock hand\r
+ position (a synchronous call). Getting and looking at the next\r
+ serveral cached item is synchronized on this (RESOLVE: probably doesn't\r
+ need to be). Cleaning of the cacheable is handle by the cacheable itself.\r
+\r
+ */\r
	private int performWork(boolean shrinkOnly)
	{
		long target;
		long toShrink;
		int maxLooks;

		synchronized(this)
		{
			if (!active) {
				// cache is shut down; nothing to do
				needService = false;
				return Serviceable.DONE;
			}
			else {
				long currentSize = getCurrentSizeNoSync();
				target = currentSize / 20; // attempt to get 5% of the cache clean
				toShrink = wokenToClean ? 0 : shrinkSize(currentSize);
			}

			if (target == 0) {
				// cache is empty or tiny; nothing worth doing
				wokenToClean = false;
				needService = false;
				return Serviceable.DONE;
			}

			if (!wokenToClean && (toShrink <= 0)) {
				// not asked to clean and no shrink needed
				needService = false;
				return Serviceable.DONE;
			}

			// bound the number of items examined in one pass so a
			// single service call stays cheap
			maxLooks = useByteCount ? (holders.size()/10) : (int) (target * 2);
		} // synchronized(this)

		// try to clean the next N (target) cached item, 
		long clean = 0;
		int cleaned = 0; // only used in debug
		CachedItem item = null;
		int currentPosition = 0;

		String ThreadName = null;

		if (SanityManager.DEBUG) {
			if (SanityManager.DEBUG_ON(DaemonService.DaemonTrace))
			{
				ThreadName = Thread.currentThread().getName();
				SanityManager.DEBUG(DaemonService.DaemonTrace, ThreadName + " Cleaning " + name + " clientNumber " + myClientNumber);
			}
		}


		synchronized(this)
		{
			int itemCount = holders.size();
			currentPosition = clockHand;
			
			// see if the cache needs to shrink
			boolean shrunk = false;
			long currentSize = getCurrentSizeNoSync();

			for (; shrinkOnly ? (currentSize > maximumSize && toShrink > 0) : (clean < target); item = null)
			{ 
				if (++currentPosition >= itemCount) {
					if (itemCount == 0)
						break;

					// wrap around to the start of the holder list
					currentPosition = 0;

				}

				if (maxLooks-- <= 0)
				{
					if (SanityManager.DEBUG) {
						if (SanityManager.DEBUG_ON(DaemonService.DaemonTrace)) {
							SanityManager.DEBUG(DaemonService.DaemonTrace, ThreadName + " done one round of " + name);
						}
					}

					break; // done one round
				}

				item = (CachedItem) holders.get(currentPosition);

				// kept items are in use elsewhere; skip them
				if (item.isKept())
					continue;

				if (!item.isValid())
				{
					// an invalid item can simply be dropped when shrinking
					if (toShrink > 0) {

						if (SanityManager.DEBUG) {
							if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace)) {
								SanityManager.DEBUG(ClockFactory.CacheTrace, name + 
									" shrinking item " + item + " at position " + currentPosition);
							}
						}

						// charge toShrink with the actual change in
						// cache size caused by the removal
						toShrink -= currentSize;
						holders.remove(currentPosition);
						if( useByteCount)
							currentByteCount -= getItemSize( item);
						currentSize = getCurrentSizeNoSync();
						toShrink += currentSize;
						itemCount--;

						// account for the fact all the items have shifted down
						currentPosition--;

						shrunk = true;
					} 
					continue;
				}

				if (item.recentlyUsed())
					continue;

				// found a valid, not kept, and not recently used item
				// this item will be cleaned
				int itemSize = getItemSize( item);
				clean += itemSize;
				if (!item.getEntry().isDirty()) {

					// already clean: evict it if we still need to shrink
					if (toShrink > 0) {
						if (SanityManager.DEBUG) {
							if (SanityManager.DEBUG_ON(ClockFactory.CacheTrace)) {
								SanityManager.DEBUG(ClockFactory.CacheTrace, name + 
									" shrinking item " + item + " at position " + currentPosition);
							}
						}

						toShrink -= currentSize;
						removeIdentity(item);
						holders.remove(currentPosition);
						if( useByteCount)
							currentByteCount -= getItemSize( item);
						currentSize = getCurrentSizeNoSync();
						toShrink += currentSize;
						itemCount--;
						shrunk = true;

						// account for the fact all the items have shifted down
						currentPosition--;
					} 
					continue;
				}

				if (shrinkOnly)
					continue;

				// found one that needs cleaning, keep it to clean
				item.keepForClean();
				break;
			} // end of for loop

			if (shrunk)
				trimToSize();

			if (item == null) {
				// nothing left to clean in this pass
				wokenToClean = false;
				needService = false;
				return Serviceable.DONE;
			}
		} // synchronized(this)

		try
		{
			if (SanityManager.DEBUG) {
				if (SanityManager.DEBUG_ON(DaemonService.DaemonTrace)) {
					SanityManager.DEBUG(DaemonService.DaemonTrace, ThreadName + " cleaning entry in " + name);
				}
			}

			// clean outside the cache monitor; the cacheable supplies
			// its own synchronization
			item.clean(false);
			if (SanityManager.DEBUG) // only need stats for debug
				cleaned++;
			
		} catch (StandardException se) {
			// RESOLVE - should probably throw the error into the log.
		}
		finally
		{
			release(item);
			item = null;
		}

		if (SanityManager.DEBUG) {
			if (SanityManager.DEBUG_ON(DaemonService.DaemonTrace)) {
				SanityManager.DEBUG(DaemonService.DaemonTrace, ThreadName + " Found " + clean + " clean items, cleaned " +
					cleaned + " items in " + name );
			}
		}

		// requeue ourselves to try for the next dirty item
		needService = true;
		return Serviceable.REQUEUE; // return is actually ignored.
	} // end of performWork
+\r
+ private int getItemSize( CachedItem item)\r
+ {\r
+ if( ! useByteCount)\r
+ return 1;\r
+ SizedCacheable entry = (SizedCacheable) item.getEntry();\r
+ if( null == entry)\r
+ return 0;\r
+ return entry.getSize();\r
+ } // end of getItemSize\r
+ \r
+ /**\r
+ Return statistics about cache that may be implemented.\r
+ **/\r
+ public synchronized long[] getCacheStats()\r
+ {\r
+ stat.currentSize = getCurrentSizeNoSync();\r
+ return stat.getStats();\r
+ }\r
+\r
+ /**\r
+ Reset the statistics to 0.\r
+ **/\r
+ public void resetCacheStats()\r
+ {\r
+ stat.reset();\r
+ }\r
+\r
+ /**\r
+ * @return the current maximum size of the cache.\r
+ */\r
+ public synchronized long getMaximumSize()\r
+ {\r
+ return maximumSize;\r
+ }\r
+ \r
+ /**\r
+ * Change the maximum size of the cache. If the size is decreased then cache entries\r
+ * will be thrown out.\r
+ *\r
+ * @param newSize the new maximum cache size\r
+ *\r
+ * @exception StandardException Standard Derby error policy\r
+ */\r
+ public void resize( long newSize) throws StandardException\r
+ {\r
+ boolean shrink;\r
+\r
+ synchronized( this)\r
+ {\r
+ maximumSize = newSize;\r
+ stat.maxSize = maximumSize;\r
+ shrink = (shrinkSize(getCurrentSizeNoSync()) > 0);\r
+ }\r
+ if( shrink)\r
+ {\r
+ performWork(true /* shrink only */);\r
+ /* performWork does not remove recently used entries nor does it mark them as\r
+ * not recently used. Therefore if the cache has not shrunk enough we will call rotateClock\r
+ * to free up some entries.\r
+ */\r
+ if( shrinkSize( getCurrentSize()) > 0)\r
+ {\r
+ CachedItem freeItem = rotateClock( (float) 2.0);\r
+ /* rotateClock(2.0) means that the clock will rotate through the cache as much as\r
+ * twice. If it does not find sufficient unused items the first time through it\r
+ * will almost certainly find enough of them the second time through, because it\r
+ * marked all the items as not recently used in the first pass.\r
+ *\r
+ * If the cache is very heavily used by other threads then a lot of the items marked as\r
+ * unused in the first pass may be used before rotateClock passes over them again. In this\r
+ * unlikely case rotateClock( 2.0) may not be able to clear out enough space to bring the\r
+ * current size down to the maximum. However the cache size should come down as rotateClock\r
+ * is called in the normal course of operation.\r
+ */\r
+ if( freeItem != null)\r
+ freeItem.unkeepForCreate();\r
+ }\r
+ }\r
+ \r
+ } // end of resize;\r
+ \r
+\r
+ private synchronized long getCurrentSize() {\r
+ return getCurrentSizeNoSync();\r
+ }\r
+\r
+ private long getCurrentSizeNoSync() {\r
+ if (!useByteCount) {\r
+ return holders.size();\r
+ }\r
+ return currentByteCount + holders.size()*ITEM_OVERHEAD;\r
+ }\r
+\r
+ /**\r
+ * Perform an operation on (approximately) all entries that matches the filter,\r
+ * or all entries if the filter is null. Entries that are added while the\r
+ * cache is being scanned might or might not be missed.\r
+ *\r
+ * @param filter\r
+ * @param operator\r
+ */\r
+ public void scan( Matchable filter, Operator operator)\r
+ {\r
+ int itemCount = 1;\r
+ Cacheable entry = null;\r
+ CachedItem item = null;\r
+\r
+ // Do not call the operator while holding the synchronization lock.\r
+ // However we cannot access an item's links without holding the synchronization lock,\r
+ // nor can we assume that an item is still in the cache unless we hold the synchronization\r
+ // lock or the item is marked as kept.\r
+ for( int position = 0;; position++)\r
+ {\r
+ synchronized( this)\r
+ {\r
+ if( null != item)\r
+ {\r
+ release( item);\r
+ item = null;\r
+ }\r
+ \r
+ for( ; position < holders.size(); position++)\r
+ {\r
+ item = (CachedItem) holders.get( position);\r
+ if( null != item)\r
+ {\r
+ try\r
+ {\r
+ entry = item.use();\r
+ }\r
+ catch( StandardException se)\r
+ {\r
+ continue;\r
+ }\r
+ \r
+ if( null != entry && (null == filter || filter.match( entry)))\r
+ {\r
+ item.keepForClean();\r
+ break;\r
+ }\r
+ }\r
+ }\r
+ if( position >= holders.size())\r
+ return;\r
+\r
+ } // synchronized(this)\r
+ operator.operate( entry);\r
+ // Do not release the item until we have re-acquired the synchronization lock.\r
+ // Otherwise the item may be removed and its next link invalidated.\r
+ }\r
+ } // end of scan\r
+\r
	// number of trimToSize calls since the last actual trim; used to
	// amortize the cost of trimming over many requests
	private int trimRequests = 0;
	
	/* Trim out invalid items from holders if there are a lot of them. This is expensive if
	 * holders is large.
	 * The caller must hold the cache synchronization lock.
	 */
	private void trimToSize()
	{
		int size = holders.size();

		// Trimming is expensive, don't do it often.
		trimRequests++;
		if( trimRequests < size/8)
			return;
		trimRequests = 0;
		
		// move invalid items to the end.
		int endPosition = size - 1;

		int invalidCount = 0;
		for (int i = 0; i <= endPosition; i++)
		{
			CachedItem item = (CachedItem) holders.get(i);

			if (item.isKept())
				continue;

			if (item.isValid())
				continue;

			invalidCount++;

			// swap with an item later in the list
			// try to keep free items at the end of the holders array.
			for (; endPosition > i; endPosition--) {
				CachedItem last = (CachedItem) holders.get(endPosition);
				if (last.isValid()) {
					holders.set(i, last);
					holders.set(endPosition, item);
					// step past the invalid item just moved to the end
					endPosition--;
					break;
				}
			}
		}
		// small cache - don't shrink.
		if (size < 32)
			return;
		
		// now decide if we need to shrink the holder array or not.
		int validItems = size - invalidCount;

		// over 75% entries used, don't shrink.
		if (validItems > ((3 * size) / 4))
			return;

		// keep 10% new items.
		int newSize = validItems + (validItems / 10);

		if (newSize >= size)
			return;

		// remove items, starting at the end, where
		// hopefully most of the free items are.
		for (int r = size - 1; r > newSize; r--) {
			CachedItem remove = (CachedItem) holders.get(r);
			if (remove.isKept() || remove.isValid()) {
				// never drop kept or valid items here
				continue;
			}

			if (useByteCount) {
				currentByteCount -= getItemSize(remove);
			}

			holders.remove(r);
		}

		holders.trimToSize();
		// move the clock hand to the start of the invalid items.
		// NOTE(review): validItems + 1 can be >= holders.size() after
		// the removals above — confirm that readers of clockHand wrap
		// or bounds-check before indexing holders.
		clockHand = validItems + 1;

	} // end of trimToSize
+\r
+ /**\r
+ * Tell if a key exists in the cache.\r
+ * @param k the key to test for\r
+ * @return true if k is a key in the cache\r
+ */\r
+ public synchronized boolean containsKey(Object k) {\r
+ return cache_.containsKey(k);\r
+ }\r
+\r
+ /**\r
+ * Return a Collection of the Cacheables currently in the\r
+ * cache. The Collection is a snapshot (copy) so external\r
+ * synchronization isn't required. Part of the CacheManager\r
+ * interface.\r
+ * @return a Collection of the cache elements.\r
+ */\r
+ public synchronized Collection values() {\r
+ ArrayList al = new ArrayList();\r
+ for (Iterator i = cache_.values().iterator(); i.hasNext();){\r
+ al.add(((CachedItem)i.next()).getEntry());\r
+ }\r
+ return al;\r
+ }\r
+}\r