From: navid
Date: Wed, 21 Jan 2009 19:13:22 +0000 (+0000)
Subject: *** empty log message ***
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=c789cd7c0e0ea44754176f8afcc80226aa696332;p=IRC.git

*** empty log message ***
---

diff --git a/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/AtomicSuperClass.java b/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/AtomicSuperClass.java
index fdb5c84d..0336901a 100644
--- a/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/AtomicSuperClass.java
+++ b/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/AtomicSuperClass.java
@@ -1,3 +1,8 @@
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
 package dstm2;
 
 /**
diff --git a/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/ArrayList.java b/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/ArrayList.java
new file mode 100644
index 00000000..3d38afe9
--- /dev/null
+++ b/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/ArrayList.java
@@ -0,0 +1,111 @@
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package dstm2.util;
+
+import dstm2.AtomicArray;
+import dstm2.factory.Factory;
+import dstm2.Thread;
+import dstm2.atomic;
+import java.util.Arrays;
+
+
+/**
+ *
+ * @author navid
+ */
+
+public class ArrayList<V> {
+    transient volatile int modCount;
+    private volatile int size;
+    AtomicArray<entry<V>> elementData;
+    private Factory<entry> factory;
+
+    public ArrayList() {
+        this(10);
+    }
+
+    public ArrayList(int initialCapacity) {
+        super();
+        if (initialCapacity < 0)
+            throw new IllegalArgumentException("Illegal Capacity: " + initialCapacity);
+        this.elementData = new AtomicArray<entry<V>>(entry.class, initialCapacity);
+        // add() needs a factory for entry objects; built the same way HashMap builds its TEntry factory
+        this.factory = Thread.makeFactory(entry.class);
+    }
+
+
+    public void ensureCapacity(int minCapacity) {
+        modCount++;
+        int oldCapacity = elementData.size();
+        if (minCapacity > oldCapacity) {
+            //Object oldData[] = elementData;
+            int newCapacity = (oldCapacity * 3) / 2 + 1;
+            if (newCapacity < minCapacity)
+                newCapacity = minCapacity;
+            // minCapacity is usually close to size, so this is a win:
+            AtomicArray<entry<V>> newar = new AtomicArray<entry<V>>(entry.class, newCapacity);
+            for (int i = 0; i < oldCapacity; i++)
+                newar.set(i, elementData.get(i));
+            elementData = newar;
+        }
+    }
+
+    public boolean add(V value) {
+        ensureCapacity(size + 1);
+        entry<V> e = factory.create();
+        e.setValue(value);
+        elementData.set(size, e);
+        size++;
+        return true;
+    }
+
+    public V get(int index) {
+        return elementData.get(index).getValue();
+    }
+
+
+    public V remove(int index) {
+        rangeCheck(index);
+        modCount++;
+        entry<V> oldValue = elementData.get(index);
+
+        int numMoved = size - index - 1;
+        // AtomicArray is not a plain Java array, so shift the tail one slot
+        // at a time instead of using System.arraycopy
+        for (int i = 0; i < numMoved; i++)
+            elementData.set(index + i, elementData.get(index + i + 1));
+        size--;
+        elementData.set(size, null); // Let gc do its work
+        return oldValue.getValue();
+    }
+
+    private void rangeCheck(int index) {
+        if (index < 0 || index >= this.size)
+            throw new IndexOutOfBoundsException();
+    }
+
+    public void clear() {
+        modCount++;
+        // Let gc do its work
+        for (int i = 0; i < size; i++)
+            elementData.set(i, null);
+        size = 0;
+    }
+
+    @atomic interface entry<V> {
+        V getValue();
+        void setValue(V val);
+    }
+
+
+
+}
diff --git a/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/Calendar.java b/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/Calendar.java
new file mode 100644
index 00000000..d0fae02d
--- /dev/null
+++ b/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/Calendar.java
@@ -0,0 +1,115 @@
+
+
+/*package dstm2.util;
+
+
+import java.util.Locale;
+import java.util.TimeZone;
+import sun.util.calendar.CalendarSystem;
+import sun.util.calendar.JulianCalendar;
+import sun.util.calendar.ZoneInfo;
+
+
+public class Calendar {
+    public final static int ERA = 0;
+    public final static int YEAR = 1;
+    public final static int MONTH = 2;
+    public final static int WEEK_OF_YEAR = 3;
+    public final static int DATE = 5;
+    protected boolean isTimeSet;
+    protected boolean areFieldsSet;
+    protected boolean isSet[];
+    transient private int stamp[];
+    private int nextStamp = MINIMUM_USER_STAMP;
+    private static final int MINIMUM_USER_STAMP = 2;
+    transient boolean areAllFieldsSet;
+    private boolean lenient = true;
+    protected int fields[];
+    transient private boolean sharedZone = false;
+    private TimeZone zone;
+    protected long time;
+    public final static int FIELD_COUNT = 17;
+
+    public void set(int field, int value)
+    {
+        if (isLenient() && areFieldsSet && !areAllFieldsSet) {
+            computeFields();
+        }
+        internalSet(field, value);
+        isTimeSet = false;
+        areFieldsSet = false;
+        isSet[field] = true;
+        stamp[field] = nextStamp++;
+        if (nextStamp == Integer.MAX_VALUE) {
+            adjustStamp();
+        }
+    }
+
+    public boolean isLenient()
+    {
+        return lenient;
+    }
+
+    final void internalSet(int field, int value)
+    {
+        fields[field] = value;
+    }
+
+    public void setTimeZone(TimeZone value)
+    {
+        zone = value;
+        sharedZone = false;
+
+
+        areAllFieldsSet = areFieldsSet = false;
+    }
+
+
+    public final void set(int year, int month, int date)
+    {
+        set(YEAR, year);
+        set(MONTH, month);
+        set(DATE, date);
+    }
+
+    public static Calendar getInstance()
+    {
+        Calendar cal = createCalendar(TimeZone.getDefault(), Locale.getDefault());
+        cal.sharedZone = true;
+        return cal;
+    }
+
+    private static Calendar createCalendar(TimeZone zone,
+                                           Locale aLocale)
+    {
+
+        // else create the default calendar
+        return new GregorianCalendar(zone, aLocale);
+    }
+
+    public void setTimeInMillis(long millis) {
+        // If we don't need to recalculate the calendar field values,
+        // do nothing.
+        if (time == millis && isTimeSet && areFieldsSet && areAllFieldsSet
+            && (zone instanceof ZoneInfo) && !((ZoneInfo)zone).isDirty()) {
+            return;
+        }
+        time = millis;
+        isTimeSet = true;
+        areFieldsSet = false;
+        computeFields();
+        areAllFieldsSet = areFieldsSet = true;
+    }
+
+    protected Calendar(TimeZone zone, Locale aLocale)
+    {
+        fields = new int[FIELD_COUNT];
+        isSet = new boolean[FIELD_COUNT];
+        stamp = new int[FIELD_COUNT];
+        this.zone = zone;
+        setWeekCountData(aLocale);
+    }
+
+
+
+}*/
diff --git a/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/GregorianCalendar.java b/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/GregorianCalendar.java
new file mode 100644
index 00000000..c202a22b
--- /dev/null
+++ b/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/GregorianCalendar.java
@@ -0,0 +1,51 @@
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+/*package dstm2.util;
+
+import java.util.Locale;
+import java.util.TimeZone;
+import sun.util.calendar.CalendarSystem;
+import sun.util.calendar.Gregorian;
+import sun.util.calendar.JulianCalendar;
+
+
+
+public class GregorianCalendar extends Calendar{
+
+
+
+    private static final Gregorian gcal = CalendarSystem.getGregorianCalendar();
+    private static JulianCalendar jcal;
+    public GregorianCalendar(TimeZone zone, Locale local)
+    {
+        super(zone, aLocale);
+        gdate = (BaseCalendar.Date) gcal.newCalendarDate(zone);
+        setTimeInMillis(System.currentTimeMillis())
+    }
+
+    protected void computeFields() {
+        int mask = 0;
+        if (isPartiallyNormalized()) {
+            // Determine which calendar fields need to be computed.
+ mask = getSetStateFields(); + int fieldMask = ~mask & ALL_FIELDS; + // We have to call computTime in case calsys == null in + // order to set calsys and cdate. (6263644) + if (fieldMask != 0 || calsys == null) { + mask |= computeFields(fieldMask, + mask & (ZONE_OFFSET_MASK|DST_OFFSET_MASK)); + assert mask == ALL_FIELDS; + } + } else { + mask = ALL_FIELDS; + computeFields(mask, 0); + } + // After computing all the fields, set the field state to `COMPUTED'. + setFieldsComputed(mask); + } + + +}*/ diff --git a/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/HashMap.java b/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/HashMap.java new file mode 100644 index 00000000..8d9792c3 --- /dev/null +++ b/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/HashMap.java @@ -0,0 +1,894 @@ +package dstm2.util; + +import java.io.IOException; +import java.util.AbstractSet; +import java.util.ConcurrentModificationException; +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +import dstm2.Thread; +import dstm2.atomic; +import dstm2.factory.Factory; + +public class HashMap implements Iterable{ + + /** + * The default initial capacity - MUST be a power of two. + */ + static final int DEFAULT_INITIAL_CAPACITY = 16; + + /** + * The maximum capacity, used if a higher value is implicitly specified + * by either of the constructors with arguments. + * MUST be a power of two <= 1<<30. + */ + static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The load factor used when none specified in constructor. + **/ + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + /** + * The table, resized as necessary. Length MUST Always be a power of two. + */ + //transient TEntry[] table; + dstm2.AtomicArray table; + final private Factory factory; + + //transient Entry[] table; + + /** + * The number of key-value mappings contained in this identity hash map. + */ + //transient int size; + + java.util.concurrent.atomic.AtomicInteger size; + + /* + * The capacity of the table + */ + transient int capacity; + + /** + * The next size value at which to resize (capacity * load factor). + * @serial + */ + int threshold; + + /** + * The load factor for the hash table. + * + * @serial + */ + final float loadFactor; + + /** + * The number of times this HashMap has been structurally modified + * Structural modifications are those that change the number of mappings in + * the HashMap or otherwise modify its internal structure (e.g., + * rehash). This field is used to make iterators on Collection-views of + * the HashMap fail-fast. (See ConcurrentModificationException). + */ + transient volatile int modCount; + + /** + * Constructs an empty HashMap with the specified initial + * capacity and load factor. + * + * @param initialCapacity The initial capacity. + * @param loadFactor The load factor. + * @throws IllegalArgumentException if the initial capacity is negative + * or the load factor is nonpositive. 
+ */ + public HashMap(int initialCapacity, float loadFactor) { + size = new AtomicInteger(0); + factory = Thread.makeFactory(TEntry.class); + if (initialCapacity < 0) + throw new IllegalArgumentException("Illegal initial capacity: " + + initialCapacity); + if (initialCapacity > MAXIMUM_CAPACITY) + initialCapacity = MAXIMUM_CAPACITY; + if (loadFactor <= 0 || Float.isNaN(loadFactor)) + throw new IllegalArgumentException("Illegal load factor: " + + loadFactor); + + // Find a power of 2 >= initialCapacity + int capacity = 1; + while (capacity < initialCapacity) + capacity <<= 1; + + this.capacity = capacity; + this.loadFactor = loadFactor; + threshold = (int)(capacity * loadFactor); + table = new dstm2.AtomicArray(TEntry.class, capacity); +// for(int i = 0; i < capacity; i++) { +// table[i] = factory.create(); +// } + //table = new Entry[capacity]; + init(); + } + + /** + * Constructs an empty HashMap with the specified initial + * capacity and the default load factor (0.75). + * + * @param initialCapacity the initial capacity. + * @throws IllegalArgumentException if the initial capacity is negative. + */ + public HashMap(int initialCapacity) { + this(initialCapacity, DEFAULT_LOAD_FACTOR); + } + + /** + * Constructs an empty HashMap with the default initial capacity + * (16) and the default load factor (0.75). + */ + public HashMap() { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); + } + + /** + * Constructs a new HashMap with the same mappings as the + * specified Map. The HashMap is created with + * default load factor (0.75) and an initial capacity sufficient to + * hold the mappings in the specified Map. + * + * @param m the map whose mappings are to be placed in this map. + * @throws NullPointerException if the specified map is null. + */ + public HashMap(HashMap m) { + this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, + DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR); + putAllForCreate(m); + } + + // internal utilities + + /** + * Initialization hook for subclasses. This method is called + * in all constructors and pseudo-constructors (clone, readObject) + * after HashMap has been initialized but before any entries have + * been inserted. (In the absence of this method, readObject would + * require explicit knowledge of subclasses.) + */ + void init() { + } + public dstm2.AtomicArray getBuckets() { + return table; + } + /** + * Value representing null keys inside tables. + */ + static final Object NULL_KEY = new Object(); + + /** + * Returns internal representation for key. Use NULL_KEY if key is null. + */ + static T maskNull(T key) { + return key == null ? (T)NULL_KEY : key; + } + + /** + * Returns key represented by specified internal representation. + */ + static T unmaskNull(T key) { + return (key == NULL_KEY ? null : key); + } + + /** + * Whether to prefer the old supplemental hash function, for + * compatibility with broken applications that rely on the + * internal hashing order. + * + * Set to true only by hotspot when invoked via + * -XX:+UseNewHashFunction or -XX:+AggressiveOpts + */ + private static final boolean useNewHash; + static { useNewHash = false; } + + private static int oldHash(int h) { + h += ~(h << 9); + h ^= (h >>> 14); + h += (h << 4); + h ^= (h >>> 10); + return h; + } + + private static int newHash(int h) { + // This function ensures that hashCodes that differ only by + // constant multiples at each bit position have a bounded + // number of collisions (approximately 8 at default load factor). 
+ h ^= (h >>> 20) ^ (h >>> 12); + return h ^ (h >>> 7) ^ (h >>> 4); + } + + /** + * Applies a supplemental hash function to a given hashCode, which + * defends against poor quality hash functions. This is critical + * because HashMap uses power-of-two length hash tables, that + * otherwise encounter collisions for hashCodes that do not differ + * in lower bits. + */ + public static int hash(int h) { + return useNewHash ? newHash(h) : oldHash(h); + } + + public static int hash(Object key) { + return hash(key.hashCode()); + } + + /** + * Check for equality of non-null reference x and possibly-null y. + */ + static boolean eq(Object x, Object y) { + return x == y || x.equals(y); + } + + /** + * Returns index for hash code h. + */ + public static int indexFor(int h, int length) { + return h & (length-1); + } + + /** + * Returns the number of key-value mappings in this map. + * + * @return the number of key-value mappings in this map. + */ + public int size() { + return size.get(); + } + + /** + * Returns true if this map contains no key-value mappings. + * + * @return true if this map contains no key-value mappings. + */ + public boolean isEmpty() { + return size.get() == 0; + } + + /** + * Returns the value to which the specified key is mapped in this identity + * hash map, or null if the map contains no mapping for this key. + * A return value of null does not necessarily indicate + * that the map contains no mapping for the key; it is also possible that + * the map explicitly maps the key to null. The + * containsKey method may be used to distinguish these two cases. + * + * @param key the key whose associated value is to be returned. + * @return the value to which this map maps the specified key, or + * null if the map contains no mapping for this key. + * @see #put(Object, Object) + */ + public V get(int key) { +// if (key == null) +// return getForNullKey(); + int hash = hash(key); + for (TEntry e = table.get(indexFor(hash, capacity)); + e != null; + e = e.getNext()) { + Object k; + if (e.getHash() == hash) + return e.getValue(); + } + return null; + } + +// private V getForNullKey() { +// int hash = hash(NULL_KEY.hashCode()); +// int i = indexFor(hash, capacity); +// TEntry e = table[i]; +// //Entry e = table[i]; +// while (true) { +// if (e == null) +// return null; +// if (e.getKey() == NULL_KEY) +// return e.getValue(); +// e = e.getNext(); +// } +// } + + /** + * Returns true if this map contains a mapping for the + * specified key. + * + * @param key The key whose presence in this map is to be tested + * @return true if this map contains a mapping for the specified + * key. + */ + public boolean containsKey(int key) { + Object k = maskNull(key); + int hash = hash(k); + int i = indexFor(hash, capacity); + TEntry e = table.get(i); + while (e != null) { + if (e.getHash() == hash) + return true; + e = e.getNext(); + } + return false; + } + + /** + * Returns the entry associated with the specified key in the + * HashMap. Returns null if the HashMap contains no mapping + * for this key. + */ + TEntry getEntry(int key) { + int hash = hash(key); + int i = indexFor(hash, capacity); + TEntry e = table.get(i); + while (e != null && !(e.getHash() == hash)) + e = e.getNext(); + return e; + } + + /** + * Associates the specified value with the specified key in this map. + * If the map previously contained a mapping for this key, the old + * value is replaced. + * + * @param key key with which the specified value is to be associated. + * @param value value to be associated with the specified key. 
+ * @return previous value associated with specified key, or null + * if there was no mapping for key. A null return can + * also indicate that the HashMap previously associated + * null with the specified key. + */ + public V put(int key, V value) { + int hash = hash(key); + int i = indexFor(hash, capacity); + for (TEntry e = table.get(i); e != null; e = e.getNext()) { + if (e.getHash() == hash) { + V oldValue = e.getValue(); + e.setValue(value); +// e.recordAccess(this); + return oldValue; + } + } + modCount++; + addEntry(hash, value, i); + return null; + } + + + + /** + * This method is used instead of put by constructors and + * pseudoconstructors (clone, readObject). It does not resize the table, + * check for comodification, etc. It calls createEntry rather than + * addEntry. + */ + private void putForCreate(int key, V value) { + int hash = hash(key); + int i = indexFor(hash, capacity); + + /** + * Look for preexisting entry for key. This will never happen for + * clone or deserialize. It will only happen for construction if the + * input Map is a sorted map whose ordering is inconsistent w/ equals. + */ + for (TEntry e = table.get(i); e != null; e = e.getNext()) { + if (e.getHash() == hash) { + e.setValue(value); + return; + } + } + + createEntry(hash, value, i); + } + + void putAllForCreate(HashMap m) { + for (Iterator> i = m.entrySet().iterator(); i.hasNext(); ) { + HashMap.TEntry e = i.next(); + putForCreate(e.getHash(), e.getValue()); + } + } + + /** + * Rehashes the contents of this map into a new array with a + * larger capacity. This method is called automatically when the + * number of keys in this map reaches its threshold. + * + * If current capacity is MAXIMUM_CAPACITY, this method does not + * resize the map, but sets threshold to Integer.MAX_VALUE. + * This has the effect of preventing future calls. + * + * @param newCapacity the new capacity, MUST be a power of two; + * must be greater than current capacity unless current + * capacity is MAXIMUM_CAPACITY (in which case value + * is irrelevant). + */ + void resize(int newCapacity) { + dstm2.AtomicArray oldTable = table; + int oldCapacity = capacity; + if (oldCapacity == MAXIMUM_CAPACITY) { + threshold = Integer.MAX_VALUE; + return; + } + + dstm2.AtomicArray newTable = new dstm2.AtomicArray(TEntry.class, newCapacity); + transfer(newTable, newCapacity); + table = newTable; + threshold = (int)(newCapacity * loadFactor); + capacity = newCapacity; + } + + /** + * Transfer all entries from current table to newTable. + */ + void transfer(dstm2.AtomicArray newTable, int nc) { + dstm2.AtomicArray src = table; + int newCapacity = nc; + for (int j = 0; j < capacity; j++) { + TEntry e = src.get(j); + if (e != null) { + src.set(j, null); + do { + TEntry next = e.getNext(); + int i = indexFor(e.getHash(), newCapacity); + e.setNext(newTable.get(i)); + newTable.set(i, e); + e = next; + } while (e != null); + } + } + } + + /** + * Copies all of the mappings from the specified map to this map + * These mappings will replace any mappings that + * this map had for any of the keys currently in the specified map. + * + * @param m mappings to be stored in this map. + * @throws NullPointerException if the specified map is null. + */ + public void putAll(HashMap m) { + int numKeysToBeAdded = m.size(); + if (numKeysToBeAdded == 0) + return; + + /* + * Expand the map if the map if the number of mappings to be added + * is greater than or equal to threshold. 
This is conservative; the + * obvious condition is (m.size() + size) >= threshold, but this + * condition could result in a map with twice the appropriate capacity, + * if the keys to be added overlap with the keys already in this map. + * By using the conservative calculation, we subject ourself + * to at most one extra resize. + */ + if (numKeysToBeAdded > threshold) { + int targetCapacity = (int)(numKeysToBeAdded / loadFactor + 1); + if (targetCapacity > MAXIMUM_CAPACITY) + targetCapacity = MAXIMUM_CAPACITY; + int newCapacity = capacity; + while (newCapacity < targetCapacity) + newCapacity <<= 1; + if (newCapacity > capacity) + resize(newCapacity); + } + + for (Iterator> i = m.entrySet().iterator(); i.hasNext(); ) { + HashMap.TEntry e = i.next(); + put(e.getHash(), e.getValue()); + } + } + + /** + * Removes the mapping for this key from this map if present. + * + * @param key key whose mapping is to be removed from the map. + * @return previous value associated with specified key, or null + * if there was no mapping for key. A null return can + * also indicate that the map previously associated null + * with the specified key. + */ + public V remove(int key) { + TEntry e = removeEntryForKey(key); + return (e == null ? null : e.getValue()); + } + + /** + * Removes and returns the entry associated with the specified key + * in the HashMap. Returns null if the HashMap contains no mapping + * for this key. + */ + TEntry removeEntryForKey(int key) { + int hash = hash(key); + int i = indexFor(hash, capacity); + TEntry prev = table.get(i); + TEntry e = prev; + + while (e != null) { + TEntry next = e.getNext(); + if (e.getHash() == hash) { + modCount++; + size.decrementAndGet(); + if (prev == e) + table.set(i, next); + else + prev.setNext(next); +// e.recordRemoval(this); + return e; + } + prev = e; + e = next; + } + + return e; + } + + /** + * Special version of remove for EntrySet. + */ + TEntry removeMapping(TEntry o) { + + TEntry entry = o; + int hash = hash(o.getHash()); + int i = indexFor(hash, capacity); + TEntry prev = table.get(i); + TEntry e = prev; + + while (e != null) { + TEntry next = e.getNext(); + if (e.getHash() == hash) { + modCount++; + size.decrementAndGet(); + if (prev == e) + table.set(i, next); + else + prev.setNext(next); +// e.recordRemoval(this); + return e; + } + prev = e; + e = next; + } + + return e; + } + + /** + * Removes all mappings from this map. + */ + public void clear() { + modCount++; + dstm2.AtomicArray tab = table; + for (int i = 0; i < capacity; i++) + table.set(i, null); + size.set(0); + } + + /** + * Returns true if this map maps one or more keys to the + * specified value. + * + * @param value value whose presence in this map is to be tested. + * @return true if this map maps one or more keys to the + * specified value. 
+ */ + public boolean containsValue(Object value) { + if (value == null) + return containsNullValue(); + + dstm2.AtomicArray tab = table; + for (int i = 0; i < capacity; i++) + for (TEntry e = tab.get(i); e != null ; e = e.getNext()) + if (value.equals(e.getValue())) + return true; + return false; + } + + /** + * Special-case code for containsValue with null argument + **/ + private boolean containsNullValue() { + dstm2.AtomicArray tab = table; + for (int i = 0; i < capacity ; i++) + for (TEntry e = tab.get(i) ; e != null ; e = e.getNext()) + if (e.getValue() == null) + return true; + return false; + } + + @atomic public interface TEntry { + int getHash(); + V getValue(); + TEntry getNext(); + void setHash(int h); + void setValue(V v); + void setNext(TEntry n); + + } + + + /** + * Add a new entry with the specified key, value and hash code to + * the specified bucket. It is the responsibility of this + * method to resize the table if appropriate. + * + * Subclass overrides this to alter the behavior of put method. + */ + void addEntry(int hash, V value, int bucketIndex) { + TEntry e = table.get(bucketIndex); + TEntry n = factory.create(); + n.setHash(hash); + n.setValue(value); + n.setNext(e); + table.set(bucketIndex, n); + if (size.incrementAndGet() >= threshold) { + synchronized(this) { + if(size.get() >= threshold) + resize(2 * capacity); + } + } + } + + /** + * Like addEntry except that this version is used when creating entries + * as part of Map construction or "pseudo-construction" (cloning, + * deserialization). This version needn't worry about resizing the table. + * + * Subclass overrides this to alter the behavior of HashMap(Map), + * clone, and readObject. + */ + void createEntry(int hash, V value, int bucketIndex) { + TEntry e = table.get(bucketIndex); + TEntry n = factory.create(); + n.setHash(hash); + n.setValue(value); + n.setNext(e); + table.set(bucketIndex, n); + size.incrementAndGet(); + } + + private abstract class HashIterator implements Iterator { + TEntry next; // next entry to return + int expectedModCount; // For fast-fail + int index; // current slot + TEntry current; // current entry + + HashIterator() { + expectedModCount = modCount; + dstm2.AtomicArray t = table; + int i = capacity; + TEntry n = null; + if (size.get() != 0) { // advance to first entry + while (i > 0 && (n = t.get(--i)) == null) + ; + } + next = n; + index = i; + } + + public boolean hasNext() { + return next != null; + } + + TEntry nextEntry() { + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + TEntry e = next; + if (e == null) + throw new NoSuchElementException(); + + TEntry n = e.getNext(); + dstm2.AtomicArray t = table; + int i = index; + while (n == null && i > 0) + n = t.get(--i); + index = i; + next = n; + return current = e; + } + + public void remove() { + if (current == null) + throw new IllegalStateException(); + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + int k = current.getHash(); + current = null; + HashMap.this.removeEntryForKey(k); + expectedModCount = modCount; + } + + } + + private class ValueIterator extends HashIterator { + public V next() { + return nextEntry().getValue(); + } + } + + private class KeyIterator extends HashIterator { + public Integer next() { + return nextEntry().getHash(); + } + } + +// private class EntryIterator extends HashIterator> { +// public Map.Entry next() { +// return nextEntry(); +// } +// } + + // Subclass overrides these to alter behavior of views' iterator() method + 
public Iterator newKeyIterator() { + return new KeyIterator(); + } + public Iterator newValueIterator() { + return new ValueIterator(); + } +// Iterator> newEntryIterator() { +// return new EntryIterator(); +// } + + + // Views + + private transient Set> entrySet = null; + + + + private class KeySet extends AbstractSet { + public Iterator iterator() { + return newKeyIterator(); + } + public int size() { + return size.get(); + } + public boolean contains(Integer o) { + return containsKey(o); + } + public boolean remove(Integer o) { + return HashMap.this.removeEntryForKey(o) != null; + } + public void clear() { + HashMap.this.clear(); + } + } + + + + /** + * Returns a collection view of the mappings contained in this map. Each + * element in the returned collection is a Map.Entry. The + * collection is backed by the map, so changes to the map are reflected in + * the collection, and vice-versa. The collection supports element + * removal, which removes the corresponding mapping from the map, via the + * Iterator.remove, Collection.remove, + * removeAll, retainAll, and clear operations. + * It does not support the add or addAll operations. + * + * @return a collection view of the mappings contained in this map. + * @see Map.Entry + */ + public Set> entrySet() { + Set> es = entrySet; + return (es != null ? es : (entrySet = (Set>) (Set) new EntrySet())); + } + + private class EntrySet {//extends AbstractSet/*>*/ { +// public Iterator/*>*/ iterator() { +// return newEntryIterator(); +// } + public boolean contains(HashMap.TEntry o) { + HashMap.TEntry e = (HashMap.TEntry) o; + TEntry candidate = getEntry(e.getHash()); + return candidate != null && candidate.equals(e); + } + public boolean remove(HashMap.TEntry o) { + return removeMapping(o) != null; + } + public int size() { + return size.get(); + } + public void clear() { + HashMap.this.clear(); + } + } + + /** + * Save the state of the HashMap instance to a stream (i.e., + * serialize it). + * + * @serialData The capacity of the HashMap (the length of the + * bucket array) is emitted (int), followed by the + * size of the HashMap (the number of key-value + * mappings), followed by the key (Object) and value (Object) + * for each key-value mapping represented by the HashMap + * The key-value mappings are emitted in the order that they + * are returned by entrySet().iterator(). + * + */ + private void writeObject(java.io.ObjectOutputStream s) + throws IOException + { + Iterator> i = entrySet().iterator(); + + // Write out the threshold, loadfactor, and any hidden stuff + s.defaultWriteObject(); + + // Write out number of buckets + s.writeInt(capacity); + + // Write out size (number of Mappings) + s.writeInt(size.get()); + + // Write out keys and values (alternating) + while (i.hasNext()) { + HashMap.TEntry e = i.next(); + s.writeObject(e.getHash()); + s.writeObject(e.getValue()); + } + } + + private static final long serialVersionUID = 362498820763181265L; + + /** + * Reconstitute the HashMap instance from a stream (i.e., + * deserialize it). + */ + private void readObject(java.io.ObjectInputStream s) + throws IOException, ClassNotFoundException + { + // Read in the threshold, loadfactor, and any hidden stuff + s.defaultReadObject(); + + // Read in number of buckets and allocate the bucket array; + int numBuckets = s.readInt(); + table = new dstm2.AtomicArray(TEntry.class, numBuckets); + + init(); // Give subclass a chance to do its thing. 
+ + // Read in size (number of Mappings) + int size = s.readInt(); + + // Read the keys and values, and put the mappings in the HashMap + for (int i=0; i iterator() { + return new Iterator() { + int tableIndex = 0; + public TEntry cursor = table.get(tableIndex); + public boolean hasNext() { + return cursor != null; + } + public V next() { + TEntry node = cursor; + cursor = cursor.getNext(); + while(cursor==null) { + tableIndex++; + if(tableIndex < capacity) { + cursor = table.get(tableIndex); + } + } + return node.getValue(); + } + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + } +} diff --git a/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/IntHashMap.java b/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/IntHashMap.java new file mode 100644 index 00000000..ffb67a48 --- /dev/null +++ b/Robust/Transactions/jcarderbenchmarks/mysrc/dstm2/util/IntHashMap.java @@ -0,0 +1,894 @@ +package dstm2.util; + +import java.io.IOException; +import java.util.AbstractSet; +import java.util.ConcurrentModificationException; +import java.util.Iterator; +import java.util.NoSuchElementException; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +import dstm2.Thread; +import dstm2.atomic; +import dstm2.factory.Factory; + +public class IntHashMap implements Iterable{ + + /** + * The default initial capacity - MUST be a power of two. + */ + static final int DEFAULT_INITIAL_CAPACITY = 16; + + /** + * The maximum capacity, used if a higher value is implicitly specified + * by either of the constructors with arguments. + * MUST be a power of two <= 1<<30. + */ + static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The load factor used when none specified in constructor. + **/ + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + /** + * The table, resized as necessary. Length MUST Always be a power of two. + */ + //transient TEntry[] table; + dstm2.AtomicArray table; + final private Factory factory; + + //transient Entry[] table; + + /** + * The number of key-value mappings contained in this identity hash map. + */ + //transient int size; + + java.util.concurrent.atomic.AtomicInteger size; + + /* + * The capacity of the table + */ + transient int capacity; + + /** + * The next size value at which to resize (capacity * load factor). + * @serial + */ + int threshold; + + /** + * The load factor for the hash table. + * + * @serial + */ + final float loadFactor; + + /** + * The number of times this HashMap has been structurally modified + * Structural modifications are those that change the number of mappings in + * the HashMap or otherwise modify its internal structure (e.g., + * rehash). This field is used to make iterators on Collection-views of + * the HashMap fail-fast. (See ConcurrentModificationException). + */ + transient volatile int modCount; + + /** + * Constructs an empty HashMap with the specified initial + * capacity and load factor. + * + * @param initialCapacity The initial capacity. + * @param loadFactor The load factor. + * @throws IllegalArgumentException if the initial capacity is negative + * or the load factor is nonpositive. 
+ */ + public IntHashMap(int initialCapacity, float loadFactor) { + size = new AtomicInteger(0); + factory = Thread.makeFactory(TEntry.class); + if (initialCapacity < 0) + throw new IllegalArgumentException("Illegal initial capacity: " + + initialCapacity); + if (initialCapacity > MAXIMUM_CAPACITY) + initialCapacity = MAXIMUM_CAPACITY; + if (loadFactor <= 0 || Float.isNaN(loadFactor)) + throw new IllegalArgumentException("Illegal load factor: " + + loadFactor); + + // Find a power of 2 >= initialCapacity + int capacity = 1; + while (capacity < initialCapacity) + capacity <<= 1; + + this.capacity = capacity; + this.loadFactor = loadFactor; + threshold = (int)(capacity * loadFactor); + table = new dstm2.AtomicArray(TEntry.class, capacity); +// for(int i = 0; i < capacity; i++) { +// table[i] = factory.create(); +// } + //table = new Entry[capacity]; + init(); + } + + /** + * Constructs an empty HashMap with the specified initial + * capacity and the default load factor (0.75). + * + * @param initialCapacity the initial capacity. + * @throws IllegalArgumentException if the initial capacity is negative. + */ + public IntHashMap(int initialCapacity) { + this(initialCapacity, DEFAULT_LOAD_FACTOR); + } + + /** + * Constructs an empty HashMap with the default initial capacity + * (16) and the default load factor (0.75). + */ + public IntHashMap() { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); + } + + /** + * Constructs a new HashMap with the same mappings as the + * specified Map. The HashMap is created with + * default load factor (0.75) and an initial capacity sufficient to + * hold the mappings in the specified Map. + * + * @param m the map whose mappings are to be placed in this map. + * @throws NullPointerException if the specified map is null. + */ + public IntHashMap(IntHashMap m) { + this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, + DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR); + putAllForCreate(m); + } + + // internal utilities + + /** + * Initialization hook for subclasses. This method is called + * in all constructors and pseudo-constructors (clone, readObject) + * after HashMap has been initialized but before any entries have + * been inserted. (In the absence of this method, readObject would + * require explicit knowledge of subclasses.) + */ + void init() { + } + public dstm2.AtomicArray getBuckets() { + return table; + } + /** + * Value representing null keys inside tables. + */ + static final Object NULL_KEY = new Object(); + + /** + * Returns internal representation for key. Use NULL_KEY if key is null. + */ + static T maskNull(T key) { + return key == null ? (T)NULL_KEY : key; + } + + /** + * Returns key represented by specified internal representation. + */ + static T unmaskNull(T key) { + return (key == NULL_KEY ? null : key); + } + + /** + * Whether to prefer the old supplemental hash function, for + * compatibility with broken applications that rely on the + * internal hashing order. + * + * Set to true only by hotspot when invoked via + * -XX:+UseNewHashFunction or -XX:+AggressiveOpts + */ + private static final boolean useNewHash; + static { useNewHash = false; } + + private static int oldHash(int h) { + h += ~(h << 9); + h ^= (h >>> 14); + h += (h << 4); + h ^= (h >>> 10); + return h; + } + + private static int newHash(int h) { + // This function ensures that hashCodes that differ only by + // constant multiples at each bit position have a bounded + // number of collisions (approximately 8 at default load factor). 
+ h ^= (h >>> 20) ^ (h >>> 12); + return h ^ (h >>> 7) ^ (h >>> 4); + } + + /** + * Applies a supplemental hash function to a given hashCode, which + * defends against poor quality hash functions. This is critical + * because HashMap uses power-of-two length hash tables, that + * otherwise encounter collisions for hashCodes that do not differ + * in lower bits. + */ + public static int hash(int h) { + return useNewHash ? newHash(h) : oldHash(h); + } + + public static int hash(Object key) { + return hash(key.hashCode()); + } + + /** + * Check for equality of non-null reference x and possibly-null y. + */ + static boolean eq(Object x, Object y) { + return x == y || x.equals(y); + } + + /** + * Returns index for hash code h. + */ + public static int indexFor(int h, int length) { + return h & (length-1); + } + + /** + * Returns the number of key-value mappings in this map. + * + * @return the number of key-value mappings in this map. + */ + public int size() { + return size.get(); + } + + /** + * Returns true if this map contains no key-value mappings. + * + * @return true if this map contains no key-value mappings. + */ + public boolean isEmpty() { + return size.get() == 0; + } + + /** + * Returns the value to which the specified key is mapped in this identity + * hash map, or null if the map contains no mapping for this key. + * A return value of null does not necessarily indicate + * that the map contains no mapping for the key; it is also possible that + * the map explicitly maps the key to null. The + * containsKey method may be used to distinguish these two cases. + * + * @param key the key whose associated value is to be returned. + * @return the value to which this map maps the specified key, or + * null if the map contains no mapping for this key. + * @see #put(Object, Object) + */ + public Integer get(int key) { +// if (key == null) +// return getForNullKey(); + int hash = hash(key); + for (TEntry e = table.get(indexFor(hash, capacity)); + e != null; + e = e.getNext()) { + Object k; + if (e.getHash() == hash) + return e.getValue(); + } + return null; + } + +// private V getForNullKey() { +// int hash = hash(NULL_KEY.hashCode()); +// int i = indexFor(hash, capacity); +// TEntry e = table[i]; +// //Entry e = table[i]; +// while (true) { +// if (e == null) +// return null; +// if (e.getKey() == NULL_KEY) +// return e.getValue(); +// e = e.getNext(); +// } +// } + + /** + * Returns true if this map contains a mapping for the + * specified key. + * + * @param key The key whose presence in this map is to be tested + * @return true if this map contains a mapping for the specified + * key. + */ + public boolean containsKey(int key) { + Object k = maskNull(key); + int hash = hash(k); + int i = indexFor(hash, capacity); + TEntry e = table.get(i); + while (e != null) { + if (e.getHash() == hash) + return true; + e = e.getNext(); + } + return false; + } + + /** + * Returns the entry associated with the specified key in the + * HashMap. Returns null if the HashMap contains no mapping + * for this key. + */ + TEntry getEntry(int key) { + int hash = hash(key); + int i = indexFor(hash, capacity); + TEntry e = table.get(i); + while (e != null && !(e.getHash() == hash)) + e = e.getNext(); + return e; + } + + /** + * Associates the specified value with the specified key in this map. + * If the map previously contained a mapping for this key, the old + * value is replaced. + * + * @param key key with which the specified value is to be associated. 
+ * @param value value to be associated with the specified key. + * @return previous value associated with specified key, or null + * if there was no mapping for key. A null return can + * also indicate that the HashMap previously associated + * null with the specified key. + */ + public Integer put(int key, Integer value) { + int hash = hash(key); + int i = indexFor(hash, capacity); + for (TEntry e = table.get(i); e != null; e = e.getNext()) { + if (e.getHash() == hash) { + Integer oldValue = e.getValue(); + e.setValue(value); +// e.recordAccess(this); + return oldValue; + } + } + modCount++; + addEntry(hash, value, i); + return null; + } + + + + /** + * This method is used instead of put by constructors and + * pseudoconstructors (clone, readObject). It does not resize the table, + * check for comodification, etc. It calls createEntry rather than + * addEntry. + */ + private void putForCreate(int key, Integer value) { + int hash = hash(key); + int i = indexFor(hash, capacity); + + /** + * Look for preexisting entry for key. This will never happen for + * clone or deserialize. It will only happen for construction if the + * input Map is a sorted map whose ordering is inconsistent w/ equals. + */ + for (TEntry e = table.get(i); e != null; e = e.getNext()) { + if (e.getHash() == hash) { + e.setValue(value); + return; + } + } + + createEntry(hash, value, i); + } + + void putAllForCreate(IntHashMap m) { + for (Iterator i = m.entrySet().iterator(); i.hasNext(); ) { + IntHashMap.TEntry e = i.next(); + putForCreate(e.getHash(), e.getValue()); + } + } + + /** + * Rehashes the contents of this map into a new array with a + * larger capacity. This method is called automatically when the + * number of keys in this map reaches its threshold. + * + * If current capacity is MAXIMUM_CAPACITY, this method does not + * resize the map, but sets threshold to Integer.MAX_VALUE. + * This has the effect of preventing future calls. + * + * @param newCapacity the new capacity, MUST be a power of two; + * must be greater than current capacity unless current + * capacity is MAXIMUM_CAPACITY (in which case value + * is irrelevant). + */ + void resize(int newCapacity) { + dstm2.AtomicArray oldTable = table; + int oldCapacity = capacity; + if (oldCapacity == MAXIMUM_CAPACITY) { + threshold = Integer.MAX_VALUE; + return; + } + + dstm2.AtomicArray newTable = new dstm2.AtomicArray(TEntry.class, newCapacity); + transfer(newTable, newCapacity); + table = newTable; + threshold = (int)(newCapacity * loadFactor); + capacity = newCapacity; + } + + /** + * Transfer all entries from current table to newTable. + */ + void transfer(dstm2.AtomicArray newTable, int nc) { + dstm2.AtomicArray src = table; + int newCapacity = nc; + for (int j = 0; j < capacity; j++) { + TEntry e = src.get(j); + if (e != null) { + src.set(j, null); + do { + TEntry next = e.getNext(); + int i = indexFor(e.getHash(), newCapacity); + e.setNext(newTable.get(i)); + newTable.set(i, e); + e = next; + } while (e != null); + } + } + } + + /** + * Copies all of the mappings from the specified map to this map + * These mappings will replace any mappings that + * this map had for any of the keys currently in the specified map. + * + * @param m mappings to be stored in this map. + * @throws NullPointerException if the specified map is null. 
+ */ + public void putAll(IntHashMap m) { + int numKeysToBeAdded = m.size(); + if (numKeysToBeAdded == 0) + return; + + /* + * Expand the map if the map if the number of mappings to be added + * is greater than or equal to threshold. This is conservative; the + * obvious condition is (m.size() + size) >= threshold, but this + * condition could result in a map with twice the appropriate capacity, + * if the keys to be added overlap with the keys already in this map. + * By using the conservative calculation, we subject ourself + * to at most one extra resize. + */ + if (numKeysToBeAdded > threshold) { + int targetCapacity = (int)(numKeysToBeAdded / loadFactor + 1); + if (targetCapacity > MAXIMUM_CAPACITY) + targetCapacity = MAXIMUM_CAPACITY; + int newCapacity = capacity; + while (newCapacity < targetCapacity) + newCapacity <<= 1; + if (newCapacity > capacity) + resize(newCapacity); + } + + for (Iterator i = m.entrySet().iterator(); i.hasNext(); ) { + IntHashMap.TEntry e = i.next(); + put(e.getHash(), e.getValue()); + } + } + + /** + * Removes the mapping for this key from this map if present. + * + * @param key key whose mapping is to be removed from the map. + * @return previous value associated with specified key, or null + * if there was no mapping for key. A null return can + * also indicate that the map previously associated null + * with the specified key. + */ + public Integer remove(int key) { + TEntry e = removeEntryForKey(key); + return (e == null ? null : e.getValue()); + } + + /** + * Removes and returns the entry associated with the specified key + * in the HashMap. Returns null if the HashMap contains no mapping + * for this key. + */ + TEntry removeEntryForKey(int key) { + int hash = hash(key); + int i = indexFor(hash, capacity); + TEntry prev = table.get(i); + TEntry e = prev; + + while (e != null) { + TEntry next = e.getNext(); + if (e.getHash() == hash) { + modCount++; + size.decrementAndGet(); + if (prev == e) + table.set(i, next); + else + prev.setNext(next); +// e.recordRemoval(this); + return e; + } + prev = e; + e = next; + } + + return e; + } + + /** + * Special version of remove for EntrySet. + */ + TEntry removeMapping(TEntry o) { + + TEntry entry = o; + int hash = hash(o.getHash()); + int i = indexFor(hash, capacity); + TEntry prev = table.get(i); + TEntry e = prev; + + while (e != null) { + TEntry next = e.getNext(); + if (e.getHash() == hash) { + modCount++; + size.decrementAndGet(); + if (prev == e) + table.set(i, next); + else + prev.setNext(next); +// e.recordRemoval(this); + return e; + } + prev = e; + e = next; + } + + return e; + } + + /** + * Removes all mappings from this map. + */ + public void clear() { + modCount++; + dstm2.AtomicArray tab = table; + for (int i = 0; i < capacity; i++) + table.set(i, null); + size.set(0); + } + + /** + * Returns true if this map maps one or more keys to the + * specified value. + * + * @param value value whose presence in this map is to be tested. + * @return true if this map maps one or more keys to the + * specified value. 
+ */ + public boolean containsValue(Object value) { + if (value == null) + return containsNullValue(); + + dstm2.AtomicArray tab = table; + for (int i = 0; i < capacity; i++) + for (TEntry e = tab.get(i); e != null ; e = e.getNext()) + if (value.equals(e.getValue())) + return true; + return false; + } + + /** + * Special-case code for containsValue with null argument + **/ + private boolean containsNullValue() { + dstm2.AtomicArray tab = table; + for (int i = 0; i < capacity ; i++) + for (TEntry e = tab.get(i) ; e != null ; e = e.getNext()) + if (e.getValue() == null) + return true; + return false; + } + + @atomic public interface TEntry { + int getHash(); + Integer getValue(); + TEntry getNext(); + void setHash(int h); + void setValue(Integer v); + void setNext(TEntry n); + + } + + + /** + * Add a new entry with the specified key, value and hash code to + * the specified bucket. It is the responsibility of this + * method to resize the table if appropriate. + * + * Subclass overrides this to alter the behavior of put method. + */ + void addEntry(int hash, Integer value, int bucketIndex) { + TEntry e = table.get(bucketIndex); + TEntry n = factory.create(); + n.setHash(hash); + n.setValue(value); + n.setNext(e); + table.set(bucketIndex, n); + if (size.incrementAndGet() >= threshold) { + synchronized(this) { + if(size.get() >= threshold) + resize(2 * capacity); + } + } + } + + /** + * Like addEntry except that this version is used when creating entries + * as part of Map construction or "pseudo-construction" (cloning, + * deserialization). This version needn't worry about resizing the table. + * + * Subclass overrides this to alter the behavior of HashMap(Map), + * clone, and readObject. + */ + void createEntry(int hash, Integer value, int bucketIndex) { + TEntry e = table.get(bucketIndex); + TEntry n = factory.create(); + n.setHash(hash); + n.setValue(value); + n.setNext(e); + table.set(bucketIndex, n); + size.incrementAndGet(); + } + + private abstract class HashIterator implements Iterator { + TEntry next; // next entry to return + int expectedModCount; // For fast-fail + int index; // current slot + TEntry current; // current entry + + HashIterator() { + expectedModCount = modCount; + dstm2.AtomicArray t = table; + int i = capacity; + TEntry n = null; + if (size.get() != 0) { // advance to first entry + while (i > 0 && (n = t.get(--i)) == null) + ; + } + next = n; + index = i; + } + + public boolean hasNext() { + return next != null; + } + + TEntry nextEntry() { + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + TEntry e = next; + if (e == null) + throw new NoSuchElementException(); + + TEntry n = e.getNext(); + dstm2.AtomicArray t = table; + int i = index; + while (n == null && i > 0) + n = t.get(--i); + index = i; + next = n; + return current = e; + } + + public void remove() { + if (current == null) + throw new IllegalStateException(); + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + int k = current.getHash(); + current = null; + IntHashMap.this.removeEntryForKey(k); + expectedModCount = modCount; + } + + } + + private class ValueIterator extends HashIterator { + public Integer next() { + return nextEntry().getValue(); + } + } + + private class KeyIterator extends HashIterator { + public Integer next() { + return nextEntry().getHash(); + } + } + +// private class EntryIterator extends HashIterator> { +// public Map.Entry next() { +// return nextEntry(); +// } +// } + + // Subclass overrides these to alter behavior 
of views' iterator() method + public Iterator newKeyIterator() { + return new KeyIterator(); + } + public Iterator newValueIterator() { + return new ValueIterator(); + } +// Iterator> newEntryIterator() { +// return new EntryIterator(); +// } + + + // Views + + private transient Set entrySet = null; + + + + private class KeySet extends AbstractSet { + public Iterator iterator() { + return newKeyIterator(); + } + public int size() { + return size.get(); + } + public boolean contains(Integer o) { + return containsKey(o); + } + public boolean remove(Integer o) { + return IntHashMap.this.removeEntryForKey(o) != null; + } + public void clear() { + IntHashMap.this.clear(); + } + } + + + + /** + * Returns a collection view of the mappings contained in this map. Each + * element in the returned collection is a Map.Entry. The + * collection is backed by the map, so changes to the map are reflected in + * the collection, and vice-versa. The collection supports element + * removal, which removes the corresponding mapping from the map, via the + * Iterator.remove, Collection.remove, + * removeAll, retainAll, and clear operations. + * It does not support the add or addAll operations. + * + * @return a collection view of the mappings contained in this map. + * @see Map.Entry + */ + public Set entrySet() { + Set es = entrySet; + return (es != null ? es : (entrySet = (Set) (Set) new EntrySet())); + } + + private class EntrySet {//extends AbstractSet/*>*/ { +// public Iterator/*>*/ iterator() { +// return newEntryIterator(); +// } + public boolean contains(IntHashMap.TEntry o) { + IntHashMap.TEntry e = (IntHashMap.TEntry) o; + TEntry candidate = getEntry(e.getHash()); + return candidate != null && candidate.equals(e); + } + public boolean remove(IntHashMap.TEntry o) { + return removeMapping(o) != null; + } + public int size() { + return size.get(); + } + public void clear() { + IntHashMap.this.clear(); + } + } + + /** + * Save the state of the HashMap instance to a stream (i.e., + * serialize it). + * + * @serialData The capacity of the HashMap (the length of the + * bucket array) is emitted (int), followed by the + * size of the HashMap (the number of key-value + * mappings), followed by the key (Object) and value (Object) + * for each key-value mapping represented by the HashMap + * The key-value mappings are emitted in the order that they + * are returned by entrySet().iterator(). + * + */ + private void writeObject(java.io.ObjectOutputStream s) + throws IOException + { + Iterator i = entrySet().iterator(); + + // Write out the threshold, loadfactor, and any hidden stuff + s.defaultWriteObject(); + + // Write out number of buckets + s.writeInt(capacity); + + // Write out size (number of Mappings) + s.writeInt(size.get()); + + // Write out keys and values (alternating) + while (i.hasNext()) { + IntHashMap.TEntry e = i.next(); + s.writeObject(e.getHash()); + s.writeObject(e.getValue()); + } + } + + private static final long serialVersionUID = 362498820763181265L; + + /** + * Reconstitute the HashMap instance from a stream (i.e., + * deserialize it). + */ + private void readObject(java.io.ObjectInputStream s) + throws IOException, ClassNotFoundException + { + // Read in the threshold, loadfactor, and any hidden stuff + s.defaultReadObject(); + + // Read in number of buckets and allocate the bucket array; + int numBuckets = s.readInt(); + table = new dstm2.AtomicArray(TEntry.class, numBuckets); + + init(); // Give subclass a chance to do its thing. 
+ + // Read in size (number of Mappings) + int size = s.readInt(); + + // Read the keys and values, and put the mappings in the HashMap + for (int i=0; i iterator() { + return new Iterator() { + int tableIndex = 0; + public TEntry cursor = table.get(tableIndex); + public boolean hasNext() { + return cursor != null; + } + public Integer next() { + TEntry node = cursor; + cursor = cursor.getNext(); + while(cursor==null) { + tableIndex++; + if(tableIndex < capacity) { + cursor = table.get(tableIndex); + } + } + return node.getValue(); + } + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + } +} diff --git a/Robust/Transactions/jcarderbenchmarks/philosophers/Chopstick.java b/Robust/Transactions/jcarderbenchmarks/philosophers/Chopstick.java new file mode 100644 index 00000000..0025b680 --- /dev/null +++ b/Robust/Transactions/jcarderbenchmarks/philosophers/Chopstick.java @@ -0,0 +1,4 @@ + + +public class Chopstick { +} diff --git a/Robust/Transactions/jcarderbenchmarks/philosophers/Main.java b/Robust/Transactions/jcarderbenchmarks/philosophers/Main.java new file mode 100644 index 00000000..1805c779 --- /dev/null +++ b/Robust/Transactions/jcarderbenchmarks/philosophers/Main.java @@ -0,0 +1,41 @@ + + +public class Main { + public static void main(String[] args) throws Exception { + // for (int i=0; i<1000; i++){ + long start = System.currentTimeMillis(); + System.out.println("Start time" + start); + Chopstick stick1 = new Chopstick(); + Chopstick stick2 = new Chopstick(); + Chopstick stick3 = new Chopstick(); + Chopstick stick4 = new Chopstick(); + Chopstick stick5 = new Chopstick(); + + Philosopher phil1 = new Philosopher("Philosopher1", 1, stick1, stick2); + Philosopher phil2 = new Philosopher("Philosopher2", 2, stick2, stick3); + Philosopher phil3 = new Philosopher("Philosopher3", 3, stick3, stick4); + Philosopher phil4 = new Philosopher("Philosopher4", 4, stick4, stick5); + Philosopher phil5 = new Philosopher("Philosopher5", 5, stick5, stick1); + + phil1.start(); + phil2.start(); + phil3.start(); + phil4.start(); + phil5.start(); + + phil1.join(); + phil2.join(); + phil3.join(); + phil4.join(); + phil5.join(); + + long end = System.currentTimeMillis(); + System.out.println("End time" + end); + System.out.println("Elapsed Time " + (end - start)); + + + + System.out.println("Program finished successfully"); + } + // } +} diff --git a/Robust/Transactions/jcarderbenchmarks/philosophers/Philosopher.java b/Robust/Transactions/jcarderbenchmarks/philosophers/Philosopher.java new file mode 100644 index 00000000..c3c8791f --- /dev/null +++ b/Robust/Transactions/jcarderbenchmarks/philosophers/Philosopher.java @@ -0,0 +1,43 @@ + + +public class Philosopher extends Thread { + private final Chopstick mLeftChopstick; + private final Chopstick mRightChopstick; + int id; + + + public Philosopher(String name, int id, + Chopstick leftChopstick, + Chopstick rightChopstick) { + super(name); + this.id = id; + mLeftChopstick = leftChopstick; + mRightChopstick = rightChopstick; + } + + public void run() { + long endTime = System.currentTimeMillis() + 100; + int i =0; + //while (endTime > System.currentTimeMillis()) { + while(i<100) + { + pickUpSticksAndEat(); + i++; + } + } + + private void pickUpSticksAndEat() { + if (id%2 == 0) + synchronized (mLeftChopstick) { + synchronized (mRightChopstick) { + System.out.println(getName() + " is eating."); + } + } + else + synchronized (mRightChopstick) { + synchronized (mLeftChopstick) { + System.out.println(getName() + " is eating."); + } + } + 
} +}