From: navid Date: Thu, 22 Jan 2009 20:14:30 +0000 (+0000) Subject: *** empty log message *** X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=2aa1b435e8ea90f4a7b5fb4764feb4b355b1552b;p=IRC.git *** empty log message *** --- diff --git a/Robust/Transactions/TransactionalIO/src/TransactionalIO/core/TransactionalFile.java b/Robust/Transactions/TransactionalIO/src/TransactionalIO/core/TransactionalFile.java index e54efa93..dcadae6d 100644 --- a/Robust/Transactions/TransactionalIO/src/TransactionalIO/core/TransactionalFile.java +++ b/Robust/Transactions/TransactionalIO/src/TransactionalIO/core/TransactionalFile.java @@ -293,7 +293,50 @@ public class TransactionalFile implements Comparable { tmp.setLocaloffset(offset); } + + + public int skipBytes(int n) throws IOException { + long pos; + long len; + long newpos; + + if (n <= 0) { + return 0; + } + pos = getFilePointer(); + len = length(); + newpos = pos + n; + if (newpos > len) { + newpos = len; + } + seek(newpos); + /* return the actual number of bytes skipped */ + return (int) (newpos - pos); + } + + public final int readByte(){ + byte[] data = new byte[1]; + read(data); + int result = (byte)(data[0]); + return result; + } + + public final int readChar(){ + byte[] data = new byte[2]; + read(data); + /* combine high and low bytes; the 0xff masks prevent sign extension */ + int result = (char)(((data[0] & 0xff) << 8) | (data[1] & 0xff)); + return result; + } + + public final int readShort(){ + byte[] data = new byte[2]; + read(data); + int result = (short)(((data[0] & 0xff) << 8) | (data[1] & 0xff)); + return result; + } + + public final int readInt(){ byte[] data = new byte[4]; read(data); @@ -308,6 +351,43 @@ public class TransactionalFile implements Comparable { return result; } + + public final void writeByte(int b){ + try{ + byte[] result = new byte[1]; + result[0] = (byte)b; + write(result); + }catch(IOException ex){ + Logger.getLogger(TransactionalFile.class.getName()).log(Level.SEVERE, null, ex); + } + + } + + public final void writeChar(int value){ + try{ + byte[] result = new byte[2]; + result[0] = (byte)(value >> 8); + result[1] = (byte)(value); + write(result); + }catch(IOException ex){ + Logger.getLogger(TransactionalFile.class.getName()).log(Level.SEVERE, null, ex); + } + + } + + + public final void writeShort(int value){ + try{ + byte[] result = new byte[2]; + result[0] = (byte)(value >> 8); + result[1] = (byte)(value); + write(result); + }catch(IOException ex){ + Logger.getLogger(TransactionalFile.class.getName()).log(Level.SEVERE, null, ex); + } + + } + public final void writeInt(int value){ try { byte[] result = new byte[4]; @@ -338,6 +418,19 @@ public class TransactionalFile implements Comparable { Logger.getLogger(TransactionalFile.class.getName()).log(Level.SEVERE, null, ex); } + } + + public final void writeChars(String s) throws IOException { + int clen = s.length(); + int blen = 2*clen; + byte[] b = new byte[blen]; + char[] c = new char[clen]; + s.getChars(0, clen, c, 0); + for (int i = 0, j = 0; i < clen; i++) { + b[j++] = (byte)(c[i] >>> 8); + b[j++] = (byte)(c[i] >>> 0); + } + write(b); } public int read(byte[] b) { diff --git a/Robust/Transactions/dstm2/src/dstm2/util/StringKeyHashMap.java b/Robust/Transactions/dstm2/src/dstm2/util/StringKeyHashMap.java new file mode 100644 index 00000000..dde4153d --- /dev/null +++ b/Robust/Transactions/dstm2/src/dstm2/util/StringKeyHashMap.java @@ -0,0 +1,902 @@ +package dstm2.util; + +import java.io.IOException; +import java.util.AbstractSet; +import java.util.ConcurrentModificationException; +import java.util.Iterator; +import java.util.NoSuchElementException; +import
java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; + +import dstm2.Thread; +import dstm2.atomic; +import dstm2.factory.Factory; + +public class StringKeyHashMap implements Iterable{ + + /** + * The default initial capacity - MUST be a power of two. + */ + static final int DEFAULT_INITIAL_CAPACITY = 16; + + /** + * The maximum capacity, used if a higher value is implicitly specified + * by either of the constructors with arguments. + * MUST be a power of two <= 1<<30. + */ + static final int MAXIMUM_CAPACITY = 1 << 30; + + /** + * The load factor used when none specified in constructor. + **/ + static final float DEFAULT_LOAD_FACTOR = 0.75f; + + /** + * The table, resized as necessary. Length MUST Always be a power of two. + */ + //transient TEntry[] table; + dstm2.AtomicArray table; + final private Factory factory; + + //transient Entry[] table; + + /** + * The number of key-value mappings contained in this identity hash map. + */ + //transient int size; + + java.util.concurrent.atomic.AtomicInteger size; + + /* + * The capacity of the table + */ + transient int capacity; + + /** + * The next size value at which to resize (capacity * load factor). + * @serial + */ + int threshold; + + /** + * The load factor for the hash table. + * + * @serial + */ + final float loadFactor; + + /** + * The number of times this HashMap has been structurally modified + * Structural modifications are those that change the number of mappings in + * the HashMap or otherwise modify its internal structure (e.g., + * rehash). This field is used to make iterators on Collection-views of + * the HashMap fail-fast. (See ConcurrentModificationException). + */ + transient volatile int modCount; + + /** + * Constructs an empty HashMap with the specified initial + * capacity and load factor. + * + * @param initialCapacity The initial capacity. + * @param loadFactor The load factor. + * @throws IllegalArgumentException if the initial capacity is negative + * or the load factor is nonpositive. + */ + public StringKeyHashMap(int initialCapacity, float loadFactor) { + size = new AtomicInteger(0); + factory = Thread.makeFactory(TEntry.class); + if (initialCapacity < 0) + throw new IllegalArgumentException("Illegal initial capacity: " + + initialCapacity); + if (initialCapacity > MAXIMUM_CAPACITY) + initialCapacity = MAXIMUM_CAPACITY; + if (loadFactor <= 0 || Float.isNaN(loadFactor)) + throw new IllegalArgumentException("Illegal load factor: " + + loadFactor); + + // Find a power of 2 >= initialCapacity + int capacity = 1; + while (capacity < initialCapacity) + capacity <<= 1; + + this.capacity = capacity; + this.loadFactor = loadFactor; + threshold = (int)(capacity * loadFactor); + table = new dstm2.AtomicArray(TEntry.class, capacity); +// for(int i = 0; i < capacity; i++) { +// table[i] = factory.create(); +// } + //table = new Entry[capacity]; + init(); + } + + /** + * Constructs an empty HashMap with the specified initial + * capacity and the default load factor (0.75). + * + * @param initialCapacity the initial capacity. + * @throws IllegalArgumentException if the initial capacity is negative. + */ + public StringKeyHashMap(int initialCapacity) { + this(initialCapacity, DEFAULT_LOAD_FACTOR); + } + + /** + * Constructs an empty HashMap with the default initial capacity + * (16) and the default load factor (0.75). + */ + public StringKeyHashMap() { + this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); + } + + /** + * Constructs a new HashMap with the same mappings as the + * specified Map. 
The HashMap is created with + * default load factor (0.75) and an initial capacity sufficient to + * hold the mappings in the specified Map. + * + * @param m the map whose mappings are to be placed in this map. + * @throws NullPointerException if the specified map is null. + */ + public StringKeyHashMap(StringKeyHashMap m) { + this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, + DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR); + putAllForCreate(m); + } + + // internal utilities + + /** + * Initialization hook for subclasses. This method is called + * in all constructors and pseudo-constructors (clone, readObject) + * after HashMap has been initialized but before any entries have + * been inserted. (In the absence of this method, readObject would + * require explicit knowledge of subclasses.) + */ + void init() { + } + public dstm2.AtomicArray getBuckets() { + return table; + } + /** + * Value representing null keys inside tables. + */ + static final Object NULL_KEY = new Object(); + + /** + * Returns internal representation for key. Use NULL_KEY if key is null. + */ + static T maskNull(T key) { + return key == null ? (T)NULL_KEY : key; + } + + /** + * Returns key represented by specified internal representation. + */ + static T unmaskNull(T key) { + return (key == NULL_KEY ? null : key); + } + + /** + * Whether to prefer the old supplemental hash function, for + * compatibility with broken applications that rely on the + * internal hashing order. + * + * Set to true only by hotspot when invoked via + * -XX:+UseNewHashFunction or -XX:+AggressiveOpts + */ + private static final boolean useNewHash; + static { useNewHash = false; } + + private static int oldHash(int h) { + h += ~(h << 9); + h ^= (h >>> 14); + h += (h << 4); + h ^= (h >>> 10); + return h; + } + + private static int newHash(int h) { + // This function ensures that hashCodes that differ only by + // constant multiples at each bit position have a bounded + // number of collisions (approximately 8 at default load factor). + h ^= (h >>> 20) ^ (h >>> 12); + return h ^ (h >>> 7) ^ (h >>> 4); + } + + /** + * Applies a supplemental hash function to a given hashCode, which + * defends against poor quality hash functions. This is critical + * because HashMap uses power-of-two length hash tables, that + * otherwise encounter collisions for hashCodes that do not differ + * in lower bits. + */ + public static int hash(int h) { + return useNewHash ? newHash(h) : oldHash(h); + } + + static int hash(Object key) { + return hash(key.hashCode()); + } + + /** + * Check for equality of non-null reference x and possibly-null y. + */ + static boolean eq(Object x, Object y) { + return x == y || x.equals(y); + } + + /** + * Returns index for hash code h. + */ + public static int indexFor(int h, int length) { + return h & (length-1); + } + + /** + * Returns the number of key-value mappings in this map. + * + * @return the number of key-value mappings in this map. + */ + public int size() { + return size.get(); + } + + /** + * Returns true if this map contains no key-value mappings. + * + * @return true if this map contains no key-value mappings. + */ + public boolean isEmpty() { + return size.get() == 0; + } + + /** + * Returns the value to which the specified key is mapped in this identity + * hash map, or null if the map contains no mapping for this key. + * A return value of null does not necessarily indicate + * that the map contains no mapping for the key; it is also possible that + * the map explicitly maps the key to null. 
The + * containsKey method may be used to distinguish these two cases. + * + * @param key the key whose associated value is to be returned. + * @return the value to which this map maps the specified key, or + * null if the map contains no mapping for this key. + * @see #put(Object, Object) + */ + public V get(Object key) { +// if (key == null) +// return getForNullKey(); + int hash = hash(key); + for (TEntry e = table.get(indexFor(hash, capacity)); + e != null; + e = e.getNext()) { + Object k; + if (e.getHash() == hash && ((k = e.getKey()) == key || key.equals(k))) + return e.getValue(); + } + return null; + } + +// private V getForNullKey() { +// int hash = hash(NULL_KEY.hashCode()); +// int i = indexFor(hash, capacity); +// TEntry e = table[i]; +// //Entry e = table[i]; +// while (true) { +// if (e == null) +// return null; +// if (e.getKey() == NULL_KEY) +// return e.getValue(); +// e = e.getNext(); +// } +// } + + /** + * Returns true if this map contains a mapping for the + * specified key. + * + * @param key The key whose presence in this map is to be tested + * @return true if this map contains a mapping for the specified + * key. + */ + public boolean containsKey(String key) { + Object k = maskNull(key); + int hash = hash(k); + int i = indexFor(hash, capacity); + TEntry e = table.get(i); + while (e != null) { + if (e.getHash() == hash && ((k = e.getKey()) == key || (key != null && key.equals(k)))) + return true; + e = e.getNext(); + } + return false; + } + + /** + * Returns the entry associated with the specified key in the + * HashMap. Returns null if the HashMap contains no mapping + * for this key. + */ + TEntry getEntry(int key) { + int hash = hash(key); + int i = indexFor(hash, capacity); + TEntry e = table.get(i); + while (e != null && !(e.getHash() == hash)) + e = e.getNext(); + return e; + } + + /** + * Associates the specified value with the specified key in this map. + * If the map previously contained a mapping for this key, the old + * value is replaced. + * + * @param key key with which the specified value is to be associated. + * @param value value to be associated with the specified key. + * @return previous value associated with specified key, or null + * if there was no mapping for key. A null return can + * also indicate that the HashMap previously associated + * null with the specified key. + */ + public V put(String key, V value) { + int hash = hash(key.hashCode()); + int i = indexFor(hash, capacity); + for (TEntry e = table.get(i); e != null; e = e.getNext()) { + Object k; + if (e.getHash() == hash && ((k = e.getKey()) == key || key.equals(k))) { + V oldValue = e.getValue(); + e.setValue(value); + return oldValue; + } + + } + modCount++; + addEntry(hash, key, value, i); + return null; + } + + + + /** + * This method is used instead of put by constructors and + * pseudoconstructors (clone, readObject). It does not resize the table, + * check for comodification, etc. It calls createEntry rather than + * addEntry. + */ + private void putForCreate(String key, V value) { + int hash = hash(key); + int i = indexFor(hash, capacity); + + /** + * Look for preexisting entry for key. This will never happen for + * clone or deserialize. It will only happen for construction if the + * input Map is a sorted map whose ordering is inconsistent w/ equals. 
+ */ + for (TEntry e = table.get(i); e != null; e = e.getNext()) { + if (e.getHash() == hash) { + e.setValue(value); + return; + } + } + + createEntry(hash, key, value, i); + } + + void putAllForCreate(StringKeyHashMap m) { + for (Iterator> i = m.entrySet().iterator(); i.hasNext(); ) { + StringKeyHashMap.TEntry e = i.next(); + putForCreate(e.getKey(), e.getValue()); + } + } + + /** + * Rehashes the contents of this map into a new array with a + * larger capacity. This method is called automatically when the + * number of keys in this map reaches its threshold. + * + * If current capacity is MAXIMUM_CAPACITY, this method does not + * resize the map, but sets threshold to Integer.MAX_VALUE. + * This has the effect of preventing future calls. + * + * @param newCapacity the new capacity, MUST be a power of two; + * must be greater than current capacity unless current + * capacity is MAXIMUM_CAPACITY (in which case value + * is irrelevant). + */ + void resize(int newCapacity) { + dstm2.AtomicArray oldTable = table; + int oldCapacity = capacity; + if (oldCapacity == MAXIMUM_CAPACITY) { + threshold = Integer.MAX_VALUE; + return; + } + + dstm2.AtomicArray newTable = new dstm2.AtomicArray(TEntry.class, newCapacity); + transfer(newTable, newCapacity); + table = newTable; + threshold = (int)(newCapacity * loadFactor); + capacity = newCapacity; + } + + /** + * Transfer all entries from current table to newTable. + */ + void transfer(dstm2.AtomicArray newTable, int nc) { + dstm2.AtomicArray src = table; + int newCapacity = nc; + for (int j = 0; j < capacity; j++) { + TEntry e = src.get(j); + if (e != null) { + src.set(j, null); + do { + TEntry next = e.getNext(); + int i = indexFor(e.getHash(), newCapacity); + e.setNext(newTable.get(i)); + newTable.set(i, e); + e = next; + } while (e != null); + } + } + } + + /** + * Copies all of the mappings from the specified map to this map + * These mappings will replace any mappings that + * this map had for any of the keys currently in the specified map. + * + * @param m mappings to be stored in this map. + * @throws NullPointerException if the specified map is null. + */ + public void putAll(StringKeyHashMap m) { + int numKeysToBeAdded = m.size(); + if (numKeysToBeAdded == 0) + return; + + /* + * Expand the map if the map if the number of mappings to be added + * is greater than or equal to threshold. This is conservative; the + * obvious condition is (m.size() + size) >= threshold, but this + * condition could result in a map with twice the appropriate capacity, + * if the keys to be added overlap with the keys already in this map. + * By using the conservative calculation, we subject ourself + * to at most one extra resize. + */ + if (numKeysToBeAdded > threshold) { + int targetCapacity = (int)(numKeysToBeAdded / loadFactor + 1); + if (targetCapacity > MAXIMUM_CAPACITY) + targetCapacity = MAXIMUM_CAPACITY; + int newCapacity = capacity; + while (newCapacity < targetCapacity) + newCapacity <<= 1; + if (newCapacity > capacity) + resize(newCapacity); + } + + for (Iterator> i = m.entrySet().iterator(); i.hasNext(); ) { + StringKeyHashMap.TEntry e = i.next(); + put(e.getKey(), e.getValue()); + } + } + + /** + * Removes the mapping for this key from this map if present. + * + * @param key key whose mapping is to be removed from the map. + * @return previous value associated with specified key, or null + * if there was no mapping for key. A null return can + * also indicate that the map previously associated null + * with the specified key. 
+ */ + public V remove(String key) { + TEntry e = removeEntryForKey(key); + return (e == null ? null : e.getValue()); + } + + /** + * Removes and returns the entry associated with the specified key + * in the HashMap. Returns null if the HashMap contains no mapping + * for this key. + */ + TEntry removeEntryForKey(String key) { + int hash = hash(key); + int i = indexFor(hash, capacity); + TEntry prev = table.get(i); + TEntry e = prev; + + while (e != null) { + TEntry next = e.getNext(); + Object k; + if (e.getHash() == hash && ((k = e.getKey()) == key || (key != null && key.equals(k)))) + { + modCount++; + size.decrementAndGet(); + if (prev == e) + table.set(i, next); + else + prev.setNext(next); +// e.recordRemoval(this); + return e; + } + prev = e; + e = next; + } + + return e; + } + + /** + * Special version of remove for EntrySet. + */ + TEntry removeMapping(TEntry o) { + + TEntry entry = o; + int hash = hash(o.getHash()); + int i = indexFor(hash, capacity); + TEntry prev = table.get(i); + TEntry e = prev; + + while (e != null) { + TEntry next = e.getNext(); + if (e.getHash() == hash) { + modCount++; + size.decrementAndGet(); + if (prev == e) + table.set(i, next); + else + prev.setNext(next); +// e.recordRemoval(this); + return e; + } + prev = e; + e = next; + } + + return e; + } + + /** + * Removes all mappings from this map. + */ + public void clear() { + modCount++; + dstm2.AtomicArray tab = table; + for (int i = 0; i < capacity; i++) + table.set(i, null); + size.set(0); + } + + /** + * Returns true if this map maps one or more keys to the + * specified value. + * + * @param value value whose presence in this map is to be tested. + * @return true if this map maps one or more keys to the + * specified value. + */ + public boolean containsValue(Object value) { + if (value == null) + return containsNullValue(); + + dstm2.AtomicArray tab = table; + for (int i = 0; i < capacity; i++) + for (TEntry e = tab.get(i); e != null ; e = e.getNext()) + if (value.equals(e.getValue())) + return true; + return false; + } + + /** + * Special-case code for containsValue with null argument + **/ + private boolean containsNullValue() { + dstm2.AtomicArray tab = table; + for (int i = 0; i < capacity ; i++) + for (TEntry e = tab.get(i) ; e != null ; e = e.getNext()) + if (e.getValue() == null) + return true; + return false; + } + + @atomic public interface TEntry { + public String getKey(); + public int getHash(); + public V getValue(); + public TEntry getNext(); + public void setKey(String key); + public void setHash(int h); + public void setValue(V v); + public void setNext(TEntry n); + + } + + + /** + * Add a new entry with the specified key, value and hash code to + * the specified bucket. It is the responsibility of this + * method to resize the table if appropriate. + * + * Subclass overrides this to alter the behavior of put method. + */ + void addEntry(int hash, String key, V value, int bucketIndex) { + TEntry e = table.get(bucketIndex); + TEntry n = factory.create(); + n.setKey(key); + n.setHash(hash); + n.setValue(value); + n.setNext(e); + table.set(bucketIndex, n); + if (size.incrementAndGet() >= threshold) { + synchronized(this) { + if(size.get() >= threshold) + resize(2 * capacity); + } + } + } + + /** + * Like addEntry except that this version is used when creating entries + * as part of Map construction or "pseudo-construction" (cloning, + * deserialization). This version needn't worry about resizing the table. 
+ * + * Subclass overrides this to alter the behavior of HashMap(Map), + * clone, and readObject. + */ + void createEntry(int hash, String key, V value, int bucketIndex) { + TEntry e = table.get(bucketIndex); + TEntry n = factory.create(); + n.setKey(key); + n.setHash(hash); + n.setValue(value); + n.setNext(e); + table.set(bucketIndex, n); + size.incrementAndGet(); + } + + private abstract class HashIterator implements Iterator { + TEntry next; // next entry to return + int expectedModCount; // For fast-fail + int index; // current slot + TEntry current; // current entry + + HashIterator() { + expectedModCount = modCount; + dstm2.AtomicArray t = table; + int i = capacity; + TEntry n = null; + if (size.get() != 0) { // advance to first entry + while (i > 0 && (n = t.get(--i)) == null) + ; + } + next = n; + index = i; + } + + public boolean hasNext() { + return next != null; + } + + TEntry nextEntry() { + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + TEntry e = next; + if (e == null) + throw new NoSuchElementException(); + + TEntry n = e.getNext(); + dstm2.AtomicArray t = table; + int i = index; + while (n == null && i > 0) + n = t.get(--i); + index = i; + next = n; + return current = e; + } + + public void remove() { + if (current == null) + throw new IllegalStateException(); + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + String k = current.getKey(); + current = null; + StringKeyHashMap.this.removeEntryForKey(k); + expectedModCount = modCount; + } + + } + + private class ValueIterator extends HashIterator { + public V next() { + return nextEntry().getValue(); + } + } + + private class KeyIterator extends HashIterator { + public Integer next() { + return nextEntry().getHash(); + } + } + +// private class EntryIterator extends HashIterator> { +// public Map.Entry next() { +// return nextEntry(); +// } +// } + + // Subclass overrides these to alter behavior of views' iterator() method + public Iterator newKeyIterator() { + return new KeyIterator(); + } + public Iterator newValueIterator() { + return new ValueIterator(); + } +// Iterator> newEntryIterator() { +// return new EntryIterator(); +// } + + + // Views + + private transient Set> entrySet = null; + + + + private class KeySet extends AbstractSet { + public Iterator iterator() { + return newKeyIterator(); + } + public int size() { + return size.get(); + } + public boolean contains(String o) { + return containsKey(o); + } + public boolean remove(String o) { + return StringKeyHashMap.this.removeEntryForKey(o) != null; + } + public void clear() { + StringKeyHashMap.this.clear(); + } + } + + + + /** + * Returns a collection view of the mappings contained in this map. Each + * element in the returned collection is a Map.Entry. The + * collection is backed by the map, so changes to the map are reflected in + * the collection, and vice-versa. The collection supports element + * removal, which removes the corresponding mapping from the map, via the + * Iterator.remove, Collection.remove, + * removeAll, retainAll, and clear operations. + * It does not support the add or addAll operations. + * + * @return a collection view of the mappings contained in this map. + * @see Map.Entry + */ + public Set> entrySet() { + Set> es = entrySet; + return (es != null ? 
es : (entrySet = (Set>) (Set) new EntrySet())); + } + + private class EntrySet {//extends AbstractSet/*>*/ { +// public Iterator/*>*/ iterator() { +// return newEntryIterator(); +// } + public boolean contains(StringKeyHashMap.TEntry o) { + StringKeyHashMap.TEntry e = (StringKeyHashMap.TEntry) o; + TEntry candidate = getEntry(e.getHash()); + return candidate != null && candidate.equals(e); + } + public boolean remove(StringKeyHashMap.TEntry o) { + return removeMapping(o) != null; + } + public int size() { + return size.get(); + } + public void clear() { + StringKeyHashMap.this.clear(); + } + } + + /** + * Save the state of the HashMap instance to a stream (i.e., + * serialize it). + * + * @serialData The capacity of the HashMap (the length of the + * bucket array) is emitted (int), followed by the + * size of the HashMap (the number of key-value + * mappings), followed by the key (Object) and value (Object) + * for each key-value mapping represented by the HashMap + * The key-value mappings are emitted in the order that they + * are returned by entrySet().iterator(). + * + */ + private void writeObject(java.io.ObjectOutputStream s) + throws IOException + { + Iterator> i = entrySet().iterator(); + + // Write out the threshold, loadfactor, and any hidden stuff + s.defaultWriteObject(); + + // Write out number of buckets + s.writeInt(capacity); + + // Write out size (number of Mappings) + s.writeInt(size.get()); + + // Write out keys and values (alternating) + while (i.hasNext()) { + StringKeyHashMap.TEntry e = i.next(); + s.writeObject(e.getHash()); + s.writeObject(e.getValue()); + } + } + + private static final long serialVersionUID = 362498820763181265L; + + /** + * Reconstitute the HashMap instance from a stream (i.e., + * deserialize it). + */ + /* private void readObject(java.io.ObjectInputStream s) + throws IOException, ClassNotFoundException + { + // Read in the threshold, loadfactor, and any hidden stuff + s.defaultReadObject(); + + // Read in number of buckets and allocate the bucket array; + int numBuckets = s.readInt(); + table = new dstm2.AtomicArray(TEntry.class, numBuckets); + + init(); // Give subclass a chance to do its thing. + + // Read in size (number of Mappings) + int size = s.readInt(); + + // Read the keys and values, and put the mappings in the HashMap + for (int i=0; i<size; i++) { + String key = (String) s.readObject(); + V value = (V) s.readObject(); + putForCreate(key, value); + } + } */ + + public Iterator<V> iterator() { + return new Iterator() { + int tableIndex = 0; + public TEntry cursor = table.get(tableIndex); + { // advance to the first non-empty bucket so a null bucket 0 does not end iteration early + while(cursor==null && tableIndex < capacity-1) { + tableIndex++; + cursor = table.get(tableIndex); + } + } + public boolean hasNext() { + return cursor != null; + } + public V next() { + TEntry node = cursor; + cursor = cursor.getNext(); + while(cursor==null) { + tableIndex++; + if(tableIndex >= capacity) { + break; // end of table reached; without this the loop never terminated + } + cursor = table.get(tableIndex); + } + return node.getValue(); + } + public void remove() { + throw new UnsupportedOperationException(); + } + + }; + } +} + diff --git a/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/DualFileTable.java b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/DualFileTable.java index 61dbecf0..8bfbe140 100644 --- a/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/DualFileTable.java +++ b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/DualFileTable.java @@ -35,7 +35,6 @@ import java.io.*; import java.util.*; import java.nio.channels.*; import com.solidosystems.tuplesoup.filter.*; -import dstm2.atomic; /** * The table stores a group of rows.
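For reference, a minimal, self-contained sketch (illustrative only, not part of this commit; the class and method names are invented for the example) of the big-endian byte-packing contract the new TransactionalFile helpers follow: the high byte is written first, and on the read side each byte must be masked with 0xff so that a low byte of 0x80 or above does not sign-extend and corrupt the high bits — the same issue corrected in readChar/readShort above.

public class BigEndianShortDemo {
    // pack a 16-bit value high byte first, as writeShort/writeChar above do
    static byte[] pack(int value) {
        byte[] b = new byte[2];
        b[0] = (byte) (value >> 8);
        b[1] = (byte) (value);
        return b;
    }
    // unpack; without the & 0xff masks a negative low byte would sign-extend
    static int unpack(byte[] b) {
        return (short) (((b[0] & 0xff) << 8) | (b[1] & 0xff));
    }
    public static void main(String[] args) {
        int v = 0x12AB; // low byte 0xAB is negative as a Java byte
        System.out.println(unpack(pack(v)) == v); // prints true
    }
}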
@@ -70,34 +69,8 @@ public class DualFileTable implements Table{ private int indexcacheusage; private Hashtable indexcache; - - DualFileTableTSInf atomicfields; // Statistic counters - public @atomic interface DualFileTableTSInf{ - long getstat_add(); - long getstat_update(); - long getstat_delete(); - long getstat_add_size(); - long getstat_update_size(); - long getstat_read_size(); - long getstat_read(); - long getstat_cache_hit(); - long getstat_cache_miss(); - long getstat_cache_drop(); - - void setstat_add(long val); - void setstat_update(long val); - void setstat_delete(long val); - void setstat_add_size(long val); - void setstat_update_size(long val); - void setstat_read_size(long val); - void setstat_read(long val); - void setstat_cache_hit(long val); - void setstat_cache_miss(long val); - void setstat_cache_drop(long val); - } - - /*long stat_add=0; + long stat_add=0; long stat_update=0; long stat_delete=0; long stat_add_size=0; @@ -106,7 +79,7 @@ public class DualFileTable implements Table{ long stat_read=0; long stat_cache_hit=0; long stat_cache_miss=0; - long stat_cache_drop=0;*/ + long stat_cache_drop=0; protected String statlock="stat-dummy"; @@ -130,26 +103,26 @@ public class DualFileTable implements Table{ public Hashtable readStatistics(){ Hashtable hash=new Hashtable(); synchronized(statlock){ - hash.put("stat_table_add",atomicfields.getstat_add()); - hash.put("stat_table_update",atomicfields.getstat_update()); - hash.put("stat_table_delete",atomicfields.getstat_delete()); - hash.put("stat_table_add_size",atomicfields.getstat_add_size()); - hash.put("stat_table_update_size",atomicfields.getstat_update_size()); - hash.put("stat_table_read_size",atomicfields.getstat_read_size()); - hash.put("stat_table_read",atomicfields.getstat_read()); - hash.put("stat_table_cache_hit",atomicfields.getstat_cache_hit()); - hash.put("stat_table_cache_miss",atomicfields.getstat_cache_miss()); - hash.put("stat_table_cache_drop",atomicfields.getstat_cache_drop()); - atomicfields.setstat_add(0); - atomicfields.setstat_update(0); - atomicfields.setstat_delete(0); - atomicfields.setstat_add_size(0); - atomicfields.setstat_update_size(0); - atomicfields.setstat_read_size(0); - atomicfields.setstat_read(0); - atomicfields.setstat_cache_hit(0); - atomicfields.setstat_cache_miss(0); - atomicfields.setstat_cache_drop(0); + hash.put("stat_table_add",stat_add); + hash.put("stat_table_update",stat_update); + hash.put("stat_table_delete",stat_delete); + hash.put("stat_table_add_size",stat_add_size); + hash.put("stat_table_update_size",stat_update_size); + hash.put("stat_table_read_size",stat_read_size); + hash.put("stat_table_read",stat_read); + hash.put("stat_table_cache_hit",stat_cache_hit); + hash.put("stat_table_cache_miss",stat_cache_miss); + hash.put("stat_table_cache_drop",stat_cache_drop); + stat_add=0; + stat_update=0; + stat_delete=0; + stat_add_size=0; + stat_update_size=0; + stat_read_size=0; + stat_read=0; + stat_cache_hit=0; + stat_cache_miss=0; + stat_cache_drop=0; Hashtable ihash=index.readStatistics(); hash.putAll(ihash); } @@ -279,7 +252,7 @@ public class DualFileTable implements Table{ indexcache.remove(node.getData().getId()); indexcacheusage--; synchronized(statlock){ - atomicfields.setstat_cache_drop(atomicfields.getstat_cache_drop()+1); + stat_cache_drop++; } indexcachefirst=node.getNext(); if(indexcachefirst==null){ @@ -308,12 +281,10 @@ public class DualFileTable implements Table{ row.writeToStream(fileastream); int post=fileastream.size(); fileastream.flush(); - 
synchronized(statlock){ - atomicfields.setstat_add(atomicfields.getstat_add()+1); - atomicfields.setstat_add_size(atomicfields.getstat_add_size()+row.getSize()); - } - + stat_add++; + stat_add_size+=row.getSize(); + } index.addEntry(row.getId(),row.getSize(),FILEA,fileaposition); if(INDEXCACHESIZE>0){ TableIndexEntry entry=new TableIndexEntry(row.getId(),row.getSize(),FILEA,fileaposition); @@ -330,8 +301,8 @@ public class DualFileTable implements Table{ int post=filebstream.size(); filebstream.flush(); synchronized(statlock){ - atomicfields.setstat_add(atomicfields.getstat_add()+1); - atomicfields.setstat_add_size(atomicfields.getstat_add_size()+row.getSize()); + stat_add++; + stat_add_size+=row.getSize(); } index.addEntry(row.getId(),row.getSize(),FILEB,filebposition); if(INDEXCACHESIZE>0){ @@ -386,7 +357,7 @@ public class DualFileTable implements Table{ } indexcacheusage--; synchronized(statlock){ - atomicfields.setstat_cache_drop(atomicfields.getstat_cache_drop()+1); + stat_cache_drop++; } } } @@ -407,13 +378,13 @@ public class DualFileTable implements Table{ indexcachelast=node; } synchronized(statlock){ - atomicfields.setstat_cache_hit(atomicfields.getstat_cache_hit()+1); + stat_cache_hit++; } return node.getData(); } } synchronized(statlock){ - atomicfields.setstat_cache_miss(atomicfields.getstat_cache_miss()+1); + stat_cache_miss++; } return null; } @@ -483,8 +454,8 @@ public class DualFileTable implements Table{ rowswitch=!rowswitch; } synchronized(statlock){ - atomicfields.setstat_update(atomicfields.getstat_update()+1); - atomicfields.setstat_update_size(atomicfields.getstat_update_size()+row.getSize()); + stat_update++; + stat_update_size+=row.getSize(); } } @@ -533,7 +504,7 @@ public class DualFileTable implements Table{ } index.updateEntry(row.getId(),row.getSize(),DELETE,0); synchronized(statlock){ - atomicfields.setstat_delete(atomicfields.getstat_delete()+1); + stat_delete++; } } @@ -601,8 +572,8 @@ public class DualFileTable implements Table{ Row row=Row.readFromStream(data); data.close(); synchronized(statlock){ - atomicfields.setstat_read(atomicfields.getstat_read()+1); - atomicfields.setstat_read_size(atomicfields.getstat_read_size()+row.getSize()); + stat_read++; + stat_read_size+=row.getSize(); } return row; } diff --git a/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/DualFileTableTransactional.java b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/DualFileTableTransactional.java new file mode 100644 index 00000000..d506fa38 --- /dev/null +++ b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/DualFileTableTransactional.java @@ -0,0 +1,617 @@ +/* + * Copyright (c) 2007, Solido Systems + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of Solido Systems nor the names of its contributors may be + * used to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +package com.solidosystems.tuplesoup.core; + + +import java.io.*; +import java.util.*; +import java.nio.channels.*; +import com.solidosystems.tuplesoup.filter.*; +import dstm2.atomic; +import dstm2.util.StringKeyHashMap; + +/** + * The table stores a group of rows. + * Every row must have a unique id within a table. + */ +public class DualFileTableTransactional implements TableTransactional{ + + + private int INDEXCACHESIZE=8192; + + private String filealock="filea-dummy"; + private String fileblock="fileb-dummy"; + + private DataOutputStream fileastream=null; + private DataOutputStream filebstream=null; + private RandomAccessFile filearandom=null; + private RandomAccessFile filebrandom=null; + FileChannel fca=null; + FileChannel fcb=null; + private TableIndexTransactional index=null; + + private long fileaposition=0; + private long filebposition=0; + + private boolean rowswitch=true; + + private String title; + private String location; + + private TableIndexNodeTransactional indexcachefirst; + private TableIndexNodeTransactional indexcachelast; + private int indexcacheusage; + + private StringKeyHashMap indexcache; + //private Hashtable indexcache; + + + DualFileTableTSInf atomicfields; + // Statistic counters + public @atomic interface DualFileTableTSInf{ + long getstat_add(); + long getstat_update(); + long getstat_delete(); + long getstat_add_size(); + long getstat_update_size(); + long getstat_read_size(); + long getstat_read(); + long getstat_cache_hit(); + long getstat_cache_miss(); + long getstat_cache_drop(); + + void setstat_add(long val); + void setstat_update(long val); + void setstat_delete(long val); + void setstat_add_size(long val); + void setstat_update_size(long val); + void setstat_read_size(long val); + void setstat_read(long val); + void setstat_cache_hit(long val); + void setstat_cache_miss(long val); + void setstat_cache_drop(long val); + } + + /*long stat_add=0; + long stat_update=0; + long stat_delete=0; + long stat_add_size=0; + long stat_update_size=0; + long stat_read_size=0; + long stat_read=0; + long stat_cache_hit=0; + long stat_cache_miss=0; + long stat_cache_drop=0;*/ + + protected String statlock="stat-dummy"; + + /** + * Return the current values of the statistic counters and reset them. + * The current counters are: + *
+ * <ul>
+ *  <li>stat_table_add
+ *  <li>stat_table_update
+ *  <li>stat_table_delete
+ *  <li>stat_table_add_size
+ *  <li>stat_table_update_size
+ *  <li>stat_table_read_size
+ *  <li>stat_table_read
+ *  <li>stat_table_cache_hit
+ *  <li>stat_table_cache_miss
+ *  <li>stat_table_cache_drop
+ * </ul>
+ * Furthermore, the index will be asked to deliver separate index specific counters + */ + public Hashtable readStatistics(){ + Hashtable hash=new Hashtable(); + synchronized(statlock){ + hash.put("stat_table_add",atomicfields.getstat_add()); + hash.put("stat_table_update",atomicfields.getstat_update()); + hash.put("stat_table_delete",atomicfields.getstat_delete()); + hash.put("stat_table_add_size",atomicfields.getstat_add_size()); + hash.put("stat_table_update_size",atomicfields.getstat_update_size()); + hash.put("stat_table_read_size",atomicfields.getstat_read_size()); + hash.put("stat_table_read",atomicfields.getstat_read()); + hash.put("stat_table_cache_hit",atomicfields.getstat_cache_hit()); + hash.put("stat_table_cache_miss",atomicfields.getstat_cache_miss()); + hash.put("stat_table_cache_drop",atomicfields.getstat_cache_drop()); + atomicfields.setstat_add(0); + atomicfields.setstat_update(0); + atomicfields.setstat_delete(0); + atomicfields.setstat_add_size(0); + atomicfields.setstat_update_size(0); + atomicfields.setstat_read_size(0); + atomicfields.setstat_read(0); + atomicfields.setstat_cache_hit(0); + atomicfields.setstat_cache_miss(0); + atomicfields.setstat_cache_drop(0); + Hashtable ihash=index.readStatistics(); + hash.putAll(ihash); + } + return hash; + } + + /** + * Create a new table object with the default flat index model + */ + + + /** + * Create a new table object with a specific index model + */ + public DualFileTableTransactional(String title,String location, int indextype) throws IOException{ + this.title=title; + this.location=location; + if(!this.location.endsWith(File.separator))this.location+=File.separator; + switch(indextype){ + case PAGED : index=new PagedIndexTransactional(getFileName(INDEX)); + break; + + } + indexcachefirst=null; + indexcachelast=null; + indexcacheusage=0; + indexcache=new StringKeyHashMap(); + } + + /** + * Set the maximal allowable size of the index cache. + */ + public void setIndexCacheSize(int newsize){ + INDEXCACHESIZE=newsize; + } + + /** + * Close all open file streams + */ + public void close(){ + try{ + if(fileastream!=null)fileastream.close(); + if(filebstream!=null)filebstream.close(); + if(filearandom!=null)filearandom.close(); + if(filebrandom!=null)filebrandom.close(); + index.close(); + }catch(Exception e){} + } + + /** + * Returns the name of this table + */ + public String getTitle(){ + return title; + } + + /** + * Returns the location of this tables datafiles + */ + public String getLocation(){ + return location; + } + + protected String getFileName(int type){ + switch(type){ + case FILEB : return location+title+".a"; + case FILEA : return location+title+".b"; + case INDEX : return location+title+".index"; + } + return null; + } + + /** + * Delete the files created by this table object. + * Be aware that this will delete any data stored in this table! 
+ */ + public void deleteFiles(){ + try{ + File ftest=new File(getFileName(FILEA)); + ftest.delete(); + }catch(Exception e){} + try{ + File ftest=new File(getFileName(FILEB)); + ftest.delete(); + }catch(Exception e){} + try{ + File ftest=new File(getFileName(INDEX)); + ftest.delete(); + }catch(Exception e){} + } + + private synchronized void openFile(int type) throws IOException{ + switch(type){ + case FILEA : if(fileastream==null){ + fileastream=new DataOutputStream(new BufferedOutputStream(new FileOutputStream(getFileName(FILEA),true))); + File ftest=new File(getFileName(FILEA)); + fileaposition=ftest.length(); + } + break; + case FILEB : if(filebstream==null){ + filebstream=new DataOutputStream(new BufferedOutputStream(new FileOutputStream(getFileName(FILEB),true))); + File ftest=new File(getFileName(FILEB)); + filebposition=ftest.length(); + } + break; + } + } + + /** + * Adds a row of data to this table. + */ + public void addRow(RowTransactional row) throws IOException{ + // Distribute new rows between the two datafiles by using the rowswitch, but don't spend time synchronizing... this does not need to be acurate! + if(rowswitch){ + addRowA(row); + }else{ + addRowB(row); + } + rowswitch=!rowswitch; + } + + private void addCacheEntry(TableIndexEntryTransactional entry){ + synchronized(indexcache){ + if(indexcacheusage>INDEXCACHESIZE){ + // remove first entry + TableIndexNodeTransactional node=indexcachefirst; + indexcache.remove(node.getData().getId()); + indexcacheusage--; + synchronized(statlock){ + atomicfields.setstat_cache_drop(atomicfields.getstat_cache_drop()+1); + } + indexcachefirst=node.getNext(); + if(indexcachefirst==null){ + indexcachelast=null; + }else{ + indexcachefirst.setPrevious(null); + } + } + TableIndexNodeTransactional node=new TableIndexNodeTransactional(indexcachelast,entry); + if(indexcachelast!=null){ + indexcachelast.setNext(node); + } + if(indexcachefirst==null){ + indexcachefirst=node; + } + indexcachelast=node; + indexcache.put(entry.getId(),node); + indexcacheusage++; + } + } + + private void addRowA(RowTransactional row) throws IOException{ + synchronized(filealock){ + openFile(FILEA); + int pre=fileastream.size(); + row.writeToStream(fileastream); + int post=fileastream.size(); + fileastream.flush(); + + synchronized(statlock){ + atomicfields.setstat_add(atomicfields.getstat_add()+1); + atomicfields.setstat_add_size(atomicfields.getstat_add_size()+row.getSize()); + } + + index.addEntry(row.getId(),row.getSize(),FILEA,fileaposition); + if(INDEXCACHESIZE>0){ + TableIndexEntryTransactional entry=new TableIndexEntryTransactional(row.getId(),row.getSize(),FILEA,fileaposition); + addCacheEntry(entry); + } + fileaposition+=Row.calcSize(pre,post); + } + } + private void addRowB(RowTransactional row) throws IOException{ + synchronized(fileblock){ + openFile(FILEB); + int pre=filebstream.size(); + row.writeToStream(filebstream); + int post=filebstream.size(); + filebstream.flush(); + synchronized(statlock){ + atomicfields.setstat_add(atomicfields.getstat_add()+1); + atomicfields.setstat_add_size(atomicfields.getstat_add_size()+row.getSize()); + } + index.addEntry(row.getId(),row.getSize(),FILEB,filebposition); + if(INDEXCACHESIZE>0){ + TableIndexEntryTransactional entry=new TableIndexEntryTransactional(row.getId(),row.getSize(),FILEB,filebposition); + addCacheEntry(entry); + } + filebposition+=RowTransactional.calcSize(pre,post); + } + } + + + private void updateCacheEntry(TableIndexEntryTransactional entry){ + synchronized(indexcache){ + 
if(indexcache.containsKey(entry.getId())){ + TableIndexNodeTransactional node=indexcache.get(entry.getId()); + node.setData(entry); + if(node!=indexcachelast){ + if(node==indexcachefirst){ + indexcachefirst=node.getNext(); + } + node.remove(); + indexcachelast.setNext(node); + node.setPrevious(indexcachelast); + node.setNext(null); + indexcachelast=node; + } + }else{ + addCacheEntry(entry); + } + } + } + + private void removeCacheEntry(String id){ + synchronized(indexcache){ + if(indexcache.containsKey(id)){ + TableIndexNodeTransactional node=indexcache.get(id); + indexcache.remove(id); + if(indexcacheusage==1){ + indexcachefirst=null; + indexcachelast=null; + indexcacheusage=0; + return; + } + if(node==indexcachefirst){ + indexcachefirst=node.getNext(); + indexcachefirst.setPrevious(null); + }else if(node==indexcachelast){ + indexcachelast=node.getPrevious(); + indexcachelast.setNext(null); + }else{ + node.remove(); + } + indexcacheusage--; + synchronized(statlock){ + atomicfields.setstat_cache_drop(atomicfields.getstat_cache_drop()+1); + } + } + } + } + + private TableIndexEntryTransactional getCacheEntry(String id){ + synchronized(indexcache){ + if(indexcache.containsKey(id)){ + TableIndexNodeTransactional node=indexcache.get(id); + if(node!=indexcachelast){ + if(node==indexcachefirst){ + indexcachefirst=node.getNext(); + } + node.remove(); + indexcachelast.setNext(node); + node.setPrevious(indexcachelast); + node.setNext(null); + indexcachelast=node; + } + synchronized(statlock){ + atomicfields.setstat_cache_hit(atomicfields.getstat_cache_hit()+1); + } + return node.getData(); + } + } + synchronized(statlock){ + atomicfields.setstat_cache_miss(atomicfields.getstat_cache_miss()+1); + } + return null; + } + + /** + * Adds a row to this table if it doesn't already exist, if it does it updates the row instead. + * This method is much slower than directly using add or update, so only use it if you don't know wether or not the row already exists. + */ + public void addOrUpdateRow(RowTransactional row) throws IOException{ + RowTransactional tmprow=getRow(row.getId()); + if(tmprow==null){ + addRow(row); + }else{ + updateRow(row); + } + } + + /** + * Updates a row stored in this table. 
+ */ + public void updateRow(RowTransactional row) throws IOException{ + TableIndexEntryTransactional entry=null; + // Handle index entry caching + if(INDEXCACHESIZE>0){ + synchronized(indexcache){ + entry=getCacheEntry(row.getId()); + if(entry==null){ + entry=index.scanIndex(row.getId()); + addCacheEntry(entry); + } + } + }else{ + entry=index.scanIndex(row.getId()); + } + if(entry.getRowSize()>=row.getSize()){ + // Add to the existing location + switch(entry.getLocation()){ + case FILEA :synchronized(filealock){ + if(filearandom==null){ + filearandom=new RandomAccessFile(getFileName(FILEA),"rw"); + fca=filearandom.getChannel(); + } + filearandom.seek(entry.getPosition()); + row.writeToFile(filearandom); + + fca.force(false); + } + break; + case FILEB :synchronized(fileblock){ + if(filebrandom==null){ + filebrandom=new RandomAccessFile(getFileName(FILEB),"rw"); + fcb=filebrandom.getChannel(); + } + filebrandom.seek(entry.getPosition()); + row.writeToFile(filebrandom); + + fcb.force(false); + } + break; + } + }else{ + if(rowswitch){ + updateRowA(row); + }else{ + updateRowB(row); + } + rowswitch=!rowswitch; + } + synchronized(statlock){ + atomicfields.setstat_update(atomicfields.getstat_update()+1); + atomicfields.setstat_update_size(atomicfields.getstat_update_size()+row.getSize()); + } + } + + private void updateRowA(RowTransactional row) throws IOException{ + synchronized(filealock){ + openFile(FILEA); + int pre=fileastream.size(); + row.writeToStream(fileastream); + int post=fileastream.size(); + fileastream.flush(); + index.updateEntry(row.getId(),row.getSize(),FILEA,fileaposition); + + // Handle index entry caching + if(INDEXCACHESIZE>0){ + updateCacheEntry(new TableIndexEntryTransactional(row.getId(),row.getSize(),FILEA,fileaposition)); + } + fileaposition+=Row.calcSize(pre,post); + } + } + + private void updateRowB(RowTransactional row) throws IOException{ + synchronized(fileblock){ + openFile(FILEB); + int pre=filebstream.size(); + row.writeToStream(filebstream); + int post=filebstream.size(); + filebstream.flush(); + index.updateEntry(row.getId(),row.getSize(),FILEB,filebposition); + // Handle index entry caching + // Handle index entry caching + if(INDEXCACHESIZE>0){ + updateCacheEntry(new TableIndexEntryTransactional(row.getId(),row.getSize(),FILEB,filebposition)); + } + filebposition+=Row.calcSize(pre,post); + } + } + + /** + * Marks a row as deleted in the index. + * Be aware that the space consumed by the row is not actually reclaimed. 
+ */ + public void deleteRow(RowTransactional row) throws IOException{ + // Handle index entry caching + if(INDEXCACHESIZE>0){ + removeCacheEntry(row.getId()); + } + index.updateEntry(row.getId(),row.getSize(),DELETE,0); + synchronized(statlock){ + atomicfields.setstat_delete(atomicfields.getstat_delete()+1); + } + } + + /** + * Returns a tuplestream containing the given list of rows + */ + public TupleStreamTransactional getRows(List rows) throws IOException{ + return new IndexedTableReaderTransactional(this,index.scanIndex(rows)); + } + + /** + * Returns a tuplestream containing the rows matching the given rowmatcher + */ + public TupleStreamTransactional getRows(RowMatcherTransactional matcher) throws IOException{ + return new IndexedTableReaderTransactional(this,index.scanIndex(),matcher); + } + + /** + * Returns a tuplestream containing those rows in the given list that matches the given RowMatcher + */ + public TupleStreamTransactional getRows(List rows,RowMatcherTransactional matcher) throws IOException{ + return new IndexedTableReaderTransactional(this,index.scanIndex(rows),matcher); + } + + /** + * Returns a tuplestream of all rows in this table. + */ + public TupleStreamTransactional getRows() throws IOException{ + // return new TableReader(this); + return new IndexedTableReaderTransactional(this,index.scanIndex()); + } + + /** + * Returns a single row stored in this table. + * If the row does not exist in the table, null will be returned. + */ + public RowTransactional getRow(String id) throws IOException{ + TableIndexEntryTransactional entry=null; + // Handle index entry caching + if(INDEXCACHESIZE>0){ + synchronized(indexcache){ + entry=getCacheEntry(id); + if(entry==null){ + entry=index.scanIndex(id); + if(entry!=null){ + addCacheEntry(entry); + } + } + } + }else{ + entry=index.scanIndex(id); + } + if(entry!=null){ + long dataoffset=0; + DataInputStream data=null; + if(entry.getLocation()==Table.FILEA){ + data=new DataInputStream(new BufferedInputStream(new FileInputStream(getFileName(Table.FILEA)))); + }else if(entry.getLocation()==Table.FILEB){ + data=new DataInputStream(new BufferedInputStream(new FileInputStream(getFileName(Table.FILEB)))); + } + if(data!=null){ + while(dataoffset!=entry.getPosition()){ + dataoffset+=data.skipBytes((int)(entry.getPosition()-dataoffset)); + } + RowTransactional row=RowTransactional.readFromStream(data); + data.close(); + synchronized(statlock){ + atomicfields.setstat_read(atomicfields.getstat_read()+1); + atomicfields.setstat_read_size(atomicfields.getstat_read_size()+row.getSize()); + } + return row; + } + + } + return null; + } + } \ No newline at end of file diff --git a/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/IndexedTableReaderTransactional.java b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/IndexedTableReaderTransactional.java new file mode 100644 index 00000000..2a038807 --- /dev/null +++ b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/IndexedTableReaderTransactional.java @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2007, Solido Systems + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. 
+ * + * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * Neither the name of Solido Systems nor the names of its contributors may be + * used to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + package com.solidosystems.tuplesoup.core; + + import com.solidosystems.tuplesoup.filter.*; + import java.io.*; + import java.util.*; + +public class IndexedTableReaderTransactional extends TupleStreamTransactional{ + private DataInputStream fileastream=null; + private DataInputStream filebstream=null; + private long fileaposition=0; + private long filebposition=0; + + private Listfileaentries; + private Listfilebentries; + + private Listentries; + + private Hashtablefileabuffer; + private Hashtablefilebbuffer; + + private Listrows; + private int rowpointer; + private RowTransactional next=null; + + private DualFileTableTransactional table; + + private RowMatcherTransactional matcher=null; + + public IndexedTableReaderTransactional(DualFileTableTransactional table,Listentries) throws IOException{ + this.table=table; + this.rows=rows; + rowpointer=0; + + this.entries=entries; + fileaentries=new ArrayList(); + filebentries=new ArrayList(); + + Iterator it=entries.iterator(); + while(it.hasNext()){ + TableIndexEntryTransactional entry=it.next(); + // TODO: we really shouldn't get nulls here + if(entry!=null){ + if(entry.getLocation()==Table.FILEA){ + fileaentries.add(entry); + }else if(entry.getLocation()==Table.FILEB){ + filebentries.add(entry); + } + } + } + + Collections.sort(fileaentries); + Collections.sort(filebentries); + + fileabuffer=new Hashtable(); + filebbuffer=new Hashtable(); + + readNext(); + } + + + public IndexedTableReaderTransactional(DualFileTableTransactional table,Listentries,RowMatcherTransactional matcher) throws IOException{ + this.table=table; + this.rows=rows; + rowpointer=0; + this.matcher=matcher; + + this.entries=entries; + fileaentries=new ArrayList(); + filebentries=new ArrayList(); + + Iterator it=entries.iterator(); + while(it.hasNext()){ + TableIndexEntryTransactional entry=it.next(); + // TODO: we really shouldn't get nulls here + if(entry!=null){ + if(entry.getLocation()==Table.FILEA){ + fileaentries.add(entry); + }else if(entry.getLocation()==Table.FILEB){ + filebentries.add(entry); + } + } + } + + Collections.sort(fileaentries); + Collections.sort(filebentries); + + fileabuffer=new Hashtable(); + filebbuffer=new Hashtable(); + + readNext(); + } + + private void readNextFromFileA(TableIndexEntryTransactional entry) throws IOException{ + 
if(fileabuffer.containsKey(entry.getId())){ + next=fileabuffer.remove(entry.getId()); + return; + } + while(true){ + if(fileaentries.size()>0){ + TableIndexEntryTransactional nextfilea=fileaentries.remove(0); + if(fileastream==null){ + fileastream=new DataInputStream(new BufferedInputStream(new FileInputStream(table.getFileName(Table.FILEA)))); + fileaposition=0; + } + if(fileaposition>nextfilea.getPosition()){ + // We have already read this entry... skip it + // readNextFromFileA(entry); + // return; + }else{ + while(fileaposition!=nextfilea.getPosition()){ + fileaposition+=fileastream.skipBytes((int)(nextfilea.getPosition()-fileaposition)); + } + RowTransactional row=RowTransactional.readFromStream(fileastream); + synchronized(table.statlock){ + table.atomicfields.setstat_read_size(table.atomicfields.getstat_read_size()+row.getSize()); + table.atomicfields.setstat_read(table.atomicfields.getstat_read()+1); + } + fileaposition+=row.getSize(); + if(row.getId().equals(entry.getId())){ + next=row; + return; + }else{ + fileabuffer.put(row.getId(),row); + // readNextFromFileA(entry); + } + } + }else{ + next=null; + return; + } + } + } + + private void readNextFromFileB(TableIndexEntryTransactional entry) throws IOException{ + if(filebbuffer.containsKey(entry.getId())){ + next=filebbuffer.remove(entry.getId()); + return; + } + while(true){ + if(filebentries.size()>0){ + TableIndexEntryTransactional nextfileb=filebentries.remove(0); + if(filebstream==null){ + filebstream=new DataInputStream(new BufferedInputStream(new FileInputStream(table.getFileName(Table.FILEB)))); + filebposition=0; + } + if(filebposition>nextfileb.getPosition()){ + // We have already read this entry... skip it + // readNextFromFileB(entry); + // return; + }else{ + while(filebposition!=nextfileb.getPosition()){ + filebposition+=filebstream.skipBytes((int)(nextfileb.getPosition()-filebposition)); + } + RowTransactional row=RowTransactional.readFromStream(filebstream); + synchronized(table.statlock){ + table.atomicfields.setstat_read_size(table.atomicfields.getstat_read_size()+row.getSize()); + table.atomicfields.setstat_read(table.atomicfields.getstat_read()+1); + } + filebposition+=row.getSize(); + if(row.getId().equals(entry.getId())){ + next=row; + return; + }else{ + filebbuffer.put(row.getId(),row); + // readNextFromFileB(entry); + } + } + }else{ + next=null; + return; + } + } + } + + private void readNext() throws IOException{ + if(entries.size()>rowpointer){ + TableIndexEntryTransactional entry=entries.get(rowpointer++); + if(entry!=null){ + switch(entry.getLocation()){ + case Table.FILEA : readNextFromFileA(entry); + // return; + break; + case Table.FILEB : readNextFromFileB(entry); + // return; + break; + } + if(next!=null){ + if(matcher!=null){ + if(!matcher.matches(next)){ + readNext(); + } + } + } + return; + }else{ + readNext(); + return; + } + } + try{ + if(fileastream!=null)fileastream.close(); + }catch(Exception e){} + try{ + if(filebstream!=null)filebstream.close(); + }catch(Exception e){} + next=null; + } + + public boolean hasNext(){ + if(next!=null)return true; + return false; + } + + public RowTransactional next(){ + try{ + if(next!=null){ + RowTransactional tmp=next; + readNext(); + return tmp; + } + }catch(Exception e){ + e.printStackTrace(); + } + return null; + } + + public void remove(){ + + } +} \ No newline at end of file diff --git a/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/PagedIndexTransactional.java 
b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/PagedIndexTransactional.java index 3fea9160..90602ed0 100644 --- a/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/PagedIndexTransactional.java +++ b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/PagedIndexTransactional.java @@ -5,16 +5,27 @@ package com.solidosystems.tuplesoup.core; +import TransactionalIO.core.TransactionalFile; import dstm2.AtomicArray; import dstm2.atomic; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Hashtable; +import java.util.List; /** * * @author navid */ -public class PagedIndexTransactional { +public class PagedIndexTransactional implements TableIndexTransactional{ - public @atomic interface PageIndexTSInf{ + PageIndexTSInf atomicfields; + + + + public @atomic interface PageIndexTSInf{ + String getFilename(); Long getStat_read(); Long getStat_write(); Long getStat_create_page(); @@ -22,6 +33,7 @@ public class PagedIndexTransactional { Long getStat_page_branch(); AtomicArray getRoots(); + void setFilename(String val); void setRoots(AtomicArray roots); void setStat_read(Long val); void setStat_write(Long val); @@ -30,6 +42,99 @@ public class PagedIndexTransactional { void setStat_page_branch(Long val); } + + private TransactionalFile out=null; protected static final int INITIALPAGEHASH=1024; protected static final int PAGESIZE=2048; + + public PagedIndexTransactional(String filename) throws IOException{ + this.atomicfields.setFilename(filename); + File ftest=new File(filename); + if(!ftest.exists())ftest.createNewFile(); + out=new TransactionalFile(filename,"rw"); + atomicfields.setRoots(new AtomicArray(TableIndexPageTransactional.class, INITIALPAGEHASH)); + + if(out.length()>0){ + for(int i=0;i readStatistics(){ + Hashtable hash=new Hashtable(); + hash.put("stat_index_read",atomicfields.getStat_read()); + hash.put("stat_index_write",atomicfields.getStat_write()); + hash.put("stat_index_create_page",atomicfields.getStat_create_page()); + hash.put("stat_index_page_next",atomicfields.getStat_page_next()); + hash.put("stat_index_page_branch",atomicfields.getStat_page_branch()); + atomicfields.setStat_read((long)0); + atomicfields.setStat_write((long)0); + atomicfields.setStat_create_page((long)0); + atomicfields.setStat_page_next((long)0); + atomicfields.setStat_page_branch((long)0); + return hash; + } + + private int rootHash(String id){ + return id.hashCode() & (INITIALPAGEHASH-1); + } + + private synchronized TableIndexPageTransactional getFirstFreePage(String id) throws IOException{ + return atomicfields.getRoots().get(rootHash(id)).getFirstFreePage(id, id.hashCode()); + } + + private synchronized long getOffset(String id) throws IOException{ + if(atomicfields.getRoots()==null)return -1; + return atomicfields.getRoots().get(rootHash(id)).getOffset(id,id.hashCode()); + } + + public synchronized void updateEntry(String id,int rowsize,int location,long position) throws IOException{ + long offset=getOffset(id); + out.seek(offset); + TableIndexEntryTransactional entry=new TableIndexEntryTransactional(id,rowsize,location,position); + entry.updateData(out); + atomicfields.setStat_write(atomicfields.getStat_write()+1); + } + public synchronized void addEntry(String id,int rowsize,int location,long position) throws IOException{ + TableIndexPageTransactional page=getFirstFreePage(id); + page.addEntry(id,rowsize,location,position); + atomicfields.setStat_write(atomicfields.getStat_write()+1); + } + public 
synchronized TableIndexEntryTransactional scanIndex(String id) throws IOException{ + if(atomicfields.getRoots()==null)return null; + return atomicfields.getRoots().get(rootHash(id)).scanIndex(id,id.hashCode()); + } + public synchronized List scanIndex(List rows) throws IOException{ + List lst=new ArrayList(); + for(int i=0;i scanIndex() throws IOException{ + ArrayList lst=new ArrayList(); + for(int i=0;i{ @@ -40,6 +41,8 @@ public class TableIndexEntry implements Comparable{ private int size; private int rowsize; + + public TableIndexEntry(String id,int rowsize,int location,long position){ this.id=id; this.location=location; diff --git a/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/TableIndexEntryTransactional.java b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/TableIndexEntryTransactional.java new file mode 100644 index 00000000..f9166e54 --- /dev/null +++ b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/TableIndexEntryTransactional.java @@ -0,0 +1,232 @@ +/* + * To change this template, choose Tools | Templates + * and open the template in the editor. + */ + +package com.solidosystems.tuplesoup.core; + +import TransactionalIO.core.TransactionalFile; +import dstm2.AtomicSuperClass; +import dstm2.atomic; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.EOFException; +import java.io.IOException; +import java.io.RandomAccessFile; + +/** + * + * @author navid + */ +public class TableIndexEntryTransactional implements AtomicSuperClass, Comparable{ + + TableIndexEntryTSInf atomicfields; + public @atomic interface TableIndexEntryTSInf{ + String getId(); + int getLocation(); + long getPosition(); + int getSize(); + int getRowsize(); + + void setId(String val); + void setLocation(int val); + void setPosition(long val); + void setSize(int val); + void setRowsize(int val); + } + + + public TableIndexEntryTransactional(String id,int rowsize,int location,long position){ + this.atomicfields.setId(id); + this.atomicfields.setLocation(location); + this.atomicfields.setPosition(position); + this.atomicfields.setRowsize(rowsize); + this.atomicfields.setSize(-1); + } + public String getId(){ + return atomicfields.getId(); + } + public void setPosition(long position){ + this.atomicfields.setPosition(position); + } + public long getPosition(){ + return atomicfields.getPosition(); + } + public void setLocation(int location){ + this.atomicfields.setLocation(location); + } + public int getLocation(){ + return atomicfields.getLocation(); + } + + public int getRowSize(){ + return atomicfields.getRowsize(); + } + + public int compareTo(TableIndexEntryTransactional obj) throws ClassCastException{ + TableIndexEntryTransactional ent=(TableIndexEntryTransactional)obj; + if(atomicfields.getPosition()0)starthash=file.readInt(); + this.atomicfields.setIndex(index); + this.atomicfields.setFirst(false); + this.atomicfields.setLocation(file.getFilePointer()); + this.atomicfields.setSize(file.readInt()); + this.atomicfields.setNext(file.readLong()); + this.atomicfields.setLower(file.readLong()); + this.atomicfields.setOffset(file.readInt()); + this.atomicfields.setEndhash(file.readInt()); + if(this.atomicfields.getOffset()>0) + this.atomicfields.setStarthash(file.readInt()); } - public static TableIndexPage createNewPage(PagedIndex index,RandomAccessFile file,int size) throws IOException{ + public static TableIndexPageTransactional createNewPage(PagedIndexTransactional 
index,TransactionalFile file,int size) throws IOException{ long pre=file.length(); - file.setLength(file.length()+size+BASEOFFSET); +// file.setLength(file.length()+size+BASEOFFSET); file.seek(pre); file.writeInt(size); file.writeLong(-1l); @@ -107,185 +96,185 @@ public class TableIndexPageTransactional implements AtomicSuperClass{ file.writeInt(0); file.writeInt(-1); file.seek(pre); - index.stat_create_page++; - return new TableIndexPage(index,file); + index.atomicfields.setStat_create_page(index.atomicfields.getStat_create_page()+1); + return new TableIndexPageTransactional(index,file); } public void setFirst(){ - first=true; + this.atomicfields.setFirst(true); } public long getLocation(){ - return location; + return atomicfields.getLocation(); } public long getEndLocation(){ - return location+size+BASEOFFSET; + return this.atomicfields.getLocation()+atomicfields.getSize()+BASEOFFSET; } public String toString(){ StringBuffer buf=new StringBuffer(); buf.append("{\n"); - buf.append(" location "+location+"\n"); - buf.append(" size "+size+"\n"); - buf.append(" next "+next+"\n"); - buf.append(" lower "+lower+"\n"); - buf.append(" offset "+offset+"\n"); - buf.append(" starthash "+starthash+"\n"); - buf.append(" endhash "+endhash+"\n"); + buf.append(" location "+this.atomicfields.getLocation()+"\n"); + buf.append(" size "+this.atomicfields.getSize()+"\n"); + buf.append(" next "+this.atomicfields.getNext()+"\n"); + buf.append(" lower "+this.atomicfields.getLower()+"\n"); + buf.append(" offset "+this.atomicfields.getOffset()+"\n"); + buf.append(" starthash "+this.atomicfields.getStarthash()+"\n"); + buf.append(" endhash "+this.atomicfields.getEndhash()+"\n"); buf.append("}\n"); return buf.toString(); } private void updateMeta() throws IOException{ - file.seek(location); - file.writeInt(size); - file.writeLong(next); - file.writeLong(lower); - file.writeInt(offset); - file.writeInt(endhash); + file.seek(this.atomicfields.getLocation()); + file.writeInt(this.atomicfields.getSize()); + file.writeLong(this.atomicfields.getNext()); + file.writeLong(this.atomicfields.getLower()); + file.writeInt(this.atomicfields.getOffset()); + file.writeInt(this.atomicfields.getEndhash()); } - public void addEntriesToList(List lst) throws IOException{ - if(lower>-1){ - if(lowerpage==null){ - file.seek(lower); - lowerpage=new TableIndexPage(index,file); + public void addEntriesToList(List lst) throws IOException{ + if(this.atomicfields.getLower()>-1){ + if(this.atomicfields.getLowerpage()==null){ + file.seek(this.atomicfields.getLower()); + this.atomicfields.setLowerpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file)); } - lowerpage.addEntriesToList(lst); + this.atomicfields.getLowerpage().addEntriesToList(lst); } - if(next>-1){ - if(nextpage==null){ - file.seek(next); - nextpage=new TableIndexPage(index,file); + if(this.atomicfields.getNext()>-1){ + if(this.atomicfields.getNextpage()==null){ + file.seek(this.atomicfields.getNext()); + this.atomicfields.setNextpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file)); } - nextpage.addEntriesToList(lst); + this.atomicfields.getNextpage().addEntriesToList(lst); } - file.seek(location+BASEOFFSET); + file.seek(this.atomicfields.getLocation()+BASEOFFSET); long pre=file.getFilePointer(); - while(file.getFilePointer()endhash){ - if(next==-1)return null; - if(nextpage==null){ - file.seek(next); - nextpage=new TableIndexPage(index,file); + if(hashcode>this.atomicfields.getEndhash()){ + if(this.atomicfields.getNext()==-1)return null; + 
if(this.atomicfields.getNextpage()==null){ + file.seek(this.atomicfields.getNext()); + this.atomicfields.setNextpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file)); } - index.stat_page_next++; - return nextpage.scanIndex(id,hashcode); + atomicfields.getIndex().atomicfields.setStat_page_next(atomicfields.getIndex().atomicfields.getStat_page_next()+1); + return this.atomicfields.getNextpage().scanIndex(id,hashcode); } - file.seek(location+BASEOFFSET); + file.seek(this.atomicfields.getLocation()+BASEOFFSET); long pre=file.getFilePointer(); - while(file.getFilePointer()endhash){ - if(next==-1)return -1; - if(nextpage==null){ - file.seek(next); - nextpage=new TableIndexPage(index,file); + if(hashcode>this.atomicfields.getEndhash()){ + if(this.atomicfields.getNext()==-1)return -1; + if(this.atomicfields.getNextpage()==null){ + file.seek(this.atomicfields.getNext()); + this.atomicfields.setNextpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file)); } - index.stat_page_next++; - return nextpage.getOffset(id,hashcode); + atomicfields.getIndex().atomicfields.setStat_page_next(atomicfields.getIndex().atomicfields.getStat_page_next()+1); + return this.atomicfields.getNextpage().getOffset(id,hashcode); } - file.seek(location+BASEOFFSET); + file.seek(this.atomicfields.getLocation()+BASEOFFSET); long pre=file.getFilePointer(); - while(file.getFilePointer()id.length()*2+4+4+8+1+2)return this; + if(this.atomicfields.getSize()-this.atomicfields.getOffset()>id.length()*2+4+4+8+1+2)return this; // Check next - if(next==-1){ - next=file.length(); + if(this.atomicfields.getNext()==-1){ + this.atomicfields.setNext(file.length()); updateMeta(); - return createNewPage(index,file,PagedIndex.PAGESIZE); + return createNewPage(this.atomicfields.getIndex(),file,PagedIndexTransactional.PAGESIZE); } - if(nextpage==null){ - file.seek(next); - nextpage=new TableIndexPage(index,file); + if(this.atomicfields.getNextpage()==null){ + file.seek(this.atomicfields.getNext()); + this.atomicfields.setNextpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file)); } - index.stat_page_next++; - return nextpage.getFirstFreePage(id,hashcode); + atomicfields.getIndex().atomicfields.setStat_page_next(atomicfields.getIndex().atomicfields.getStat_page_next()+1); + return this.atomicfields.getNextpage().getFirstFreePage(id,hashcode); } public void addEntry(String id,int rowsize,int location,long position) throws IOException{ - if(offset==0)starthash=id.hashCode(); - file.seek(this.location+BASEOFFSET+offset); - TableIndexEntry entry=new TableIndexEntry(id,rowsize,location,position); + if(atomicfields.getOffset()==0)this.atomicfields.setStarthash(id.hashCode()); + file.seek(this.atomicfields.getLocation()+BASEOFFSET+this.atomicfields.getOffset()); + TableIndexEntryTransactional entry=new TableIndexEntryTransactional(id,rowsize,location,position); entry.writeData(file); - offset+=entry.getSize(); - if(id.hashCode()>endhash)endhash=id.hashCode(); + this.atomicfields.setOffset(this.atomicfields.getOffset()+entry.getSize()); + if(id.hashCode()>this.atomicfields.getEndhash()) this.atomicfields.setEndhash(id.hashCode()); updateMeta(); } -}*/ \ No newline at end of file +} \ No newline at end of file diff --git a/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/TableIndexTransactional.java b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/TableIndexTransactional.java new file mode 100644 index 00000000..27ed2a65 --- /dev/null +++ 
b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/TableIndexTransactional.java
@@ -0,0 +1,20 @@
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package com.solidosystems.tuplesoup.core;
+
+import dstm2.AtomicSuperClass;
+import java.util.*;
+import java.io.*;
+
+public interface TableIndexTransactional extends AtomicSuperClass{
+    public Hashtable<String,Long> readStatistics();
+    public void updateEntry(String id,int rowsize,int location,long position) throws IOException;
+    public void addEntry(String id,int rowsize,int location,long position) throws IOException;
+    public TableIndexEntryTransactional scanIndex(String id) throws IOException;
+    public List<TableIndexEntryTransactional> scanIndex(List<String> rows) throws IOException;
+    public List<TableIndexEntryTransactional> scanIndex() throws IOException;
+    public void close();
+}
\ No newline at end of file
diff --git a/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/TableTransactional.java b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/TableTransactional.java
new file mode 100644
index 00000000..7f7ba772
--- /dev/null
+++ b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/TableTransactional.java
@@ -0,0 +1,157 @@
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package com.solidosystems.tuplesoup.core;
+
+/**
+ *
+ * @author navid
+ */
+/*
+ * Copyright (c) 2007, Solido Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of Solido Systems nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+import java.io.*;
+import java.util.*;
+import java.nio.channels.*;
+import com.solidosystems.tuplesoup.filter.*;
+
+/**
+ * The table stores a group of rows.
+ * Every row must have a unique id within a table.
+ */
+public interface TableTransactional{
+    // Index type constants
+    public static final int MEMORY=0;
+    public static final int FLAT=1;
+    public static final int PAGED=2;
+
+    // Row location constants
+    public static final int FILEA=0;
+    public static final int FILEB=1;
+    public static final int DELETE=2;
+    public static final int INDEX=3;
+
+    /**
+     * Return the current values of the statistic counters and reset them.
+     * The current counters are:
+     * <ul>
+     *  <li>stat_table_add
+     *  <li>stat_table_update
+     *  <li>stat_table_delete
+     *  <li>stat_table_add_size
+     *  <li>stat_table_update_size
+     *  <li>stat_table_read_size
+     *  <li>stat_table_read
+     *  <li>stat_table_cache_hit
+     *  <li>stat_table_cache_miss
+     *  <li>stat_table_cache_drop
+     * </ul>
+     * Furthermore, the index will be asked to deliver separate index-specific counters.
+     */
+    public Hashtable<String,Long> readStatistics();
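+    // Sketch of the read-and-reset contract (the "table" variable is assumed):
+    //   Hashtable<String,Long> stats = table.readStatistics();
+    //   long adds = stats.get("stat_table_add"); // adds since the previous call
+    //   table.readStatistics();                  // called again at once: all counters report 0
+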
+    /**
+     * Set the maximum allowed size of the index cache.
+     */
+    public void setIndexCacheSize(int newsize);
+
+    /**
+     * Close all open file streams.
+     */
+    public void close();
+
+    /**
+     * Returns the name of this table.
+     */
+    public String getTitle();
+
+    /**
+     * Returns the location of this table's datafiles.
+     */
+    public String getLocation();
+
+    /**
+     * Delete the files created by this table object.
+     * Be aware that this will delete any data stored in this table!
+     */
+    public void deleteFiles();
+
+    /**
+     * Adds a row of data to this table.
+     */
+    public void addRow(RowTransactional row) throws IOException;
+
+    /**
+     * Adds a row to this table if it doesn't already exist; if it does, the row is updated instead.
+     * This method is much slower than directly using add or update, so only use it if you don't know whether or not the row already exists.
+     */
+    public void addOrUpdateRow(RowTransactional row) throws IOException;
+
+    /**
+     * Updates a row stored in this table.
+     */
+    public void updateRow(RowTransactional row) throws IOException;
+
+    /**
+     * Marks a row as deleted in the index.
+     * Be aware that the space consumed by the row is not actually reclaimed.
+     */
+    public void deleteRow(RowTransactional row) throws IOException;
+
+    /**
+     * Returns a tuplestream containing the given list of rows.
+     */
+    public TupleStreamTransactional getRows(List<String> rows) throws IOException;
+
+    /**
+     * Returns a tuplestream containing the rows matching the given rowmatcher.
+     */
+    public TupleStreamTransactional getRows(RowMatcherTransactional matcher) throws IOException;
+
+    /**
+     * Returns a tuplestream containing those rows in the given list that match the given RowMatcher.
+     */
+    public TupleStreamTransactional getRows(List<String> rows,RowMatcherTransactional matcher) throws IOException;
+
+    /**
+     * Returns a tuplestream of all rows in this table.
+     */
+    public TupleStreamTransactional getRows() throws IOException;
+
+    /**
+     * Returns a single row stored in this table.
+     * If the row does not exist in the table, null will be returned.
+     */
+    public RowTransactional getRow(String id) throws IOException;
+}
\ No newline at end of file
diff --git a/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/TupleStreamTransactional.java b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/TupleStreamTransactional.java
new file mode 100644
index 00000000..d65dd22c
--- /dev/null
+++ b/Robust/Transactions/mytuplesoup/src/com/solidosystems/tuplesoup/core/TupleStreamTransactional.java
@@ -0,0 +1,17 @@
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package com.solidosystems.tuplesoup.core;
+
+import java.io.IOException;
+
+/**
+ *
+ * @author navid
+ */
+public abstract class TupleStreamTransactional {
+    public abstract boolean hasNext() throws IOException;
+    public abstract RowTransactional next() throws IOException;
+}
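
A minimal consumer sketch for the two types above. Only the interface methods
come from this patch; the DualFileTableTransactional constructor arguments are
assumptions made for illustration:

    TableTransactional table = new DualFileTableTransactional("customers", "/tmp/tuplesoup"); // hypothetical constructor
    RowTransactional row = table.getRow("id42");        // null if the id is unknown
    TupleStreamTransactional stream = table.getRows();  // stream over all rows
    while (stream.hasNext()) {                          // the reader pre-fetches one row ahead
        RowTransactional r = stream.next();
        System.out.println(r.getId());
    }
    table.close();                                      // release index and data file streams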