tmp.setLocaloffset(offset);
}
+
+
+ /**
+  * Skips over up to {@code n} bytes, clamping at the end of the file.
+  * Mirrors the java.io.RandomAccessFile#skipBytes contract: a
+  * non-positive n skips nothing and the position never passes length().
+  *
+  * @param n number of bytes to skip; non-positive values are a no-op
+  * @return the number of bytes actually skipped (0..n)
+  * @throws IOException if getFilePointer(), length() or seek() fails
+  */
+ public int skipBytes(int n) throws IOException {
+ long pos;
+ long len;
+ long newpos;
+
+ if (n <= 0) {
+ return 0;
+ }
+ pos = getFilePointer();
+ len = length();
+ newpos = pos + n;
+ // Clamp to end-of-file so we never seek past the last byte.
+ if (newpos > len) {
+ newpos = len;
+ }
+ seek(newpos);
+ /* return the actual number of bytes skipped */
+ return (int) (newpos - pos);
+ }
+
+ /**
+  * Reads one byte from the file and returns it as a signed value in the
+  * range -128..127 (the byte is sign-extended into the returned int).
+  */
+ public final int readByte(){
+ byte[] buf = new byte[1];
+ read(buf);
+ return buf[0];
+ }
+
+ /**
+  * Reads two big-endian bytes and returns them combined as an unsigned
+  * 16-bit char value (0..65535), matching java.io.DataInput#readChar.
+  */
+ public final int readChar(){
+ byte[] data = new byte[2];
+ read(data);
+ // BUG FIX: the low byte previously reused data[0] instead of data[1],
+ // and the signed bytes were not masked, so any negative byte corrupted
+ // the result through sign extension.
+ int result = (char)(((data[0] & 0xff) << 8) | (data[1] & 0xff));
+ return result;
+ }
+
+ /**
+  * Reads two big-endian bytes and returns them combined as a signed
+  * 16-bit value (-32768..32767), matching java.io.DataInput#readShort.
+  */
+ public final int readShort(){
+ byte[] data = new byte[2];
+ read(data);
+ // BUG FIX: mask both bytes before OR-ing; an unmasked negative data[1]
+ // sign-extends and overwrites the high byte of the result.
+ int result = (short)(((data[0] & 0xff) << 8) | (data[1] & 0xff));
+ return result;
+ }
+
+
public final int readInt(){
byte[] data = new byte[4];
read(data);
return result;
}
+
+ public final void writeByte(int b){
+ try{
+ byte[] result = new byte[1];
+ result[0] = (byte)b;
+ write(result);
+ }catch(IOException ex){
+ Logger.getLogger(TransactionalFile.class.getName()).log(Level.SEVERE, null, ex);
+ }
+
+ }
+
+ public final void writeChar(int value){
+ try{
+ byte[] result = new byte[2];
+ result[0] = (byte)(value >> 8);
+ result[1] = (byte)(value);
+ write(result);
+ }catch(IOException ex){
+ Logger.getLogger(TransactionalFile.class.getName()).log(Level.SEVERE, null, ex);
+ }
+
+ }
+
+
+ public final void writeShort(int value){
+ try{
+ byte[] result = new byte[2];
+ result[0] = (byte)(value >> 8);
+ result[1] = (byte)(value);
+ write(result);
+ }catch(IOException ex){
+ Logger.getLogger(TransactionalFile.class.getName()).log(Level.SEVERE, null, ex);
+ }
+
+ }
+
public final void writeInt(int value){
try {
byte[] result = new byte[4];
Logger.getLogger(TransactionalFile.class.getName()).log(Level.SEVERE, null, ex);
}
+ }
+
+ /**
+  * Writes every character of s as two big-endian bytes (UTF-16BE, no
+  * byte-order mark), matching java.io.DataOutput#writeChars.
+  *
+  * @param s the string to write; must not be null
+  * @throws IOException if the underlying write fails
+  */
+ public final void writeChars(String s) throws IOException {
+ int count = s.length();
+ byte[] buf = new byte[2 * count];
+ for (int i = 0; i < count; i++) {
+ char ch = s.charAt(i);
+ buf[2 * i] = (byte) (ch >>> 8);
+ buf[2 * i + 1] = (byte) ch;
+ }
+ write(buf);
 }
public int read(byte[] b) {
--- /dev/null
+package dstm2.util;
+
+import java.io.IOException;
+import java.util.AbstractSet;
+import java.util.ConcurrentModificationException;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import dstm2.Thread;
+import dstm2.atomic;
+import dstm2.factory.Factory;
+
+public class StringKeyHashMap<V extends dstm2.AtomicSuperClass> implements Iterable<V>{
+
+ /**
+ * The default initial capacity - MUST be a power of two.
+ */
+ static final int DEFAULT_INITIAL_CAPACITY = 16;
+
+ /**
+ * The maximum capacity, used if a higher value is implicitly specified
+ * by either of the constructors with arguments.
+ * MUST be a power of two <= 1<<30.
+ */
+ static final int MAXIMUM_CAPACITY = 1 << 30;
+
+ /**
+ * The load factor used when none specified in constructor.
+ **/
+ static final float DEFAULT_LOAD_FACTOR = 0.75f;
+
+ /**
+ * The table, resized as necessary. Length MUST Always be a power of two.
+ */
+ //transient TEntry<V>[] table;
+ dstm2.AtomicArray<TEntry> table;
+ final private Factory<TEntry> factory;
+
+ //transient Entry<K, V>[] table;
+
+ /**
+ * The number of key-value mappings contained in this identity hash map.
+ */
+ //transient int size;
+
+ java.util.concurrent.atomic.AtomicInteger size;
+
+ /*
+ * The capacity of the table
+ */
+ transient int capacity;
+
+ /**
+ * The next size value at which to resize (capacity * load factor).
+ * @serial
+ */
+ int threshold;
+
+ /**
+ * The load factor for the hash table.
+ *
+ * @serial
+ */
+ final float loadFactor;
+
+ /**
+ * The number of times this HashMap has been structurally modified
+ * Structural modifications are those that change the number of mappings in
+ * the HashMap or otherwise modify its internal structure (e.g.,
+ * rehash). This field is used to make iterators on Collection-views of
+ * the HashMap fail-fast. (See ConcurrentModificationException).
+ */
+ transient volatile int modCount;
+
+ /**
+ * Constructs an empty <tt>HashMap</tt> with the specified initial
+ * capacity and load factor.
+ *
+ * @param initialCapacity The initial capacity.
+ * @param loadFactor The load factor.
+ * @throws IllegalArgumentException if the initial capacity is negative
+ * or the load factor is nonpositive.
+ */
+ public StringKeyHashMap(int initialCapacity, float loadFactor) {
+ size = new AtomicInteger(0);
+ // Transactional factory used to allocate TEntry instances (dstm2).
+ factory = Thread.makeFactory(TEntry.class);
+ if (initialCapacity < 0)
+ throw new IllegalArgumentException("Illegal initial capacity: " +
+ initialCapacity);
+ if (initialCapacity > MAXIMUM_CAPACITY)
+ initialCapacity = MAXIMUM_CAPACITY;
+ if (loadFactor <= 0 || Float.isNaN(loadFactor))
+ throw new IllegalArgumentException("Illegal load factor: " +
+ loadFactor);
+
+ // Find a power of 2 >= initialCapacity
+ int capacity = 1;
+ while (capacity < initialCapacity)
+ capacity <<= 1;
+
+ // NOTE(review): the local "capacity" shadows the field until this
+ // assignment; both end up holding the same power-of-two value.
+ this.capacity = capacity;
+ this.loadFactor = loadFactor;
+ threshold = (int)(capacity * loadFactor);
+ table = new dstm2.AtomicArray<TEntry>(TEntry.class, capacity);
+// for(int i = 0; i < capacity; i++) {
+// table[i] = factory.create();
+// }
+ //table = new Entry[capacity];
+ init();
+ }
+
+ /**
+ * Constructs an empty <tt>HashMap</tt> with the specified initial
+ * capacity and the default load factor (0.75).
+ *
+ * @param initialCapacity the initial capacity.
+ * @throws IllegalArgumentException if the initial capacity is negative.
+ */
+ public StringKeyHashMap(int initialCapacity) {
+ this(initialCapacity, DEFAULT_LOAD_FACTOR);
+ }
+
+ /**
+ * Constructs an empty <tt>HashMap</tt> with the default initial capacity
+ * (16) and the default load factor (0.75).
+ */
+ public StringKeyHashMap() {
+ this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR);
+ }
+
+ /**
+ * Constructs a new <tt>HashMap</tt> with the same mappings as the
+ * specified <tt>Map</tt>. The <tt>HashMap</tt> is created with
+ * default load factor (0.75) and an initial capacity sufficient to
+ * hold the mappings in the specified <tt>Map</tt>.
+ *
+ * @param m the map whose mappings are to be placed in this map.
+ * @throws NullPointerException if the specified map is null.
+ */
+ public StringKeyHashMap(StringKeyHashMap<? extends V> m) {
+ this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
+ DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR);
+ putAllForCreate(m);
+ }
+
+ // internal utilities
+
+ /**
+ * Initialization hook for subclasses. This method is called
+ * in all constructors and pseudo-constructors (clone, readObject)
+ * after HashMap has been initialized but before any entries have
+ * been inserted. (In the absence of this method, readObject would
+ * require explicit knowledge of subclasses.)
+ */
+ void init() {
+ }
+ public dstm2.AtomicArray<TEntry> getBuckets() {
+ return table;
+ }
+ /**
+ * Value representing null keys inside tables.
+ */
+ static final Object NULL_KEY = new Object();
+
+ /**
+ * Returns internal representation for key. Use NULL_KEY if key is null.
+ */
+ static <T> T maskNull(T key) {
+ return key == null ? (T)NULL_KEY : key;
+ }
+
+ /**
+ * Returns key represented by specified internal representation.
+ */
+ static <T> T unmaskNull(T key) {
+ return (key == NULL_KEY ? null : key);
+ }
+
+ /**
+ * Whether to prefer the old supplemental hash function, for
+ * compatibility with broken applications that rely on the
+ * internal hashing order.
+ *
+ * Set to true only by hotspot when invoked via
+ * -XX:+UseNewHashFunction or -XX:+AggressiveOpts
+ */
+ private static final boolean useNewHash;
+ static { useNewHash = false; }
+
+ private static int oldHash(int h) {
+ h += ~(h << 9);
+ h ^= (h >>> 14);
+ h += (h << 4);
+ h ^= (h >>> 10);
+ return h;
+ }
+
+ private static int newHash(int h) {
+ // This function ensures that hashCodes that differ only by
+ // constant multiples at each bit position have a bounded
+ // number of collisions (approximately 8 at default load factor).
+ h ^= (h >>> 20) ^ (h >>> 12);
+ return h ^ (h >>> 7) ^ (h >>> 4);
+ }
+
+ /**
+ * Applies a supplemental hash function to a given hashCode, which
+ * defends against poor quality hash functions. This is critical
+ * because HashMap uses power-of-two length hash tables, that
+ * otherwise encounter collisions for hashCodes that do not differ
+ * in lower bits.
+ */
+ public static int hash(int h) {
+ return useNewHash ? newHash(h) : oldHash(h);
+ }
+
+ static int hash(Object key) {
+ return hash(key.hashCode());
+ }
+
+ /**
+ * Check for equality of non-null reference x and possibly-null y.
+ */
+ static boolean eq(Object x, Object y) {
+ return x == y || x.equals(y);
+ }
+
+ /**
+ * Returns index for hash code h.
+ */
+ public static int indexFor(int h, int length) {
+ return h & (length-1);
+ }
+
+ /**
+ * Returns the number of key-value mappings in this map.
+ *
+ * @return the number of key-value mappings in this map.
+ */
+ public int size() {
+ return size.get();
+ }
+
+ /**
+ * Returns <tt>true</tt> if this map contains no key-value mappings.
+ *
+ * @return <tt>true</tt> if this map contains no key-value mappings.
+ */
+ public boolean isEmpty() {
+ return size.get() == 0;
+ }
+
+ /**
+ * Returns the value to which the specified key is mapped in this identity
+ * hash map, or <tt>null</tt> if the map contains no mapping for this key.
+ * A return value of <tt>null</tt> does not <i>necessarily</i> indicate
+ * that the map contains no mapping for the key; it is also possible that
+ * the map explicitly maps the key to <tt>null</tt>. The
+ * <tt>containsKey</tt> method may be used to distinguish these two cases.
+ *
+ * @param key the key whose associated value is to be returned.
+ * @return the value to which this map maps the specified key, or
+ * <tt>null</tt> if the map contains no mapping for this key.
+ * @see #put(Object, Object)
+ */
+ public V get(Object key) {
+// if (key == null)
+// return getForNullKey();
+ // NOTE(review): null keys are not supported here — hash(key) calls
+ // key.hashCode() and would throw NullPointerException; the null-key
+ // path above is commented out. Confirm callers never pass null.
+ int hash = hash(key);
+ // Walk the collision chain of the bucket this key hashes to.
+ for (TEntry<V> e = table.get(indexFor(hash, capacity));
+ e != null;
+ e = e.getNext()) {
+ Object k;
+ if (e.getHash() == hash && ((k = e.getKey()) == key || key.equals(k)))
+ return e.getValue();
+ }
+ return null;
+ }
+
+// private V getForNullKey() {
+// int hash = hash(NULL_KEY.hashCode());
+// int i = indexFor(hash, capacity);
+// TEntry<K,V> e = table[i];
+// //Entry<K,V> e = table[i];
+// while (true) {
+// if (e == null)
+// return null;
+// if (e.getKey() == NULL_KEY)
+// return e.getValue();
+// e = e.getNext();
+// }
+// }
+
+ /**
+ * Returns <tt>true</tt> if this map contains a mapping for the
+ * specified key.
+ *
+ * @param key The key whose presence in this map is to be tested
+ * @return <tt>true</tt> if this map contains a mapping for the specified
+ * key.
+ */
+ public boolean containsKey(String key) {
+ // NOTE(review): the key is null-masked before hashing, but get()/put()
+ // never mask, so a null key hashed here can never match an entry
+ // stored by put(). Confirm whether null keys are meant to be
+ // supported at all.
+ Object k = maskNull(key);
+ int hash = hash(k);
+ int i = indexFor(hash, capacity);
+ TEntry<V> e = table.get(i);
+ // Match by stored hash plus reference/equals comparison on the key.
+ while (e != null) {
+ if (e.getHash() == hash && ((k = e.getKey()) == key || (key != null && key.equals(k))))
+ return true;
+ e = e.getNext();
+ }
+ return false;
+ }
+
+ /**
+ * Returns the entry associated with the specified key in the
+ * HashMap. Returns null if the HashMap contains no mapping
+ * for this key.
+ */
+ // NOTE(review): this looks an entry up by an int, not by a key. Its only
+ // visible caller (EntrySet.contains) passes e.getHash(), which is then
+ // hashed AGAIN by hash(int) below — so the bucket and hash compared here
+ // generally differ from the ones entries were stored under. Matching is
+ // also by hash alone, never by key equality. Verify against the callers
+ // before relying on this method.
+ TEntry<V> getEntry(int key) {
+ int hash = hash(key);
+ int i = indexFor(hash, capacity);
+ TEntry<V> e = table.get(i);
+ while (e != null && !(e.getHash() == hash))
+ e = e.getNext();
+ return e;
+ }
+
+ /**
+ * Associates the specified value with the specified key in this map.
+ * If the map previously contained a mapping for this key, the old
+ * value is replaced.
+ *
+ * @param key key with which the specified value is to be associated.
+ * @param value value to be associated with the specified key.
+ * @return previous value associated with specified key, or <tt>null</tt>
+ * if there was no mapping for key. A <tt>null</tt> return can
+ * also indicate that the HashMap previously associated
+ * <tt>null</tt> with the specified key.
+ */
+ public V put(String key, V value) {
+ // Same supplemental hashing as get(): hash of key.hashCode().
+ int hash = hash(key.hashCode());
+ int i = indexFor(hash, capacity);
+ // If an equal key already exists in the chain, replace its value
+ // in place and return the previous value.
+ for (TEntry<V> e = table.get(i); e != null; e = e.getNext()) {
+ Object k;
+ if (e.getHash() == hash && ((k = e.getKey()) == key || key.equals(k))) {
+ V oldValue = e.getValue();
+ e.setValue(value);
+ return oldValue;
+ }
+
+ }
+ // New mapping: bump modCount (fail-fast iterators) and prepend a new
+ // entry to the bucket; addEntry also handles size/resize bookkeeping.
+ modCount++;
+ addEntry(hash, key, value, i);
+ return null;
+ }
+
+
+
+ /**
+ * This method is used instead of put by constructors and
+ * pseudoconstructors (clone, readObject). It does not resize the table,
+ * check for comodification, etc. It calls createEntry rather than
+ * addEntry.
+ */
+ private void putForCreate(String key, V value) {
+ int hash = hash(key);
+ int i = indexFor(hash, capacity);
+
+ /**
+ * Look for preexisting entry for key. This will never happen for
+ * clone or deserialize. It will only happen for construction if the
+ * input Map is a sorted map whose ordering is inconsistent w/ equals.
+ */
+ // NOTE(review): unlike put(), the match below is by hash only — two
+ // distinct keys with colliding hashes would overwrite each other
+ // during pseudo-construction. Acceptable only if callers guarantee
+ // distinct hashes; confirm.
+ for (TEntry<V> e = table.get(i); e != null; e = e.getNext()) {
+ if (e.getHash() == hash) {
+ e.setValue(value);
+ return;
+ }
+ }
+
+ createEntry(hash, key, value, i);
+ }
+
+ void putAllForCreate(StringKeyHashMap<? extends V> m) {
+ for (Iterator<? extends StringKeyHashMap.TEntry<? extends V>> i = m.entrySet().iterator(); i.hasNext(); ) {
+ StringKeyHashMap.TEntry<? extends V> e = i.next();
+ putForCreate(e.getKey(), e.getValue());
+ }
+ }
+
+ /**
+ * Rehashes the contents of this map into a new array with a
+ * larger capacity. This method is called automatically when the
+ * number of keys in this map reaches its threshold.
+ *
+ * If current capacity is MAXIMUM_CAPACITY, this method does not
+ * resize the map, but sets threshold to Integer.MAX_VALUE.
+ * This has the effect of preventing future calls.
+ *
+ * @param newCapacity the new capacity, MUST be a power of two;
+ * must be greater than current capacity unless current
+ * capacity is MAXIMUM_CAPACITY (in which case value
+ * is irrelevant).
+ */
+ void resize(int newCapacity) {
+ dstm2.AtomicArray<TEntry> oldTable = table;
+ int oldCapacity = capacity;
+ // At the hard cap the map cannot grow: disable future resizes.
+ if (oldCapacity == MAXIMUM_CAPACITY) {
+ threshold = Integer.MAX_VALUE;
+ return;
+ }
+
+ dstm2.AtomicArray<TEntry> newTable = new dstm2.AtomicArray<TEntry>(TEntry.class, newCapacity);
+ transfer(newTable, newCapacity);
+ // Publish the new table, then update threshold and capacity.
+ // NOTE(review): a concurrent reader may briefly see the new table
+ // paired with the old "capacity" field — confirm the dstm2
+ // transaction machinery (or addEntry's lock) covers this window.
+ table = newTable;
+ threshold = (int)(newCapacity * loadFactor);
+ capacity = newCapacity;
+ }
+
+ /**
+ * Transfer all entries from current table to newTable.
+ */
+ void transfer(dstm2.AtomicArray<TEntry> newTable, int nc) {
+ dstm2.AtomicArray<TEntry> src = table;
+ int newCapacity = nc;
+ for (int j = 0; j < capacity; j++) {
+ TEntry<V> e = src.get(j);
+ if (e != null) {
+ // Detach the whole chain, then relink each entry into its
+ // new bucket (prepending, so chain order is reversed).
+ src.set(j, null);
+ do {
+ TEntry<V> next = e.getNext();
+ int i = indexFor(e.getHash(), newCapacity);
+ e.setNext(newTable.get(i));
+ newTable.set(i, e);
+ e = next;
+ } while (e != null);
+ }
+ }
+ }
+
+ /**
+ * Copies all of the mappings from the specified map to this map
+ * These mappings will replace any mappings that
+ * this map had for any of the keys currently in the specified map.
+ *
+ * @param m mappings to be stored in this map.
+ * @throws NullPointerException if the specified map is null.
+ */
+ public void putAll(StringKeyHashMap<? extends V> m) {
+ int numKeysToBeAdded = m.size();
+ if (numKeysToBeAdded == 0)
+ return;
+
+ /*
+ * Expand the map if the map if the number of mappings to be added
+ * is greater than or equal to threshold. This is conservative; the
+ * obvious condition is (m.size() + size) >= threshold, but this
+ * condition could result in a map with twice the appropriate capacity,
+ * if the keys to be added overlap with the keys already in this map.
+ * By using the conservative calculation, we subject ourself
+ * to at most one extra resize.
+ */
+ if (numKeysToBeAdded > threshold) {
+ int targetCapacity = (int)(numKeysToBeAdded / loadFactor + 1);
+ if (targetCapacity > MAXIMUM_CAPACITY)
+ targetCapacity = MAXIMUM_CAPACITY;
+ int newCapacity = capacity;
+ while (newCapacity < targetCapacity)
+ newCapacity <<= 1;
+ if (newCapacity > capacity)
+ resize(newCapacity);
+ }
+
+ for (Iterator<? extends StringKeyHashMap.TEntry<? extends V>> i = m.entrySet().iterator(); i.hasNext(); ) {
+ StringKeyHashMap.TEntry<? extends V> e = i.next();
+ put(e.getKey(), e.getValue());
+ }
+ }
+
+ /**
+ * Removes the mapping for this key from this map if present.
+ *
+ * @param key key whose mapping is to be removed from the map.
+ * @return previous value associated with specified key, or <tt>null</tt>
+ * if there was no mapping for key. A <tt>null</tt> return can
+ * also indicate that the map previously associated <tt>null</tt>
+ * with the specified key.
+ */
+ public V remove(String key) {
+ TEntry<V> e = removeEntryForKey(key);
+ return (e == null ? null : e.getValue());
+ }
+
+ /**
+ * Removes and returns the entry associated with the specified key
+ * in the HashMap. Returns null if the HashMap contains no mapping
+ * for this key.
+ */
+ TEntry<V> removeEntryForKey(String key) {
+ // hash(key) dispatches to hash(Object) -> hash(key.hashCode()),
+ // the same supplemental hash used by put().
+ int hash = hash(key);
+ int i = indexFor(hash, capacity);
+ TEntry<V> prev = table.get(i);
+ TEntry<V> e = prev;
+
+ // Walk the chain keeping a trailing pointer so the matching entry
+ // can be unlinked in place.
+ while (e != null) {
+ TEntry<V> next = e.getNext();
+ Object k;
+ if (e.getHash() == hash && ((k = e.getKey()) == key || (key != null && key.equals(k))))
+ {
+ modCount++;
+ size.decrementAndGet();
+ if (prev == e)
+ table.set(i, next);
+ else
+ prev.setNext(next);
+// e.recordRemoval(this);
+ return e;
+ }
+ prev = e;
+ e = next;
+ }
+
+ // Not found: e is null here.
+ return e;
+ }
+
+ /**
+ * Special version of remove for EntrySet.
+ */
+ /**
+  * Special version of remove for EntrySet: unlinks the chain entry whose
+  * stored hash matches that of the given entry.
+  *
+  * @param o the entry to remove (matched by its stored hash)
+  * @return the unlinked entry, or null if no entry with that hash exists
+  */
+ TEntry<V> removeMapping(TEntry<V> o) {
+
+ // BUG FIX: the stored hash was previously re-hashed (hash(o.getHash())),
+ // which computed a different bucket and hash than the ones the entry
+ // was inserted under, so the entry could never be found. Entries carry
+ // their already-supplemented hash; use it directly.
+ int hash = o.getHash();
+ int i = indexFor(hash, capacity);
+ TEntry<V> prev = table.get(i);
+ TEntry<V> e = prev;
+
+ while (e != null) {
+ TEntry<V> next = e.getNext();
+ // NOTE(review): matching is by hash only, mirroring getEntry(); two
+ // distinct keys with colliding hashes are not distinguished here.
+ if (e.getHash() == hash) {
+ modCount++;
+ size.decrementAndGet();
+ if (prev == e)
+ table.set(i, next);
+ else
+ prev.setNext(next);
+// e.recordRemoval(this);
+ return e;
+ }
+ prev = e;
+ e = next;
+ }
+
+ return e;
+ }
+
+ /**
+ * Removes all mappings from this map.
+ */
+ public void clear() {
+ modCount++;
+ // NOTE(review): the local "tab" is unused; the loop below goes
+ // through the "table" field directly.
+ dstm2.AtomicArray<TEntry> tab = table;
+ // Null every bucket head, dropping all chains.
+ for (int i = 0; i < capacity; i++)
+ table.set(i, null);
+ size.set(0);
+ }
+
+ /**
+ * Returns <tt>true</tt> if this map maps one or more keys to the
+ * specified value.
+ *
+ * @param value value whose presence in this map is to be tested.
+ * @return <tt>true</tt> if this map maps one or more keys to the
+ * specified value.
+ */
+ public boolean containsValue(Object value) {
+ if (value == null)
+ return containsNullValue();
+
+ dstm2.AtomicArray<TEntry> tab = table;
+ for (int i = 0; i < capacity; i++)
+ for (TEntry e = tab.get(i); e != null ; e = e.getNext())
+ if (value.equals(e.getValue()))
+ return true;
+ return false;
+ }
+
+ /**
+ * Special-case code for containsValue with null argument
+ **/
+ private boolean containsNullValue() {
+ dstm2.AtomicArray<TEntry> tab = table;
+ for (int i = 0; i < capacity ; i++)
+ for (TEntry e = tab.get(i) ; e != null ; e = e.getNext())
+ if (e.getValue() == null)
+ return true;
+ return false;
+ }
+
+ @atomic public interface TEntry<V> {
+ public String getKey();
+ public int getHash();
+ public V getValue();
+ public TEntry<V> getNext();
+ public void setKey(String key);
+ public void setHash(int h);
+ public void setValue(V v);
+ public void setNext(TEntry<V> n);
+
+ }
+
+
+ /**
+ * Add a new entry with the specified key, value and hash code to
+ * the specified bucket. It is the responsibility of this
+ * method to resize the table if appropriate.
+ *
+ * Subclass overrides this to alter the behavior of put method.
+ */
+ void addEntry(int hash, String key, V value, int bucketIndex) {
+ // Prepend a freshly created entry to the bucket's chain.
+ TEntry<V> e = table.get(bucketIndex);
+ TEntry<V> n = factory.create();
+ n.setKey(key);
+ n.setHash(hash);
+ n.setValue(value);
+ n.setNext(e);
+ table.set(bucketIndex, n);
+ // Double-checked resize: re-test size under the lock so two
+ // concurrent adders do not both grow the table.
+ if (size.incrementAndGet() >= threshold) {
+ synchronized(this) {
+ if(size.get() >= threshold)
+ resize(2 * capacity);
+ }
+ }
+ }
+
+ /**
+ * Like addEntry except that this version is used when creating entries
+ * as part of Map construction or "pseudo-construction" (cloning,
+ * deserialization). This version needn't worry about resizing the table.
+ *
+ * Subclass overrides this to alter the behavior of HashMap(Map),
+ * clone, and readObject.
+ */
+ void createEntry(int hash, String key, V value, int bucketIndex) {
+ TEntry<V> e = table.get(bucketIndex);
+ TEntry<V> n = factory.create();
+ n.setKey(key);
+ n.setHash(hash);
+ n.setValue(value);
+ n.setNext(e);
+ table.set(bucketIndex, n);
+ size.incrementAndGet();
+ }
+
+ /**
+  * Base iterator over the map's entries. Walks the bucket array from the
+  * highest index downward and each collision chain front to back.
+  * Fail-fast: any structural modification outside of remove() makes the
+  * next call throw ConcurrentModificationException.
+  */
+ private abstract class HashIterator<E> implements Iterator<E> {
+ TEntry<V> next; // next entry to return
+ int expectedModCount; // For fast-fail
+ int index; // current slot
+ TEntry<V> current; // current entry
+
+ HashIterator() {
+ expectedModCount = modCount;
+ dstm2.AtomicArray<TEntry> t = table;
+ int i = capacity;
+ TEntry<V> n = null;
+ if (size.get() != 0) { // advance to first entry
+ while (i > 0 && (n = t.get(--i)) == null)
+ ;
+ }
+ next = n;
+ index = i;
+ }
+
+ public boolean hasNext() {
+ return next != null;
+ }
+
+ TEntry<V> nextEntry() {
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ TEntry<V> e = next;
+ if (e == null)
+ throw new NoSuchElementException();
+
+ // Advance: rest of the current chain first, then earlier buckets.
+ TEntry<V> n = e.getNext();
+ dstm2.AtomicArray<TEntry> t = table;
+ int i = index;
+ while (n == null && i > 0)
+ n = t.get(--i);
+ index = i;
+ next = n;
+ return current = e;
+ }
+
+ public void remove() {
+ if (current == null)
+ throw new IllegalStateException();
+ if (modCount != expectedModCount)
+ throw new ConcurrentModificationException();
+ String k = current.getKey();
+ current = null;
+ // Delegate to the map, then resync the expected mod count so this
+ // iterator's own removal is not treated as a concurrent change.
+ StringKeyHashMap.this.removeEntryForKey(k);
+ expectedModCount = modCount;
+ }
+
+ }
+
+ private class ValueIterator extends HashIterator<V> {
+ public V next() {
+ return nextEntry().getValue();
+ }
+ }
+
+ private class KeyIterator extends HashIterator<Integer> {
+ public Integer next() {
+ return nextEntry().getHash();
+ }
+ }
+
+// private class EntryIterator extends HashIterator<Map.Entry<K,V>> {
+// public Map.Entry<K,V> next() {
+// return nextEntry();
+// }
+// }
+
+ // Subclass overrides these to alter behavior of views' iterator() method
+ public Iterator<Integer> newKeyIterator() {
+ return new KeyIterator();
+ }
+ public Iterator<V> newValueIterator() {
+ return new ValueIterator();
+ }
+// Iterator<Map.Entry<K,V>> newEntryIterator() {
+// return new EntryIterator();
+// }
+
+
+ // Views
+
+ private transient Set<StringKeyHashMap.TEntry<V>> entrySet = null;
+
+
+
+ private class KeySet extends AbstractSet<Integer> {
+ public Iterator<Integer> iterator() {
+ return newKeyIterator();
+ }
+ public int size() {
+ return size.get();
+ }
+ public boolean contains(String o) {
+ return containsKey(o);
+ }
+ public boolean remove(String o) {
+ return StringKeyHashMap.this.removeEntryForKey(o) != null;
+ }
+ public void clear() {
+ StringKeyHashMap.this.clear();
+ }
+ }
+
+
+
+ /**
+ * Returns a collection view of the mappings contained in this map. Each
+ * element in the returned collection is a <tt>Map.Entry</tt>. The
+ * collection is backed by the map, so changes to the map are reflected in
+ * the collection, and vice-versa. The collection supports element
+ * removal, which removes the corresponding mapping from the map, via the
+ * <tt>Iterator.remove</tt>, <tt>Collection.remove</tt>,
+ * <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> operations.
+ * It does not support the <tt>add</tt> or <tt>addAll</tt> operations.
+ *
+ * @return a collection view of the mappings contained in this map.
+ * @see Map.Entry
+ */
+ public Set<StringKeyHashMap.TEntry<V>> entrySet() {
+ Set<StringKeyHashMap.TEntry<V>> es = entrySet;
+ return (es != null ? es : (entrySet = (Set<StringKeyHashMap.TEntry<V>>) (Set) new EntrySet()));
+ }
+
+ private class EntrySet {//extends AbstractSet/*<Map.Entry<K,V>>*/ {
+// public Iterator/*<Map.Entry<K,V>>*/ iterator() {
+// return newEntryIterator();
+// }
+ public boolean contains(StringKeyHashMap.TEntry<V> o) {
+ StringKeyHashMap.TEntry<V> e = (StringKeyHashMap.TEntry<V>) o;
+ TEntry<V> candidate = getEntry(e.getHash());
+ return candidate != null && candidate.equals(e);
+ }
+ public boolean remove(StringKeyHashMap.TEntry<V> o) {
+ return removeMapping(o) != null;
+ }
+ public int size() {
+ return size.get();
+ }
+ public void clear() {
+ StringKeyHashMap.this.clear();
+ }
+ }
+
+ /**
+ * Save the state of the <tt>HashMap</tt> instance to a stream (i.e.,
+ * serialize it).
+ *
+ * @serialData The <i>capacity</i> of the HashMap (the length of the
+ * bucket array) is emitted (int), followed by the
+ * <i>size</i> of the HashMap (the number of key-value
+ * mappings), followed by the key (Object) and value (Object)
+ * for each key-value mapping represented by the HashMap
+ * The key-value mappings are emitted in the order that they
+ * are returned by <tt>entrySet().iterator()</tt>.
+ *
+ */
+ private void writeObject(java.io.ObjectOutputStream s)
+ throws IOException
+ {
+ Iterator<StringKeyHashMap.TEntry<V>> i = entrySet().iterator();
+
+ // Write out the threshold, loadfactor, and any hidden stuff
+ s.defaultWriteObject();
+
+ // Write out number of buckets
+ s.writeInt(capacity);
+
+ // Write out size (number of Mappings)
+ s.writeInt(size.get());
+
+ // Write out keys and values (alternating)
+ // NOTE(review): this serializes each entry's int hash in place of its
+ // String key; the (disabled) readObject counterpart reads an Integer
+ // back, so original keys are not recoverable from the stream. Confirm
+ // this is intended before enabling serialization.
+ while (i.hasNext()) {
+ StringKeyHashMap.TEntry<V> e = i.next();
+ s.writeObject(e.getHash());
+ s.writeObject(e.getValue());
+ }
+ }
+
+ private static final long serialVersionUID = 362498820763181265L;
+
+ /**
+ * Reconstitute the <tt>HashMap</tt> instance from a stream (i.e.,
+ * deserialize it).
+ */
+ /* private void readObject(java.io.ObjectInputStream s)
+ throws IOException, ClassNotFoundException
+ {
+ // Read in the threshold, loadfactor, and any hidden stuff
+ s.defaultReadObject();
+
+ // Read in number of buckets and allocate the bucket array;
+ int numBuckets = s.readInt();
+ table = new dstm2.AtomicArray(TEntry.class, numBuckets);
+
+ init(); // Give subclass a chance to do its thing.
+
+ // Read in size (number of Mappings)
+ int size = s.readInt();
+
+ // Read the keys and values, and put the mappings in the HashMap
+ for (int i=0; i<size; i++) {
+ int key = (Integer) s.readObject();
+ V value = (V) s.readObject();
+ putForCreate(key, value);
+ }
+ }*/
+
+ // These methods are used when serializing HashSets
+ int capacity() { return capacity; }
+ float loadFactor() { return loadFactor; }
+
+ public int getCapacity() {
+ return capacity;
+ }
+
+
+ /**
+  * Returns an iterator over the values in the map, walking the bucket
+  * array from index 0 upward and each collision chain front to back.
+  * Not fail-fast; remove() is unsupported.
+  */
+ public Iterator<V> iterator() {
+ return new Iterator<V>() {
+ // Index of the bucket "cursor" belongs to.
+ int tableIndex = 0;
+ // Next entry to hand out; null when the iteration is exhausted.
+ public TEntry<V> cursor = advanceFrom(table.get(0));
+
+ // Starting from e (possibly null) in bucket "tableIndex", find the
+ // next non-null entry, moving to later buckets as needed; returns
+ // null when no entries remain.
+ // BUG FIX: the original started at bucket 0 unconditionally —
+ // reporting an empty iteration whenever bucket 0 happened to be
+ // empty — and next() spun forever in "while(cursor==null)" once
+ // the last entry was consumed, because nothing bounded tableIndex.
+ private TEntry<V> advanceFrom(TEntry<V> e) {
+ while (e == null && tableIndex + 1 < capacity) {
+ tableIndex++;
+ e = table.get(tableIndex);
+ }
+ return e;
+ }
+
+ public boolean hasNext() {
+ return cursor != null;
+ }
+
+ public V next() {
+ if (cursor == null) {
+ throw new NoSuchElementException();
+ }
+ TEntry<V> node = cursor;
+ cursor = advanceFrom(cursor.getNext());
+ return node.getValue();
+ }
+
+ public void remove() {
+ throw new UnsupportedOperationException();
+ }
+
+ };
+ }
+}
+
import java.util.*;
import java.nio.channels.*;
import com.solidosystems.tuplesoup.filter.*;
-import dstm2.atomic;
/**
* The table stores a group of rows.
private int indexcacheusage;
private Hashtable<String,TableIndexNode> indexcache;
-
- DualFileTableTSInf atomicfields;
// Statistic counters
- public @atomic interface DualFileTableTSInf{
- long getstat_add();
- long getstat_update();
- long getstat_delete();
- long getstat_add_size();
- long getstat_update_size();
- long getstat_read_size();
- long getstat_read();
- long getstat_cache_hit();
- long getstat_cache_miss();
- long getstat_cache_drop();
-
- void setstat_add(long val);
- void setstat_update(long val);
- void setstat_delete(long val);
- void setstat_add_size(long val);
- void setstat_update_size(long val);
- void setstat_read_size(long val);
- void setstat_read(long val);
- void setstat_cache_hit(long val);
- void setstat_cache_miss(long val);
- void setstat_cache_drop(long val);
- }
-
- /*long stat_add=0;
+ long stat_add=0;
long stat_update=0;
long stat_delete=0;
long stat_add_size=0;
long stat_read=0;
long stat_cache_hit=0;
long stat_cache_miss=0;
- long stat_cache_drop=0;*/
+ long stat_cache_drop=0;
protected String statlock="stat-dummy";
public Hashtable<String,Long> readStatistics(){
Hashtable<String,Long> hash=new Hashtable<String,Long>();
synchronized(statlock){
- hash.put("stat_table_add",atomicfields.getstat_add());
- hash.put("stat_table_update",atomicfields.getstat_update());
- hash.put("stat_table_delete",atomicfields.getstat_delete());
- hash.put("stat_table_add_size",atomicfields.getstat_add_size());
- hash.put("stat_table_update_size",atomicfields.getstat_update_size());
- hash.put("stat_table_read_size",atomicfields.getstat_read_size());
- hash.put("stat_table_read",atomicfields.getstat_read());
- hash.put("stat_table_cache_hit",atomicfields.getstat_cache_hit());
- hash.put("stat_table_cache_miss",atomicfields.getstat_cache_miss());
- hash.put("stat_table_cache_drop",atomicfields.getstat_cache_drop());
- atomicfields.setstat_add(0);
- atomicfields.setstat_update(0);
- atomicfields.setstat_delete(0);
- atomicfields.setstat_add_size(0);
- atomicfields.setstat_update_size(0);
- atomicfields.setstat_read_size(0);
- atomicfields.setstat_read(0);
- atomicfields.setstat_cache_hit(0);
- atomicfields.setstat_cache_miss(0);
- atomicfields.setstat_cache_drop(0);
+ hash.put("stat_table_add",stat_add);
+ hash.put("stat_table_update",stat_update);
+ hash.put("stat_table_delete",stat_delete);
+ hash.put("stat_table_add_size",stat_add_size);
+ hash.put("stat_table_update_size",stat_update_size);
+ hash.put("stat_table_read_size",stat_read_size);
+ hash.put("stat_table_read",stat_read);
+ hash.put("stat_table_cache_hit",stat_cache_hit);
+ hash.put("stat_table_cache_miss",stat_cache_miss);
+ hash.put("stat_table_cache_drop",stat_cache_drop);
+ stat_add=0;
+ stat_update=0;
+ stat_delete=0;
+ stat_add_size=0;
+ stat_update_size=0;
+ stat_read_size=0;
+ stat_read=0;
+ stat_cache_hit=0;
+ stat_cache_miss=0;
+ stat_cache_drop=0;
Hashtable<String,Long> ihash=index.readStatistics();
hash.putAll(ihash);
}
indexcache.remove(node.getData().getId());
indexcacheusage--;
synchronized(statlock){
- atomicfields.setstat_cache_drop(atomicfields.getstat_cache_drop()+1);
+ stat_cache_drop++;
}
indexcachefirst=node.getNext();
if(indexcachefirst==null){
row.writeToStream(fileastream);
int post=fileastream.size();
fileastream.flush();
-
synchronized(statlock){
- atomicfields.setstat_add(atomicfields.getstat_add()+1);
- atomicfields.setstat_add_size(atomicfields.getstat_add_size()+row.getSize());
- }
-
+ stat_add++;
+ stat_add_size+=row.getSize();
+ }
index.addEntry(row.getId(),row.getSize(),FILEA,fileaposition);
if(INDEXCACHESIZE>0){
TableIndexEntry entry=new TableIndexEntry(row.getId(),row.getSize(),FILEA,fileaposition);
int post=filebstream.size();
filebstream.flush();
synchronized(statlock){
- atomicfields.setstat_add(atomicfields.getstat_add()+1);
- atomicfields.setstat_add_size(atomicfields.getstat_add_size()+row.getSize());
+ stat_add++;
+ stat_add_size+=row.getSize();
}
index.addEntry(row.getId(),row.getSize(),FILEB,filebposition);
if(INDEXCACHESIZE>0){
}
indexcacheusage--;
synchronized(statlock){
- atomicfields.setstat_cache_drop(atomicfields.getstat_cache_drop()+1);
+ stat_cache_drop++;
}
}
}
indexcachelast=node;
}
synchronized(statlock){
- atomicfields.setstat_cache_hit(atomicfields.getstat_cache_hit()+1);
+ stat_cache_hit++;
}
return node.getData();
}
}
synchronized(statlock){
- atomicfields.setstat_cache_miss(atomicfields.getstat_cache_miss()+1);
+ stat_cache_miss++;
}
return null;
}
rowswitch=!rowswitch;
}
synchronized(statlock){
- atomicfields.setstat_update(atomicfields.getstat_update()+1);
- atomicfields.setstat_update_size(atomicfields.getstat_update_size()+row.getSize());
+ stat_update++;
+ stat_update_size+=row.getSize();
}
}
}
index.updateEntry(row.getId(),row.getSize(),DELETE,0);
synchronized(statlock){
- atomicfields.setstat_delete(atomicfields.getstat_delete()+1);
+ stat_delete++;
}
}
Row row=Row.readFromStream(data);
data.close();
synchronized(statlock){
- atomicfields.setstat_read(atomicfields.getstat_read()+1);
- atomicfields.setstat_read_size(atomicfields.getstat_read_size()+row.getSize());
+ stat_read++;
+ stat_read_size+=row.getSize();
}
return row;
}
--- /dev/null
+/*
+ * Copyright (c) 2007, Solido Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of Solido Systems nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+package com.solidosystems.tuplesoup.core;
+
+
+import java.io.*;
+import java.util.*;
+import java.nio.channels.*;
+import com.solidosystems.tuplesoup.filter.*;
+import dstm2.atomic;
+import dstm2.util.StringKeyHashMap;
+
+/**
+ * The table stores a group of rows.
+ * Every row must have a unique id within a table.
+ */
+public class DualFileTableTransactional implements TableTransactional{
+
+    // Upper bound on the number of index entries kept in the in-memory LRU cache.
+    private int INDEXCACHESIZE=8192;
+
+    // Dummy strings used purely as synchronization monitors for file A / file B.
+    private String filealock="filea-dummy";
+    private String fileblock="fileb-dummy";
+
+    // Append streams and random-access handles for the two data files (all opened lazily).
+    private DataOutputStream fileastream=null;
+    private DataOutputStream filebstream=null;
+    private RandomAccessFile filearandom=null;
+    private RandomAccessFile filebrandom=null;
+    FileChannel fca=null;
+    FileChannel fcb=null;
+    private TableIndexTransactional index=null;
+
+    // Current append offsets (end positions) of the two data files.
+    private long fileaposition=0;
+    private long filebposition=0;
+
+    // Alternates new rows between file A and file B; deliberately unsynchronized (see addRow).
+    private boolean rowswitch=true;
+
+    private String title;
+    private String location;
+
+    // Doubly-linked LRU list of cached index entries: first = oldest, last = most recent.
+    private TableIndexNodeTransactional indexcachefirst;
+    private TableIndexNodeTransactional indexcachelast;
+    private int indexcacheusage;
+
+    // id -> LRU node lookup backing the index cache.
+    private StringKeyHashMap<TableIndexNodeTransactional> indexcache;
+    //private Hashtable<String,TableIndexNode> indexcache;
+
+    // NOTE(review): atomicfields is never assigned anywhere in this class; every
+    // statistics update will throw NullPointerException unless it is initialized
+    // elsewhere (e.g. via the dstm2 atomic factory) — TODO confirm.
+    DualFileTableTSInf atomicfields;
+    // Statistic counters
+    public @atomic interface DualFileTableTSInf{
+        long getstat_add();
+        long getstat_update();
+        long getstat_delete();
+        long getstat_add_size();
+        long getstat_update_size();
+        long getstat_read_size();
+        long getstat_read();
+        long getstat_cache_hit();
+        long getstat_cache_miss();
+        long getstat_cache_drop();
+
+        void setstat_add(long val);
+        void setstat_update(long val);
+        void setstat_delete(long val);
+        void setstat_add_size(long val);
+        void setstat_update_size(long val);
+        void setstat_read_size(long val);
+        void setstat_read(long val);
+        void setstat_cache_hit(long val);
+        void setstat_cache_miss(long val);
+        void setstat_cache_drop(long val);
+    }
+
+    /*long stat_add=0;
+    long stat_update=0;
+    long stat_delete=0;
+    long stat_add_size=0;
+    long stat_update_size=0;
+    long stat_read_size=0;
+    long stat_read=0;
+    long stat_cache_hit=0;
+    long stat_cache_miss=0;
+    long stat_cache_drop=0;*/
+
+    // Monitor guarding all statistic counter updates.
+    protected String statlock="stat-dummy";
+
+    /**
+     * Return the current values of the statistic counters and reset them.
+     * The current counters are:
+     * <ul>
+     * <li>stat_table_add
+     * <li>stat_table_update
+     * <li>stat_table_delete
+     * <li>stat_table_add_size
+     * <li>stat_table_update_size
+     * <li>stat_table_read_size
+     * <li>stat_table_read
+     * <li>stat_table_cache_hit
+     * <li>stat_table_cache_miss
+     * <li>stat_table_cache_drop
+     * </ul>
+     * Furthermore, the index will be asked to deliver separate index specific counters
+     */
+    public Hashtable<String,Long> readStatistics(){
+        Hashtable<String,Long> hash=new Hashtable<String,Long>();
+        synchronized(statlock){
+            hash.put("stat_table_add",atomicfields.getstat_add());
+            hash.put("stat_table_update",atomicfields.getstat_update());
+            hash.put("stat_table_delete",atomicfields.getstat_delete());
+            hash.put("stat_table_add_size",atomicfields.getstat_add_size());
+            hash.put("stat_table_update_size",atomicfields.getstat_update_size());
+            hash.put("stat_table_read_size",atomicfields.getstat_read_size());
+            hash.put("stat_table_read",atomicfields.getstat_read());
+            hash.put("stat_table_cache_hit",atomicfields.getstat_cache_hit());
+            hash.put("stat_table_cache_miss",atomicfields.getstat_cache_miss());
+            hash.put("stat_table_cache_drop",atomicfields.getstat_cache_drop());
+            atomicfields.setstat_add(0);
+            atomicfields.setstat_update(0);
+            atomicfields.setstat_delete(0);
+            atomicfields.setstat_add_size(0);
+            atomicfields.setstat_update_size(0);
+            atomicfields.setstat_read_size(0);
+            atomicfields.setstat_read(0);
+            atomicfields.setstat_cache_hit(0);
+            atomicfields.setstat_cache_miss(0);
+            atomicfields.setstat_cache_drop(0);
+            Hashtable<String,Long> ihash=index.readStatistics();
+            hash.putAll(ihash);
+        }
+        return hash;
+    }
+
+    /**
+     * Create a new table object with the default flat index model
+     */
+
+
+    /**
+     * Create a new table object with a specific index model
+     *
+     * @param title     table name, used to derive the datafile names
+     * @param location  directory for the datafiles; a trailing separator is appended if missing
+     * @param indextype index model; only PAGED is handled here — any other value leaves index null
+     */
+    public DualFileTableTransactional(String title,String location, int indextype) throws IOException{
+        this.title=title;
+        this.location=location;
+        if(!this.location.endsWith(File.separator))this.location+=File.separator;
+        switch(indextype){
+            case PAGED : index=new PagedIndexTransactional(getFileName(INDEX));
+                break;
+
+        }
+        indexcachefirst=null;
+        indexcachelast=null;
+        indexcacheusage=0;
+        indexcache=new StringKeyHashMap<TableIndexNodeTransactional>();
+    }
+
+    /**
+     * Set the maximal allowable size of the index cache.
+     */
+    public void setIndexCacheSize(int newsize){
+        INDEXCACHESIZE=newsize;
+    }
+
+    /**
+     * Close all open file streams
+     */
+    public void close(){
+        try{
+            if(fileastream!=null)fileastream.close();
+            if(filebstream!=null)filebstream.close();
+            if(filearandom!=null)filearandom.close();
+            if(filebrandom!=null)filebrandom.close();
+            index.close();
+        }catch(Exception e){}
+    }
+
+    /**
+     * Returns the name of this table
+     */
+    public String getTitle(){
+        return title;
+    }
+
+    /**
+     * Returns the location of this tables datafiles
+     */
+    public String getLocation(){
+        return location;
+    }
+
+    // Maps a file constant to its on-disk filename.
+    // NOTE(review): FILEB maps to ".a" and FILEA maps to ".b" — these suffixes look
+    // swapped relative to the naming convention. The class is self-consistent, but
+    // files written by the non-transactional table may not be found — TODO confirm.
+    protected String getFileName(int type){
+        switch(type){
+            case FILEB : return location+title+".a";
+            case FILEA : return location+title+".b";
+            case INDEX : return location+title+".index";
+        }
+        return null;
+    }
+
+    /**
+     * Delete the files created by this table object.
+     * Be aware that this will delete any data stored in this table!
+     */
+    public void deleteFiles(){
+        try{
+            File ftest=new File(getFileName(FILEA));
+            ftest.delete();
+        }catch(Exception e){}
+        try{
+            File ftest=new File(getFileName(FILEB));
+            ftest.delete();
+        }catch(Exception e){}
+        try{
+            File ftest=new File(getFileName(INDEX));
+            ftest.delete();
+        }catch(Exception e){}
+    }
+
+    // Lazily opens the append stream for the requested data file and initializes
+    // the matching append offset from the file's current length.
+    private synchronized void openFile(int type) throws IOException{
+        switch(type){
+            case FILEA : if(fileastream==null){
+                            fileastream=new DataOutputStream(new BufferedOutputStream(new FileOutputStream(getFileName(FILEA),true)));
+                            File ftest=new File(getFileName(FILEA));
+                            fileaposition=ftest.length();
+                        }
+                        break;
+            case FILEB : if(filebstream==null){
+                            filebstream=new DataOutputStream(new BufferedOutputStream(new FileOutputStream(getFileName(FILEB),true)));
+                            File ftest=new File(getFileName(FILEB));
+                            filebposition=ftest.length();
+                        }
+                        break;
+        }
+    }
+
+    /**
+     * Adds a row of data to this table.
+     */
+    public void addRow(RowTransactional row) throws IOException{
+        // Distribute new rows between the two datafiles by using the rowswitch, but don't spend time synchronizing... this does not need to be acurate!
+        if(rowswitch){
+            addRowA(row);
+        }else{
+            addRowB(row);
+        }
+        rowswitch=!rowswitch;
+    }
+
+    // Appends an entry to the LRU cache, evicting the oldest node when full.
+    private void addCacheEntry(TableIndexEntryTransactional entry){
+        synchronized(indexcache){
+            if(indexcacheusage>INDEXCACHESIZE){
+                // remove first entry
+                TableIndexNodeTransactional node=indexcachefirst;
+                indexcache.remove(node.getData().getId());
+                indexcacheusage--;
+                synchronized(statlock){
+                    atomicfields.setstat_cache_drop(atomicfields.getstat_cache_drop()+1);
+                }
+                indexcachefirst=node.getNext();
+                if(indexcachefirst==null){
+                    indexcachelast=null;
+                }else{
+                    indexcachefirst.setPrevious(null);
+                }
+            }
+            TableIndexNodeTransactional node=new TableIndexNodeTransactional(indexcachelast,entry);
+            if(indexcachelast!=null){
+                indexcachelast.setNext(node);
+            }
+            if(indexcachefirst==null){
+                indexcachefirst=node;
+            }
+            indexcachelast=node;
+            indexcache.put(entry.getId(),node);
+            indexcacheusage++;
+        }
+    }
+
+    // Appends a row to data file A and registers it with the index (and cache).
+    private void addRowA(RowTransactional row) throws IOException{
+        synchronized(filealock){
+            openFile(FILEA);
+            int pre=fileastream.size();
+            row.writeToStream(fileastream);
+            int post=fileastream.size();
+            fileastream.flush();
+
+            synchronized(statlock){
+                atomicfields.setstat_add(atomicfields.getstat_add()+1);
+                atomicfields.setstat_add_size(atomicfields.getstat_add_size()+row.getSize());
+            }
+
+            index.addEntry(row.getId(),row.getSize(),FILEA,fileaposition);
+            if(INDEXCACHESIZE>0){
+                TableIndexEntryTransactional entry=new TableIndexEntryTransactional(row.getId(),row.getSize(),FILEA,fileaposition);
+                addCacheEntry(entry);
+            }
+            // NOTE(review): uses Row.calcSize here while addRowB uses
+            // RowTransactional.calcSize — presumably equivalent, but verify.
+            fileaposition+=Row.calcSize(pre,post);
+        }
+    }
+    // Appends a row to data file B and registers it with the index (and cache).
+    private void addRowB(RowTransactional row) throws IOException{
+        synchronized(fileblock){
+            openFile(FILEB);
+            int pre=filebstream.size();
+            row.writeToStream(filebstream);
+            int post=filebstream.size();
+            filebstream.flush();
+            synchronized(statlock){
+                atomicfields.setstat_add(atomicfields.getstat_add()+1);
+                atomicfields.setstat_add_size(atomicfields.getstat_add_size()+row.getSize());
+            }
+            index.addEntry(row.getId(),row.getSize(),FILEB,filebposition);
+            if(INDEXCACHESIZE>0){
+                TableIndexEntryTransactional entry=new TableIndexEntryTransactional(row.getId(),row.getSize(),FILEB,filebposition);
+                addCacheEntry(entry);
+            }
+            filebposition+=RowTransactional.calcSize(pre,post);
+        }
+    }
+
+
+    // Replaces a cached entry's data and moves its node to the most-recent end
+    // of the LRU list; falls back to insertion if the id is not cached yet.
+    private void updateCacheEntry(TableIndexEntryTransactional entry){
+        synchronized(indexcache){
+            if(indexcache.containsKey(entry.getId())){
+                TableIndexNodeTransactional node=indexcache.get(entry.getId());
+                node.setData(entry);
+                if(node!=indexcachelast){
+                    if(node==indexcachefirst){
+                        indexcachefirst=node.getNext();
+                    }
+                    node.remove();
+                    indexcachelast.setNext(node);
+                    node.setPrevious(indexcachelast);
+                    node.setNext(null);
+                    indexcachelast=node;
+                }
+            }else{
+                addCacheEntry(entry);
+            }
+        }
+    }
+
+    // Unlinks the entry with the given id from the LRU cache, if present.
+    private void removeCacheEntry(String id){
+        synchronized(indexcache){
+            if(indexcache.containsKey(id)){
+                TableIndexNodeTransactional node=indexcache.get(id);
+                indexcache.remove(id);
+                if(indexcacheusage==1){
+                    indexcachefirst=null;
+                    indexcachelast=null;
+                    indexcacheusage=0;
+                    return;
+                }
+                if(node==indexcachefirst){
+                    indexcachefirst=node.getNext();
+                    indexcachefirst.setPrevious(null);
+                }else if(node==indexcachelast){
+                    indexcachelast=node.getPrevious();
+                    indexcachelast.setNext(null);
+                }else{
+                    node.remove();
+                }
+                indexcacheusage--;
+                synchronized(statlock){
+                    atomicfields.setstat_cache_drop(atomicfields.getstat_cache_drop()+1);
+                }
+            }
+        }
+    }
+
+    // Returns the cached entry for the id (promoting it to most-recent), or null
+    // on a cache miss. Updates hit/miss statistics accordingly.
+    private TableIndexEntryTransactional getCacheEntry(String id){
+        synchronized(indexcache){
+            if(indexcache.containsKey(id)){
+                TableIndexNodeTransactional node=indexcache.get(id);
+                if(node!=indexcachelast){
+                    if(node==indexcachefirst){
+                        indexcachefirst=node.getNext();
+                    }
+                    node.remove();
+                    indexcachelast.setNext(node);
+                    node.setPrevious(indexcachelast);
+                    node.setNext(null);
+                    indexcachelast=node;
+                }
+                synchronized(statlock){
+                    atomicfields.setstat_cache_hit(atomicfields.getstat_cache_hit()+1);
+                }
+                return node.getData();
+            }
+        }
+        synchronized(statlock){
+            atomicfields.setstat_cache_miss(atomicfields.getstat_cache_miss()+1);
+        }
+        return null;
+    }
+
+    /**
+     * Adds a row to this table if it doesn't already exist, if it does it updates the row instead.
+     * This method is much slower than directly using add or update, so only use it if you don't know wether or not the row already exists.
+     */
+    public void addOrUpdateRow(RowTransactional row) throws IOException{
+        RowTransactional tmprow=getRow(row.getId());
+        if(tmprow==null){
+            addRow(row);
+        }else{
+            updateRow(row);
+        }
+    }
+
+    /**
+     * Updates a row stored in this table.
+     * If the new data fits in the row's existing slot it is overwritten in place;
+     * otherwise the row is appended to one of the data files.
+     */
+    public void updateRow(RowTransactional row) throws IOException{
+        TableIndexEntryTransactional entry=null;
+        // Handle index entry caching
+        if(INDEXCACHESIZE>0){
+            synchronized(indexcache){
+                entry=getCacheEntry(row.getId());
+                if(entry==null){
+                    entry=index.scanIndex(row.getId());
+                    addCacheEntry(entry);
+                }
+            }
+        }else{
+            entry=index.scanIndex(row.getId());
+        }
+        // NOTE(review): if scanIndex returns null (row id unknown) the next line
+        // throws NullPointerException — TODO confirm callers guarantee existence.
+        if(entry.getRowSize()>=row.getSize()){
+            // Add to the existing location
+            switch(entry.getLocation()){
+                case FILEA :synchronized(filealock){
+                                if(filearandom==null){
+                                    filearandom=new RandomAccessFile(getFileName(FILEA),"rw");
+                                    fca=filearandom.getChannel();
+                                }
+                                filearandom.seek(entry.getPosition());
+                                row.writeToFile(filearandom);
+
+                                fca.force(false);
+                            }
+                            break;
+                case FILEB :synchronized(fileblock){
+                                if(filebrandom==null){
+                                    filebrandom=new RandomAccessFile(getFileName(FILEB),"rw");
+                                    fcb=filebrandom.getChannel();
+                                }
+                                filebrandom.seek(entry.getPosition());
+                                row.writeToFile(filebrandom);
+
+                                fcb.force(false);
+                            }
+                            break;
+            }
+        }else{
+            if(rowswitch){
+                updateRowA(row);
+            }else{
+                updateRowB(row);
+            }
+            rowswitch=!rowswitch;
+        }
+        synchronized(statlock){
+            atomicfields.setstat_update(atomicfields.getstat_update()+1);
+            atomicfields.setstat_update_size(atomicfields.getstat_update_size()+row.getSize());
+        }
+    }
+
+    // Appends the updated row to data file A and repoints the index entry.
+    private void updateRowA(RowTransactional row) throws IOException{
+        synchronized(filealock){
+            openFile(FILEA);
+            int pre=fileastream.size();
+            row.writeToStream(fileastream);
+            int post=fileastream.size();
+            fileastream.flush();
+            index.updateEntry(row.getId(),row.getSize(),FILEA,fileaposition);
+
+            // Handle index entry caching
+            if(INDEXCACHESIZE>0){
+                updateCacheEntry(new TableIndexEntryTransactional(row.getId(),row.getSize(),FILEA,fileaposition));
+            }
+            fileaposition+=Row.calcSize(pre,post);
+        }
+    }
+
+    // Appends the updated row to data file B and repoints the index entry.
+    private void updateRowB(RowTransactional row) throws IOException{
+        synchronized(fileblock){
+            openFile(FILEB);
+            int pre=filebstream.size();
+            row.writeToStream(filebstream);
+            int post=filebstream.size();
+            filebstream.flush();
+            index.updateEntry(row.getId(),row.getSize(),FILEB,filebposition);
+            // Handle index entry caching
+            if(INDEXCACHESIZE>0){
+                updateCacheEntry(new TableIndexEntryTransactional(row.getId(),row.getSize(),FILEB,filebposition));
+            }
+            filebposition+=Row.calcSize(pre,post);
+        }
+    }
+
+    /**
+     * Marks a row as deleted in the index.
+     * Be aware that the space consumed by the row is not actually reclaimed.
+     */
+    public void deleteRow(RowTransactional row) throws IOException{
+        // Handle index entry caching
+        if(INDEXCACHESIZE>0){
+            removeCacheEntry(row.getId());
+        }
+        index.updateEntry(row.getId(),row.getSize(),DELETE,0);
+        synchronized(statlock){
+            atomicfields.setstat_delete(atomicfields.getstat_delete()+1);
+        }
+    }
+
+    /**
+     * Returns a tuplestream containing the given list of rows
+     */
+    public TupleStreamTransactional getRows(List<String> rows) throws IOException{
+        return new IndexedTableReaderTransactional(this,index.scanIndex(rows));
+    }
+
+    /**
+     * Returns a tuplestream containing the rows matching the given rowmatcher
+     */
+    public TupleStreamTransactional getRows(RowMatcherTransactional matcher) throws IOException{
+        return new IndexedTableReaderTransactional(this,index.scanIndex(),matcher);
+    }
+
+    /**
+     * Returns a tuplestream containing those rows in the given list that matches the given RowMatcher
+     */
+    public TupleStreamTransactional getRows(List<String> rows,RowMatcherTransactional matcher) throws IOException{
+        return new IndexedTableReaderTransactional(this,index.scanIndex(rows),matcher);
+    }
+
+    /**
+     * Returns a tuplestream of all rows in this table.
+     */
+    public TupleStreamTransactional getRows() throws IOException{
+        // return new TableReader(this);
+        return new IndexedTableReaderTransactional(this,index.scanIndex());
+    }
+
+    /**
+     * Returns a single row stored in this table.
+     * If the row does not exist in the table, null will be returned.
+     */
+    public RowTransactional getRow(String id) throws IOException{
+        TableIndexEntryTransactional entry=null;
+        // Handle index entry caching
+        if(INDEXCACHESIZE>0){
+            synchronized(indexcache){
+                entry=getCacheEntry(id);
+                if(entry==null){
+                    entry=index.scanIndex(id);
+                    if(entry!=null){
+                        addCacheEntry(entry);
+                    }
+                }
+            }
+        }else{
+            entry=index.scanIndex(id);
+        }
+        if(entry!=null){
+            long dataoffset=0;
+            DataInputStream data=null;
+            if(entry.getLocation()==Table.FILEA){
+                data=new DataInputStream(new BufferedInputStream(new FileInputStream(getFileName(Table.FILEA))));
+            }else if(entry.getLocation()==Table.FILEB){
+                data=new DataInputStream(new BufferedInputStream(new FileInputStream(getFileName(Table.FILEB))));
+            }
+            if(data!=null){
+                // skipBytes may skip fewer bytes than requested, so loop until the
+                // stream is positioned exactly at the row's offset.
+                while(dataoffset!=entry.getPosition()){
+                    dataoffset+=data.skipBytes((int)(entry.getPosition()-dataoffset));
+                }
+                RowTransactional row=RowTransactional.readFromStream(data);
+                data.close();
+                synchronized(statlock){
+                    atomicfields.setstat_read(atomicfields.getstat_read()+1);
+                    atomicfields.setstat_read_size(atomicfields.getstat_read_size()+row.getSize());
+                }
+                return row;
+            }
+
+        }
+        return null;
+    }
+ }
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2007, Solido Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of Solido Systems nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ package com.solidosystems.tuplesoup.core;
+
+ import com.solidosystems.tuplesoup.filter.*;
+ import java.io.*;
+ import java.util.*;
+
+public class IndexedTableReaderTransactional extends TupleStreamTransactional{
+    // Sequential input streams over the two data files (opened lazily).
+    private DataInputStream fileastream=null;
+    private DataInputStream filebstream=null;
+    // Current read offsets within file A / file B.
+    private long fileaposition=0;
+    private long filebposition=0;
+
+    // Index entries split per data file, sorted by file position for sequential reads.
+    private List<TableIndexEntryTransactional>fileaentries;
+    private List<TableIndexEntryTransactional>filebentries;
+
+    // All entries in requested delivery order.
+    private List<TableIndexEntryTransactional>entries;
+
+    // Rows read ahead of their delivery turn, keyed by row id.
+    private Hashtable<String,RowTransactional>fileabuffer;
+    private Hashtable<String,RowTransactional>filebbuffer;
+
+    private List<String>rows;
+    private int rowpointer;
+    private RowTransactional next=null;
+
+    private DualFileTableTransactional table;
+
+    // Optional filter; when set, non-matching rows are skipped.
+    private RowMatcherTransactional matcher=null;
+
+    /**
+     * Creates a reader that streams the rows referenced by the given index entries.
+     */
+    public IndexedTableReaderTransactional(DualFileTableTransactional table,List<TableIndexEntryTransactional>entries) throws IOException{
+        this.table=table;
+        // NOTE(review): self-assignment — there is no 'rows' parameter, so the
+        // field keeps its default null value (it is never read in this class).
+        this.rows=rows;
+        rowpointer=0;
+
+        this.entries=entries;
+        fileaentries=new ArrayList<TableIndexEntryTransactional>();
+        filebentries=new ArrayList<TableIndexEntryTransactional>();
+
+        Iterator<TableIndexEntryTransactional> it=entries.iterator();
+        while(it.hasNext()){
+            TableIndexEntryTransactional entry=it.next();
+            // TODO: we really shouldn't get nulls here
+            if(entry!=null){
+                if(entry.getLocation()==Table.FILEA){
+                    fileaentries.add(entry);
+                }else if(entry.getLocation()==Table.FILEB){
+                    filebentries.add(entry);
+                }
+            }
+        }
+
+        Collections.sort(fileaentries);
+        Collections.sort(filebentries);
+
+        fileabuffer=new Hashtable<String,RowTransactional>();
+        filebbuffer=new Hashtable<String,RowTransactional>();
+
+        readNext();
+    }
+
+
+    /**
+     * Creates a reader that streams the rows referenced by the given index entries,
+     * delivering only those accepted by the matcher.
+     */
+    public IndexedTableReaderTransactional(DualFileTableTransactional table,List<TableIndexEntryTransactional>entries,RowMatcherTransactional matcher) throws IOException{
+        this.table=table;
+        // NOTE(review): self-assignment, see the other constructor.
+        this.rows=rows;
+        rowpointer=0;
+        this.matcher=matcher;
+
+        this.entries=entries;
+        fileaentries=new ArrayList<TableIndexEntryTransactional>();
+        filebentries=new ArrayList<TableIndexEntryTransactional>();
+
+        Iterator<TableIndexEntryTransactional> it=entries.iterator();
+        while(it.hasNext()){
+            TableIndexEntryTransactional entry=it.next();
+            // TODO: we really shouldn't get nulls here
+            if(entry!=null){
+                if(entry.getLocation()==Table.FILEA){
+                    fileaentries.add(entry);
+                }else if(entry.getLocation()==Table.FILEB){
+                    filebentries.add(entry);
+                }
+            }
+        }
+
+        Collections.sort(fileaentries);
+        Collections.sort(filebentries);
+
+        fileabuffer=new Hashtable<String,RowTransactional>();
+        filebbuffer=new Hashtable<String,RowTransactional>();
+
+        readNext();
+    }
+
+    // Positions 'next' on the row for 'entry', reading forward through file A and
+    // buffering any rows encountered before the requested one.
+    private void readNextFromFileA(TableIndexEntryTransactional entry) throws IOException{
+        if(fileabuffer.containsKey(entry.getId())){
+            next=fileabuffer.remove(entry.getId());
+            return;
+        }
+        while(true){
+            if(fileaentries.size()>0){
+                TableIndexEntryTransactional nextfilea=fileaentries.remove(0);
+                if(fileastream==null){
+                    fileastream=new DataInputStream(new BufferedInputStream(new FileInputStream(table.getFileName(Table.FILEA))));
+                    fileaposition=0;
+                }
+                if(fileaposition>nextfilea.getPosition()){
+                    // We have already read this entry... skip it
+                    // readNextFromFileA(entry);
+                    // return;
+                }else{
+                    // skipBytes may skip fewer bytes than asked; loop to the exact offset.
+                    while(fileaposition!=nextfilea.getPosition()){
+                        fileaposition+=fileastream.skipBytes((int)(nextfilea.getPosition()-fileaposition));
+                    }
+                    RowTransactional row=RowTransactional.readFromStream(fileastream);
+                    synchronized(table.statlock){
+                        table.atomicfields.setstat_read_size(table.atomicfields.getstat_read_size()+row.getSize());
+                        table.atomicfields.setstat_read(table.atomicfields.getstat_read()+1);
+                    }
+                    fileaposition+=row.getSize();
+                    if(row.getId().equals(entry.getId())){
+                        next=row;
+                        return;
+                    }else{
+                        fileabuffer.put(row.getId(),row);
+                        // readNextFromFileA(entry);
+                    }
+                }
+            }else{
+                next=null;
+                return;
+            }
+        }
+    }
+
+    // Mirror of readNextFromFileA for file B.
+    private void readNextFromFileB(TableIndexEntryTransactional entry) throws IOException{
+        if(filebbuffer.containsKey(entry.getId())){
+            next=filebbuffer.remove(entry.getId());
+            return;
+        }
+        while(true){
+            if(filebentries.size()>0){
+                TableIndexEntryTransactional nextfileb=filebentries.remove(0);
+                if(filebstream==null){
+                    filebstream=new DataInputStream(new BufferedInputStream(new FileInputStream(table.getFileName(Table.FILEB))));
+                    filebposition=0;
+                }
+                if(filebposition>nextfileb.getPosition()){
+                    // We have already read this entry... skip it
+                    // readNextFromFileB(entry);
+                    // return;
+                }else{
+                    // skipBytes may skip fewer bytes than asked; loop to the exact offset.
+                    while(filebposition!=nextfileb.getPosition()){
+                        filebposition+=filebstream.skipBytes((int)(nextfileb.getPosition()-filebposition));
+                    }
+                    RowTransactional row=RowTransactional.readFromStream(filebstream);
+                    synchronized(table.statlock){
+                        table.atomicfields.setstat_read_size(table.atomicfields.getstat_read_size()+row.getSize());
+                        table.atomicfields.setstat_read(table.atomicfields.getstat_read()+1);
+                    }
+                    filebposition+=row.getSize();
+                    if(row.getId().equals(entry.getId())){
+                        next=row;
+                        return;
+                    }else{
+                        filebbuffer.put(row.getId(),row);
+                        // readNextFromFileB(entry);
+                    }
+                }
+            }else{
+                next=null;
+                return;
+            }
+        }
+    }
+
+    // Advances 'next' to the next deliverable row, applying the matcher if set,
+    // and closes both streams when the entry list is exhausted.
+    private void readNext() throws IOException{
+        if(entries.size()>rowpointer){
+            TableIndexEntryTransactional entry=entries.get(rowpointer++);
+            if(entry!=null){
+                switch(entry.getLocation()){
+                    case Table.FILEA : readNextFromFileA(entry);
+                        // return;
+                        break;
+                    case Table.FILEB : readNextFromFileB(entry);
+                        // return;
+                        break;
+                }
+                if(next!=null){
+                    if(matcher!=null){
+                        if(!matcher.matches(next)){
+                            readNext();
+                        }
+                    }
+                }
+                return;
+            }else{
+                readNext();
+                return;
+            }
+        }
+        try{
+            if(fileastream!=null)fileastream.close();
+        }catch(Exception e){}
+        try{
+            if(filebstream!=null)filebstream.close();
+        }catch(Exception e){}
+        next=null;
+    }
+
+    // True while a prefetched row is available.
+    public boolean hasNext(){
+        if(next!=null)return true;
+        return false;
+    }
+
+    // Returns the prefetched row and advances; null when exhausted or on error.
+    public RowTransactional next(){
+        try{
+            if(next!=null){
+                RowTransactional tmp=next;
+                readNext();
+                return tmp;
+            }
+        }catch(Exception e){
+            e.printStackTrace();
+        }
+        return null;
+    }
+
+    // Removal is not supported by this stream; intentionally a no-op.
+    public void remove(){
+
+    }
+}
\ No newline at end of file
package com.solidosystems.tuplesoup.core;
+import TransactionalIO.core.TransactionalFile;
import dstm2.AtomicArray;
import dstm2.atomic;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Hashtable;
+import java.util.List;
/**
*
* @author navid
*/
-public class PagedIndexTransactional {
+public class PagedIndexTransactional implements TableIndexTransactional{
-    public @atomic interface PageIndexTSInf{
+    // Atomic field container created by the dstm2 factory.
+    PageIndexTSInf atomicfields;
+
+
+
+    /**
+     * Transactional fields of the paged index: the backing filename, the
+     * statistic counters, and the array of root index pages.
+     * Declares the accessors used by readStatistics(); getStat_page_next,
+     * setStat_create_page and setStat_page_next were missing, which made the
+     * class uncompilable — added here.
+     */
+    public @atomic interface PageIndexTSInf{
+        String getFilename();
        Long getStat_read();
        Long getStat_write();
        Long getStat_create_page();
+        Long getStat_page_next();
        Long getStat_page_branch();
        AtomicArray<TableIndexPageTransactional> getRoots();
+        void setFilename(String val);
        void setRoots(AtomicArray<TableIndexPageTransactional> roots);
        void setStat_read(Long val);
        void setStat_write(Long val);
+        void setStat_create_page(Long val);
+        void setStat_page_next(Long val);
        void setStat_page_branch(Long val);
    }
+
+ private TransactionalFile out=null;
protected static final int INITIALPAGEHASH=1024;
protected static final int PAGESIZE=2048;
+
+    /**
+     * Opens (creating if necessary) the index file and loads or creates the
+     * INITIALPAGEHASH root pages.
+     *
+     * NOTE(review): {@code atomicfields} is dereferenced here but never
+     * assigned anywhere visible in this class — presumably a dstm2 factory is
+     * expected to instantiate the @atomic interface first; as written this
+     * constructor would throw NullPointerException. TODO confirm.
+     */
+    public PagedIndexTransactional(String filename) throws IOException{
+        this.atomicfields.setFilename(filename);
+        File ftest=new File(filename);
+        if(!ftest.exists())ftest.createNewFile();
+        out=new TransactionalFile(filename,"rw");
+        atomicfields.setRoots(new AtomicArray<TableIndexPageTransactional>(TableIndexPageTransactional.class, INITIALPAGEHASH));
+
+        if(out.length()>0){
+            // Existing index: read the root pages back-to-back from disk.
+            for(int i=0;i<INITIALPAGEHASH;i++){
+                atomicfields.getRoots().set(i, new TableIndexPageTransactional(this,out));
+                atomicfields.getRoots().get(i).setFirst();
+                out.seek(atomicfields.getRoots().get(i).getEndLocation());
+            }
+        }else{
+            // Fresh index: allocate one empty page per root hash bucket.
+            for(int i=0;i<INITIALPAGEHASH;i++){
+                atomicfields.getRoots().set(i, TableIndexPageTransactional.createNewPage(this,out,PAGESIZE));
+                atomicfields.getRoots().get(i).setFirst();
+            }
+        }
+    }
+
+    /**
+     * Returns a snapshot of the index access counters and resets them all to
+     * zero (read-and-clear semantics).
+     */
+    public Hashtable<String,Long> readStatistics(){
+        Hashtable<String,Long> hash=new Hashtable<String,Long>();
+        hash.put("stat_index_read",atomicfields.getStat_read());
+        hash.put("stat_index_write",atomicfields.getStat_write());
+        hash.put("stat_index_create_page",atomicfields.getStat_create_page());
+        hash.put("stat_index_page_next",atomicfields.getStat_page_next());
+        hash.put("stat_index_page_branch",atomicfields.getStat_page_branch());
+        atomicfields.setStat_read((long)0);
+        atomicfields.setStat_write((long)0);
+        atomicfields.setStat_create_page((long)0);
+        atomicfields.setStat_page_next((long)0);
+        atomicfields.setStat_page_branch((long)0);
+        return hash;
+    }
+
+ private int rootHash(String id){
+ return id.hashCode() & (INITIALPAGEHASH-1);
+ }
+
+    /** Finds the first page in id's bucket chain with room for a new entry. */
+    private synchronized TableIndexPageTransactional getFirstFreePage(String id) throws IOException{
+        return atomicfields.getRoots().get(rootHash(id)).getFirstFreePage(id, id.hashCode());
+    }
+
+    /**
+     * Returns the byte offset of id's entry in the index file, or -1 when the
+     * index has no roots or the id is not present.
+     */
+    private synchronized long getOffset(String id) throws IOException{
+        if(atomicfields.getRoots()==null)return -1;
+        return atomicfields.getRoots().get(rootHash(id)).getOffset(id,id.hashCode());
+    }
+
+    /**
+     * Overwrites the stored entry for id in place.
+     *
+     * NOTE(review): getOffset() returns -1 for an unknown id and that value
+     * is passed straight to seek(), so updating a missing entry fails with a
+     * seek error rather than a clear exception — confirm callers only ever
+     * update existing ids.
+     */
+    public synchronized void updateEntry(String id,int rowsize,int location,long position) throws IOException{
+        long offset=getOffset(id);
+        out.seek(offset);
+        TableIndexEntryTransactional entry=new TableIndexEntryTransactional(id,rowsize,location,position);
+        entry.updateData(out);
+        atomicfields.setStat_write(atomicfields.getStat_write()+1);
+    }
+    /** Appends a new entry to the first page in id's bucket with free space. */
+    public synchronized void addEntry(String id,int rowsize,int location,long position) throws IOException{
+        TableIndexPageTransactional page=getFirstFreePage(id);
+        page.addEntry(id,rowsize,location,position);
+        atomicfields.setStat_write(atomicfields.getStat_write()+1);
+    }
+    /** Looks up a single id; null when the index is empty or id is absent. */
+    public synchronized TableIndexEntryTransactional scanIndex(String id) throws IOException{
+        AtomicArray<TableIndexPageTransactional> roots=atomicfields.getRoots();
+        if(roots==null)return null;
+        return roots.get(rootHash(id)).scanIndex(id,id.hashCode());
+    }
+    /** Looks up each id in rows, skipping missing and deleted entries. */
+    public synchronized List<TableIndexEntryTransactional> scanIndex(List<String> rows) throws IOException{
+        List<TableIndexEntryTransactional> result=new ArrayList<TableIndexEntryTransactional>();
+        for(String id:rows){
+            TableIndexEntryTransactional entry=scanIndex(id);
+            if(entry!=null&&entry.getLocation()!=Table.DELETE){
+                result.add(entry);
+            }
+        }
+        return result;
+    }
+    /** Collects every live entry from all root page chains. */
+    public synchronized List<TableIndexEntryTransactional> scanIndex() throws IOException{
+        ArrayList<TableIndexEntryTransactional> result=new ArrayList<TableIndexEntryTransactional>();
+        AtomicArray<TableIndexPageTransactional> roots=atomicfields.getRoots();
+        for(int bucket=0;bucket<INITIALPAGEHASH;bucket++){
+            roots.get(bucket).addEntriesToList(result);
+        }
+        return result;
+    }
+    /** Closes the underlying index file; errors during close are ignored. */
+    public void close(){
+        if(out==null)return;
+        try{
+            out.close();
+        }catch(Exception ignored){}
+    }
}
--- /dev/null
+/*
+ * Copyright (c) 2007, Solido Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of Solido Systems nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ package com.solidosystems.tuplesoup.core;
+
+ import java.io.*;
+ import java.util.*;
+
+/**
+ * A predicate over rows: either a single comparison of one keyed value, or a
+ * boolean combination (NOT/OR/AND/XOR) of two sub-matchers.
+ */
+public class RowMatcherTransactional{
+    // Value comparison operators.
+    public final static int LESSTHAN=0;
+    public final static int EQUALS=1;
+    public final static int GREATERTHAN=2;
+    public final static int STARTSWITH=3;
+    public final static int ENDSWITH=4;
+    public final static int CONTAINS=5;
+    public final static int ISNULL=6;
+
+    // Boolean combinators over sub-matchers.
+    public final static int NOT=7;
+    public final static int OR=8;
+    public final static int AND=9;
+    public final static int XOR=10;
+
+    private String key=null;
+    private ValueTransactional value=null;
+    private int type=-1;
+
+    private RowMatcherTransactional match1=null;
+    private RowMatcherTransactional match2=null;
+
+    /** Compares the value stored under key against a constant. */
+    public RowMatcherTransactional(String key,int type,ValueTransactional value){
+        this.key=key;
+        this.type=type;
+        this.value=value;
+    }
+
+    /** Unary test on the value under key (e.g. ISNULL). */
+    public RowMatcherTransactional(String key,int type){
+        this.key=key;
+        this.type=type;
+    }
+
+    /** Binary combination (AND/OR/XOR) of two sub-matchers. */
+    public RowMatcherTransactional(RowMatcherTransactional match1,int type,RowMatcherTransactional match2){
+        this.match1=match1;
+        this.type=type;
+        this.match2=match2;
+    }
+
+    /** Unary combination (NOT) of a sub-matcher. */
+    public RowMatcherTransactional(int type,RowMatcherTransactional match1){
+        this.match1=match1;
+        this.type=type;
+    }
+
+    /**
+     * Evaluates this matcher against a row.
+     *
+     * FIX: XOR previously evaluated each sub-matcher up to twice
+     * ((a||b) && !(a&&b)) — the optimization the original author's comment
+     * asked for. Boolean ^ has the identical truth table and evaluates each
+     * side exactly once (matchers are assumed side-effect free).
+     */
+    public boolean matches(RowTransactional row){
+        if(value!=null){
+            ValueTransactional compare=row.get(key);
+            switch(type){
+                case LESSTHAN   : return compare.lessThan(value);
+                case EQUALS     : return compare.equals(value);
+                case GREATERTHAN: return compare.greaterThan(value);
+                case STARTSWITH : return compare.startsWith(value);
+                case ENDSWITH   : return compare.endsWith(value);
+                case CONTAINS   : return compare.contains(value);
+            }
+        }else if(type==ISNULL){
+            ValueTransactional compare=row.get(key);
+            return compare.isNull();
+        }else if((type==AND)||(type==OR)||(type==XOR)){
+            switch(type){
+                case AND : return match1.matches(row)&&match2.matches(row);
+                case OR  : return match1.matches(row)||match2.matches(row);
+                case XOR : return match1.matches(row)^match2.matches(row);
+            }
+        }else if(type==NOT){
+            return !match1.matches(row);
+        }
+        return false;
+    }
+}
\ No newline at end of file
package com.solidosystems.tuplesoup.core;
+import dstm2.AtomicSuperClass;
import java.util.*;
import java.io.*;
package com.solidosystems.tuplesoup.core;
+import dstm2.atomic;
import java.io.*;
public class TableIndexEntry implements Comparable<TableIndexEntry>{
private int size;
private int rowsize;
+
+
public TableIndexEntry(String id,int rowsize,int location,long position){
this.id=id;
this.location=location;
--- /dev/null
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package com.solidosystems.tuplesoup.core;
+
+import TransactionalIO.core.TransactionalFile;
+import dstm2.AtomicSuperClass;
+import dstm2.atomic;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+/**
+ *
+ * @author navid
+ */
+public class TableIndexEntryTransactional implements AtomicSuperClass, Comparable<TableIndexEntryTransactional>{
+
+ TableIndexEntryTSInf atomicfields;
+ public @atomic interface TableIndexEntryTSInf{
+ String getId();
+ int getLocation();
+ long getPosition();
+ int getSize();
+ int getRowsize();
+
+ void setId(String val);
+ void setLocation(int val);
+ void setPosition(long val);
+ void setSize(int val);
+ void setRowsize(int val);
+ }
+
+
+ public TableIndexEntryTransactional(String id,int rowsize,int location,long position){
+ this.atomicfields.setId(id);
+ this.atomicfields.setLocation(location);
+ this.atomicfields.setPosition(position);
+ this.atomicfields.setRowsize(rowsize);
+ this.atomicfields.setSize(-1);
+ }
+ public String getId(){
+ return atomicfields.getId();
+ }
+ public void setPosition(long position){
+ this.atomicfields.setPosition(position);
+ }
+ public long getPosition(){
+ return atomicfields.getPosition();
+ }
+ public void setLocation(int location){
+ this.atomicfields.setLocation(location);
+ }
+ public int getLocation(){
+ return atomicfields.getLocation();
+ }
+
+ public int getRowSize(){
+ return atomicfields.getRowsize();
+ }
+
+ public int compareTo(TableIndexEntryTransactional obj) throws ClassCastException{
+ TableIndexEntryTransactional ent=(TableIndexEntryTransactional)obj;
+ if(atomicfields.getPosition()<ent.atomicfields.getPosition()) return -1;
+ if(atomicfields.getPosition()==ent.atomicfields.getPosition()) return 0;
+ return 1;
+ }
+
+ public boolean equals(Object obj){
+ try{
+ TableIndexEntryTransactional ent=(TableIndexEntryTransactional)obj;
+ if(ent.atomicfields.getLocation()==atomicfields.getLocation()){
+ if(ent.atomicfields.getPosition()==atomicfields.getPosition()){
+ if(ent.atomicfields.getId().equals(atomicfields.getId())){
+ return true;
+ }
+ }
+ }
+ }catch(ClassCastException e){}
+ return false;
+ }
+
+ public int getSize(){
+ if(atomicfields.getSize()<0) calcSize();
+ return atomicfields.getSize();
+ }
+ public void setSize(int size){
+ this.atomicfields.setSize(size);
+ }
+ private void calcSize(){
+ try{
+ ByteArrayOutputStream bout=new ByteArrayOutputStream();
+ DataOutputStream dout=new DataOutputStream(bout);
+ dout.writeInt(atomicfields.getId().hashCode());
+ dout.writeShort(atomicfields.getId().length());
+ dout.writeChars(atomicfields.getId());
+ dout.writeInt(atomicfields.getRowsize());
+ dout.writeByte(atomicfields.getLocation());
+ dout.writeLong(atomicfields.getPosition());
+ setSize(bout.size());
+ dout.close();
+ bout.close();
+ }catch(Exception e){
+ e.printStackTrace();
+ }
+ }
+
+ protected void writeData(TransactionalFile out) throws IOException{
+ long pre=out.getFilePointer();
+ out.writeInt(atomicfields.getId().hashCode());
+ out.writeShort(atomicfields.getId().length());
+ out.writeChars(atomicfields.getId());
+ out.writeInt(atomicfields.getRowsize());
+ out.writeByte(atomicfields.getLocation());
+ out.writeLong(atomicfields.getPosition());
+ setSize((int)(out.getFilePointer()-pre));
+ }
+ protected void updateData(TransactionalFile out) throws IOException{
+ long pre=out.getFilePointer();
+ out.skipBytes(4+2+atomicfields.getId().length()*2);
+ out.writeInt(atomicfields.getRowsize());
+ out.writeByte(atomicfields.getLocation());
+ out.writeLong(atomicfields.getPosition());
+ setSize((int)(out.getFilePointer()-pre));
+ }
+ protected void writeData(DataOutputStream out) throws IOException{
+ out.writeInt(atomicfields.getId().hashCode());
+ out.writeShort(atomicfields.getId().length());
+ out.writeChars(atomicfields.getId());
+ out.writeInt(atomicfields.getRowsize());
+ out.writeByte(atomicfields.getLocation());
+ out.writeLong(atomicfields.getPosition());
+ }
+ protected static TableIndexEntryTransactional readData(TransactionalFile in) throws IOException{
+ long pre=in.getFilePointer();
+ in.readInt();
+ int num=in.readShort();
+ StringBuilder buf=new StringBuilder(num);
+ for(int i=0;i<num;i++){
+ buf.append(in.readChar());
+ }
+ String id=buf.toString();
+ int rowsize=in.readInt();
+ int location=in.readByte();
+ long position=in.readLong();
+ TableIndexEntryTransactional tmp=new TableIndexEntryTransactional(id,rowsize,location,position);
+ tmp.setSize((int)(in.getFilePointer()-pre));
+ return tmp;
+ }
+
+ protected static TableIndexEntryTransactional readData(DataInputStream in) throws IOException{
+ in.readInt();
+ int num=in.readShort();
+ StringBuilder buf=new StringBuilder(num);
+ for(int i=0;i<num;i++){
+ buf.append(in.readChar());
+ }
+ String id=buf.toString();
+ int rowsize=in.readInt();
+ int location=in.readByte();
+ long position=in.readLong();
+ TableIndexEntryTransactional tmp=new TableIndexEntryTransactional(id,rowsize,location,position);
+ return tmp;
+ }
+
+ protected static long scanForOffset(String id,DataInputStream in) throws IOException{
+ long offset=0;
+ int scanhash=id.hashCode();
+ try{
+ int datahash=in.readInt();
+ while(scanhash!=datahash){
+ int num=in.readShort();
+ in.skipBytes(1+4+8+num*2);
+ offset+=4+4+1+2+8+num*2;
+ datahash=in.readInt();
+ }
+ return offset;
+ }catch(EOFException e){}
+ return -1;
+ }
+ protected static TableIndexEntryTransactional lookForData(String id,DataInputStream in) throws IOException{
+ int scanhash=id.hashCode();
+ int datahash=in.readInt();
+ int num=in.readShort();
+ if(scanhash!=datahash){
+ in.skipBytes(4+1+8+num*2);
+ return null;
+ }
+ StringBuilder buf=new StringBuilder(num);
+ for(int i=0;i<num;i++){
+ buf.append(in.readChar());
+ }
+ String readid=buf.toString();
+ if(!readid.equals(id)){
+ in.skipBytes(4+1+8);
+ return null;
+ }
+ int rowsize=in.readInt();
+ int location=in.readByte();
+ long position=in.readLong();
+ TableIndexEntryTransactional tmp=new TableIndexEntryTransactional(id,rowsize,location,position);
+ return tmp;
+ }
+ protected static TableIndexEntryTransactional lookForData(String id,TransactionalFile in) throws IOException{
+ int scanhash=id.hashCode();
+ int datahash=in.readInt();
+ int num=in.readShort();
+ if(scanhash!=datahash){
+ in.skipBytes(4+1+8+num*2);
+ return null;
+ }
+ StringBuilder buf=new StringBuilder(num);
+ for(int i=0;i<num;i++){
+ buf.append(in.readChar());
+ }
+ String readid=buf.toString();
+ if(!readid.equals(id)){
+ in.skipBytes(4+1+8);
+ return null;
+ }
+ int rowsize=in.readInt();
+ int location=in.readByte();
+ long position=in.readLong();
+ TableIndexEntryTransactional tmp=new TableIndexEntryTransactional(id,rowsize,location,position);
+ return tmp;
+ }
+}
--- /dev/null
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package com.solidosystems.tuplesoup.core;
+
+import dstm2.AtomicSuperClass;
+import dstm2.atomic;
+
+/**
+ *
+ * @author navid
+ */
+/**
+ * A doubly-linked list node holding one index entry, with its links managed
+ * as dstm2 transactional state.
+ *
+ * NOTE(review): {@code atomicfields} is dereferenced in every constructor but
+ * never assigned in this class — presumably a dstm2 factory creates the
+ * @atomic implementation before use. TODO confirm.
+ */
+public class TableIndexNodeTransactional implements AtomicSuperClass{
+
+    //private TableIndexNodeTransactional previous;
+    //private TableIndexEntryTransactional data;
+    //private TableIndexNodeTransactional next;
+    TableIndexInodeTSinf atomicfields;
+
+    // Transactional accessors for the node's payload and neighbor links.
+    public @atomic interface TableIndexInodeTSinf{
+        TableIndexNodeTransactional getPrevious();
+        TableIndexEntryTransactional getData();
+        TableIndexNodeTransactional getNext();
+
+        void setPrevious(TableIndexNodeTransactional val);
+        void setData(TableIndexEntryTransactional val);
+        void setNext(TableIndexNodeTransactional val);
+    }
+
+    /** Creates an empty, unlinked node. */
+    public TableIndexNodeTransactional(){
+        atomicfields.setPrevious(null);
+        atomicfields.setData(null);
+        atomicfields.setNext(null);
+    }
+
+    /** Creates an unlinked node carrying entry. */
+    public TableIndexNodeTransactional(TableIndexEntryTransactional entry){
+        atomicfields.setPrevious(null);
+        atomicfields.setData(entry);
+        atomicfields.setNext(null);
+
+    }
+
+    /** Creates a node carrying entry, linked after prev. */
+    public TableIndexNodeTransactional(TableIndexNodeTransactional prev,TableIndexEntryTransactional entry){
+        atomicfields.setPrevious(prev);
+        atomicfields.setData(entry);
+        atomicfields.setNext(null);
+    }
+
+    /** Creates a node carrying entry, linked between prev and nex. */
+    public TableIndexNodeTransactional(TableIndexNodeTransactional prev,TableIndexEntryTransactional entry,TableIndexNodeTransactional nex){
+        atomicfields.setPrevious(prev);
+        atomicfields.setData(entry);
+        atomicfields.setNext(nex);
+    }
+
+    public TableIndexEntryTransactional getData(){
+        return atomicfields.getData();
+    }
+    public TableIndexNodeTransactional getPrevious(){
+        return atomicfields.getPrevious();
+    }
+    public TableIndexNodeTransactional getNext(){
+        return atomicfields.getNext();
+    }
+    public void setNext(TableIndexNodeTransactional node){
+        atomicfields.setNext(node);
+    }
+    public void setPrevious(TableIndexNodeTransactional node){
+        atomicfields.setPrevious(node);
+    }
+    public void setData(TableIndexEntryTransactional entry){
+        atomicfields.setData(entry);
+    }
+    /**
+     * Unlinks this node from its neighbors. Note that this node's own
+     * previous/next references are left pointing at the list — it is the
+     * neighbors that stop referencing this node.
+     */
+    public void remove(){
+        if(atomicfields.getPrevious()!=null){
+            atomicfields.getPrevious().setNext(atomicfields.getNext());
+        }
+        if(atomicfields.getNext()!=null){
+            atomicfields.getNext().setPrevious(atomicfields.getPrevious());
+        }
+    }
+}
* POSSIBILITY OF SUCH DAMAGE.
*/
-/*package com.solidosystems.tuplesoup.core;
+package com.solidosystems.tuplesoup.core;
import TransactionalIO.core.TransactionalFile;
import dstm2.AtomicSuperClass;
import java.util.*;
public class TableIndexPageTransactional implements AtomicSuperClass{
- TableIndexPageTSInf atomicfields;
+ TableIndexPageTSInf atomicfields = null;
private final static int BASEOFFSET=4+8+8+4+4;
//private RandomAccessFile file=null;
private TransactionalFile file = null;
Integer getStarthash();
Integer getEndhash();
Boolean getFirst();
- TableIndexPageTSInf getNextpage();
- TableIndexPageTSInf getLowerpage();
+ TableIndexPageTransactional getNextpage();
+ TableIndexPageTransactional getLowerpage();
+ PagedIndexTransactional getIndex();
- void setLowerpage(TableIndexPageTSInf lowerpage);
- void setNextpage(TableIndexPageTSInf nextpage);
+ void setLowerpage(TableIndexPageTransactional lowerpage);
+ void setNextpage(TableIndexPageTransactional nextpage);
void setFirst(Boolean val);
- void setEndhash();
- void setStarthash();
+ void setEndhash(int val);
+ void setStarthash(int val);
void setOffset(Integer offset);
void setNext(Long next);
void setSize(Integer size);
void setLocation(Long location);
void setLower(Long val);
+ void setIndex(PagedIndexTransactional val);
}
- private long location=-1;
- private int size=-1;
- private long next=-1;
- private long lower=-1;
- private int offset=0;
- private int starthash=-1;
- private int endhash=-1;
- private boolean first=false;
-
- private TableIndexPage nextpage=null;
- private TableIndexPage lowerpage=null;
-
- private PagedIndex index=null;
-
- public TableIndexPageTransactional(PagedIndex index,TransactionalFile file) throws IOException{
+    /**
+     * Loads an existing page whose header starts at the file's current
+     * position: size (int), next-page location (long), lower-page location
+     * (long), used-byte offset (int), endhash (int) and — only when the page
+     * is non-empty — starthash (int).
+     *
+     * NOTE(review): atomicfields is initialised to null and never assigned in
+     * this class, so these setters would NPE unless dstm2 injects the @atomic
+     * implementation first — TODO confirm.
+     */
+    public TableIndexPageTransactional(PagedIndexTransactional index,TransactionalFile file) throws IOException{
        this.file=file;
-        this.index=index;
-        first=false;
-        location=file.getFilePointer();
-        size=file.readInt();
-        next=file.readLong();
-        lower=file.readLong();
-        offset=file.readInt();
-        endhash=file.readInt();
-        if(offset>0)starthash=file.readInt();
+        this.atomicfields.setIndex(index);
+        this.atomicfields.setFirst(false);
+        this.atomicfields.setLocation(file.getFilePointer());
+        this.atomicfields.setSize(file.readInt());
+        this.atomicfields.setNext(file.readLong());
+        this.atomicfields.setLower(file.readLong());
+        this.atomicfields.setOffset(file.readInt());
+        this.atomicfields.setEndhash(file.readInt());
+        if(this.atomicfields.getOffset()>0)
+            this.atomicfields.setStarthash(file.readInt());
    }
-    public static TableIndexPage createNewPage(PagedIndex index,RandomAccessFile file,int size) throws IOException{
+    public static TableIndexPageTransactional createNewPage(PagedIndexTransactional index,TransactionalFile file,int size) throws IOException{
+        // Appends a fresh page header at the end of the file and re-reads it
+        // through the constructor so the page object reflects disk state.
        long pre=file.length();
-        file.setLength(file.length()+size+BASEOFFSET);
+//        file.setLength(file.length()+size+BASEOFFSET);
+        // NOTE(review): the setLength pre-allocation was commented out, so the
+        // file is no longer extended by size+BASEOFFSET here — confirm entry
+        // writes are what grows the file now.
+        // NOTE(review): only one writeLong(-1l) is visible although BASEOFFSET
+        // (4+8+8+4+4) reserves two longs (next and lower) — the second
+        // writeLong may have been lost; verify the header layout.
        file.seek(pre);
        file.writeInt(size);
        file.writeLong(-1l);
        file.writeInt(0);
        file.writeInt(-1);
        file.seek(pre);
-        index.stat_create_page++;
-        return new TableIndexPage(index,file);
+        index.atomicfields.setStat_create_page(index.atomicfields.getStat_create_page()+1);
+        return new TableIndexPageTransactional(index,file);
    }
public void setFirst(){
- first=true;
+ this.atomicfields.setFirst(true);
}
public long getLocation(){
- return location;
+ return atomicfields.getLocation();
}
public long getEndLocation(){
- return location+size+BASEOFFSET;
+ return this.atomicfields.getLocation()+atomicfields.getSize()+BASEOFFSET;
}
public String toString(){
StringBuffer buf=new StringBuffer();
buf.append("{\n");
- buf.append(" location "+location+"\n");
- buf.append(" size "+size+"\n");
- buf.append(" next "+next+"\n");
- buf.append(" lower "+lower+"\n");
- buf.append(" offset "+offset+"\n");
- buf.append(" starthash "+starthash+"\n");
- buf.append(" endhash "+endhash+"\n");
+ buf.append(" location "+this.atomicfields.getLocation()+"\n");
+ buf.append(" size "+this.atomicfields.getSize()+"\n");
+ buf.append(" next "+this.atomicfields.getNext()+"\n");
+ buf.append(" lower "+this.atomicfields.getLower()+"\n");
+ buf.append(" offset "+this.atomicfields.getOffset()+"\n");
+ buf.append(" starthash "+this.atomicfields.getStarthash()+"\n");
+ buf.append(" endhash "+this.atomicfields.getEndhash()+"\n");
buf.append("}\n");
return buf.toString();
}
    private void updateMeta() throws IOException{
+        // Rewrites the fixed-size page header in place (size, next, lower,
+        // offset, endhash); starthash is written separately by addEntry.
-        file.seek(location);
-        file.writeInt(size);
-        file.writeLong(next);
-        file.writeLong(lower);
-        file.writeInt(offset);
-        file.writeInt(endhash);
+        file.seek(this.atomicfields.getLocation());
+        file.writeInt(this.atomicfields.getSize());
+        file.writeLong(this.atomicfields.getNext());
+        file.writeLong(this.atomicfields.getLower());
+        file.writeInt(this.atomicfields.getOffset());
+        file.writeInt(this.atomicfields.getEndhash());
    }
- public void addEntriesToList(List<TableIndexEntry> lst) throws IOException{
- if(lower>-1){
- if(lowerpage==null){
- file.seek(lower);
- lowerpage=new TableIndexPage(index,file);
+ public void addEntriesToList(List<TableIndexEntryTransactional> lst) throws IOException{
+ if(this.atomicfields.getLower()>-1){
+ if(this.atomicfields.getLowerpage()==null){
+ file.seek(this.atomicfields.getLower());
+ this.atomicfields.setLowerpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file));
}
- lowerpage.addEntriesToList(lst);
+ this.atomicfields.getLowerpage().addEntriesToList(lst);
}
- if(next>-1){
- if(nextpage==null){
- file.seek(next);
- nextpage=new TableIndexPage(index,file);
+ if(this.atomicfields.getNext()>-1){
+ if(this.atomicfields.getNextpage()==null){
+ file.seek(this.atomicfields.getNext());
+ this.atomicfields.setNextpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file));
}
- nextpage.addEntriesToList(lst);
+ this.atomicfields.getNextpage().addEntriesToList(lst);
}
- file.seek(location+BASEOFFSET);
+ file.seek(this.atomicfields.getLocation()+BASEOFFSET);
long pre=file.getFilePointer();
- while(file.getFilePointer()<pre+offset){
- TableIndexEntry entry=TableIndexEntry.readData(file);
+ while(file.getFilePointer()<pre+this.atomicfields.getOffset()){
+ TableIndexEntryTransactional entry=TableIndexEntryTransactional.readData(file);
if(entry!=null){
if(entry.getLocation()!=Table.DELETE)lst.add(entry);
}
}
}
- public TableIndexEntry scanIndex(String id,int hashcode) throws IOException{
- if(!first){
- if(hashcode<starthash){
- if(lower==-1)return null;
- if(lowerpage==null){
- file.seek(lower);
- lowerpage=new TableIndexPage(index,file);
+ public TableIndexEntryTransactional scanIndex(String id,int hashcode) throws IOException{
+ if(!atomicfields.getFirst()){
+ if(hashcode<atomicfields.getStarthash()){
+ if(atomicfields.getLower()==-1) return null;
+ if(this.atomicfields.getLowerpage()==null){
+ file.seek(this.atomicfields.getLower());
+ this.atomicfields.setLowerpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file));
}
- index.stat_page_branch++;
- return lowerpage.scanIndex(id,hashcode);
+ atomicfields.getIndex().atomicfields.setStat_page_branch(atomicfields.getIndex().atomicfields.getStat_page_branch()+1);
+ return this.atomicfields.getLowerpage().scanIndex(id,hashcode);
}
}
- if(hashcode>endhash){
- if(next==-1)return null;
- if(nextpage==null){
- file.seek(next);
- nextpage=new TableIndexPage(index,file);
+ if(hashcode>this.atomicfields.getEndhash()){
+ if(this.atomicfields.getNext()==-1)return null;
+ if(this.atomicfields.getNextpage()==null){
+ file.seek(this.atomicfields.getNext());
+ this.atomicfields.setNextpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file));
}
- index.stat_page_next++;
- return nextpage.scanIndex(id,hashcode);
+ atomicfields.getIndex().atomicfields.setStat_page_next(atomicfields.getIndex().atomicfields.getStat_page_next()+1);
+ return this.atomicfields.getNextpage().scanIndex(id,hashcode);
}
- file.seek(location+BASEOFFSET);
+ file.seek(this.atomicfields.getLocation()+BASEOFFSET);
long pre=file.getFilePointer();
- while(file.getFilePointer()<pre+offset){
- TableIndexEntry entry=TableIndexEntry.lookForData(id,file);
+ while(file.getFilePointer()<pre+this.atomicfields.getOffset()){
+ TableIndexEntryTransactional entry=TableIndexEntryTransactional.lookForData(id,file);
if(entry!=null)return entry;
}
- if(next==-1)return null;
- if(nextpage==null){
- file.seek(next);
- nextpage=new TableIndexPage(index,file);
+ if(this.atomicfields.getNext()==-1)return null;
+ if(this.atomicfields.getNextpage()==null){
+ file.seek(this.atomicfields.getNext());
+ this.atomicfields.setNextpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file));
}
- index.stat_page_next++;
- return nextpage.scanIndex(id,hashcode);
+ atomicfields.getIndex().atomicfields.setStat_page_next(atomicfields.getIndex().atomicfields.getStat_page_next()+1);
+ return this.atomicfields.getNextpage().scanIndex(id,hashcode);
}
protected long getOffset(String id,int hashcode) throws IOException{
- if(!first){
- if(hashcode<starthash){
- if(lower==-1)return -1;
- if(lowerpage==null){
- file.seek(lower);
- lowerpage=new TableIndexPage(index,file);
+ if(!this.atomicfields.getFirst()){
+ if(hashcode<this.atomicfields.getStarthash()){
+ if(this.atomicfields.getLower()==-1)return -1;
+ if(this.atomicfields.getLowerpage()==null){
+ file.seek(atomicfields.getLower());
+ this.atomicfields.setLowerpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file));
}
- index.stat_page_branch++;
- return lowerpage.getOffset(id,hashcode);
+ atomicfields.getIndex().atomicfields.setStat_page_branch(atomicfields.getIndex().atomicfields.getStat_page_branch()+1);
+ return this.atomicfields.getLowerpage().getOffset(id,hashcode);
}
}
- if(hashcode>endhash){
- if(next==-1)return -1;
- if(nextpage==null){
- file.seek(next);
- nextpage=new TableIndexPage(index,file);
+ if(hashcode>this.atomicfields.getEndhash()){
+ if(this.atomicfields.getNext()==-1)return -1;
+ if(this.atomicfields.getNextpage()==null){
+ file.seek(this.atomicfields.getNext());
+ this.atomicfields.setNextpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file));
}
- index.stat_page_next++;
- return nextpage.getOffset(id,hashcode);
+ atomicfields.getIndex().atomicfields.setStat_page_next(atomicfields.getIndex().atomicfields.getStat_page_next()+1);
+ return this.atomicfields.getNextpage().getOffset(id,hashcode);
}
- file.seek(location+BASEOFFSET);
+ file.seek(this.atomicfields.getLocation()+BASEOFFSET);
long pre=file.getFilePointer();
- while(file.getFilePointer()<pre+offset){
+ while(file.getFilePointer()<pre+this.atomicfields.getOffset()){
long prescan=file.getFilePointer();
- TableIndexEntry entry=TableIndexEntry.lookForData(id,file);
+ TableIndexEntryTransactional entry=TableIndexEntryTransactional.lookForData(id,file);
if(entry!=null)return prescan;
}
- if(next==-1)return -1;
- if(nextpage==null){
- file.seek(next);
- nextpage=new TableIndexPage(index,file);
+ if(this.atomicfields.getNext()==-1)return -1;
+ if(this.atomicfields.getNextpage()==null){
+ file.seek(this.atomicfields.getNext());
+ this.atomicfields.setNextpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file));
}
- index.stat_page_next++;
- return nextpage.getOffset(id,hashcode);
+ atomicfields.getIndex().atomicfields.setStat_page_next(atomicfields.getIndex().atomicfields.getStat_page_next()+1);
+ return this.atomicfields.getNextpage().getOffset(id,hashcode);
}
- protected TableIndexPage getFirstFreePage(String id,int hashcode) throws IOException{
+ protected TableIndexPageTransactional getFirstFreePage(String id,int hashcode) throws IOException{
// Is this an empty page?
- if(offset==0){
+ if(this.atomicfields.getOffset()==0){
return this;
}
// Is this hash lower than the starthash
- if(!first){
- if(hashcode<starthash){
- if(lower==-1){
- lower=file.length();
+ if(!this.atomicfields.getFirst()){
+ if(hashcode<this.atomicfields.getStarthash()){
+ if(this.atomicfields.getLower()==-1){
+ this.atomicfields.setLower(file.length());
updateMeta();
- return createNewPage(index,file,PagedIndex.PAGESIZE);
+ return createNewPage(this.atomicfields.getIndex(),file,PagedIndexTransactional.PAGESIZE);
}
- if(lowerpage==null){
- file.seek(lower);
- lowerpage=new TableIndexPage(index,file);
+ if(this.atomicfields.getLowerpage()==null){
+ file.seek(this.atomicfields.getLower());
+ this.atomicfields.setLowerpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file));
}
- index.stat_page_branch++;
- return lowerpage.getFirstFreePage(id,hashcode);
+ atomicfields.getIndex().atomicfields.setStat_page_branch(atomicfields.getIndex().atomicfields.getStat_page_branch()+1);
+ return this.atomicfields.getLowerpage().getFirstFreePage(id,hashcode);
}
}
// Do we have space in this page
- if(size-offset>id.length()*2+4+4+8+1+2)return this;
+ if(this.atomicfields.getSize()-this.atomicfields.getOffset()>id.length()*2+4+4+8+1+2)return this;
// Check next
- if(next==-1){
- next=file.length();
+ if(this.atomicfields.getNext()==-1){
+ this.atomicfields.setNext(file.length());
updateMeta();
- return createNewPage(index,file,PagedIndex.PAGESIZE);
+ return createNewPage(this.atomicfields.getIndex(),file,PagedIndexTransactional.PAGESIZE);
}
- if(nextpage==null){
- file.seek(next);
- nextpage=new TableIndexPage(index,file);
+ if(this.atomicfields.getNextpage()==null){
+ file.seek(this.atomicfields.getNext());
+ this.atomicfields.setNextpage(new TableIndexPageTransactional(this.atomicfields.getIndex(),file));
}
- index.stat_page_next++;
- return nextpage.getFirstFreePage(id,hashcode);
+ atomicfields.getIndex().atomicfields.setStat_page_next(atomicfields.getIndex().atomicfields.getStat_page_next()+1);
+ return this.atomicfields.getNextpage().getFirstFreePage(id,hashcode);
}
 public void addEntry(String id,int rowsize,int location,long position) throws IOException{
- if(offset==0)starthash=id.hashCode();
- file.seek(this.location+BASEOFFSET+offset);
- TableIndexEntry entry=new TableIndexEntry(id,rowsize,location,position);
+ // First entry written to this page: its hash initialises the page's starthash.
+ if(atomicfields.getOffset()==0)this.atomicfields.setStarthash(id.hashCode());
+ // Seek to the current append position inside this page (page base + header + used bytes).
+ file.seek(this.atomicfields.getLocation()+BASEOFFSET+this.atomicfields.getOffset());
+ TableIndexEntryTransactional entry=new TableIndexEntryTransactional(id,rowsize,location,position);
entry.writeData(file);
+ // Advance the page offset past the new entry and widen the endhash bound if this hash exceeds it.
+ this.atomicfields.setOffset(this.atomicfields.getOffset()+entry.getSize())
+ if(id.hashCode()>this.atomicfields.getEndhash()) this.atomicfields.setEndhash(id.hashCode());
updateMeta();
}
-}*/
\ No newline at end of file
+}
\ No newline at end of file
--- /dev/null
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package com.solidosystems.tuplesoup.core;
+
+import dstm2.AtomicSuperClass;
+import java.util.*;
+import java.io.*;
+
+public interface TableIndexTransactional extends AtomicSuperClass{
+ /** Returns the current values of the index-specific statistic counters, keyed by counter name. */
+ public Hashtable<String,Long> readStatistics();
+ /** Updates the existing index entry for the given row id with a new rowsize, location and file position. */
+ public void updateEntry(String id,int rowsize,int location,long position) throws IOException;
+ /** Adds a new index entry recording the row's size, location constant and file position. */
+ public void addEntry(String id,int rowsize,int location,long position) throws IOException;
+ /** Returns the index entry for the given row id; presumably null when absent — TODO confirm against implementations. */
+ public TableIndexEntryTransactional scanIndex(String id) throws IOException;
+ /** Returns the index entries for the given list of row ids. */
+ public List<TableIndexEntryTransactional> scanIndex(List<String> rows) throws IOException;
+ /** Returns every entry in the index. */
+ public List<TableIndexEntryTransactional> scanIndex() throws IOException;
+ /** Closes any open file streams held by the index. */
+ public void close();
+}
\ No newline at end of file
--- /dev/null
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package com.solidosystems.tuplesoup.core;
+
+/**
+ *
+ * @author navid
+ */
+/*
+ * Copyright (c) 2007, Solido Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * Neither the name of Solido Systems nor the names of its contributors may be
+ * used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+import java.io.*;
+import java.util.*;
+import java.nio.channels.*;
+import com.solidosystems.tuplesoup.filter.*;
+
+/**
+ * The table stores a group of rows.
+ * Every row must have a unique id within a table.
+ */
+public interface TableTransactional{
+ // Index type constants
+ // NOTE(review): presumably select which index implementation backs a table
+ // (in-memory, flat file, or paged) — confirm against the table constructors.
+ public static final int MEMORY=0;
+ public static final int FLAT=1;
+ public static final int PAGED=2;
+
+ // Row location constants
+ // NOTE(review): appear to tag where a row's data lives (data file A or B,
+ // deleted, or stored inline in the index) — confirm against the index entry code.
+ public static final int FILEA=0;
+ public static final int FILEB=1;
+ public static final int DELETE=2;
+ public static final int INDEX=3;
+
+ /**
+ * Return the current values of the statistic counters and reset them.
+ * The current counters are:
+ * <ul>
+ * <li>stat_table_add
+ * <li>stat_table_update
+ * <li>stat_table_delete
+ * <li>stat_table_add_size
+ * <li>stat_table_update_size
+ * <li>stat_table_read_size
+ * <li>stat_table_read
+ * <li>stat_table_cache_hit
+ * <li>stat_table_cache_miss
+ * <li>stat_table_cache_drop
+ * </ul>
+ * Furthermore, the index will be asked to deliver separate index specific counters
+ */
+ public Hashtable<String,Long> readStatistics();
+
+ /**
+ * Set the maximal allowable size of the index cache.
+ */
+ public void setIndexCacheSize(int newsize);
+
+ /**
+ * Close all open file streams
+ */
+ public void close();
+
+ /**
+ * Returns the name of this table
+ */
+ public String getTitle();
+
+ /**
+ * Returns the location of this table's datafiles
+ */
+ public String getLocation();
+
+ /**
+ * Delete the files created by this table object.
+ * Be aware that this will delete any data stored in this table!
+ */
+ public void deleteFiles();
+
+ /**
+ * Adds a row of data to this table.
+ */
+ public void addRow(RowTransactional row) throws IOException;
+
+ /**
+ * Adds a row to this table if it doesn't already exist, if it does it updates the row instead.
+ * This method is much slower than directly using add or update, so only use it if you don't know whether or not the row already exists.
+ */
+ public void addOrUpdateRow(RowTransactional row) throws IOException;
+
+ /**
+ * Updates a row stored in this table.
+ */
+ public void updateRow(RowTransactional row) throws IOException;
+
+ /**
+ * Marks a row as deleted in the index.
+ * Be aware that the space consumed by the row is not actually reclaimed.
+ */
+ public void deleteRow(RowTransactional row) throws IOException;
+
+ /**
+ * Returns a tuplestream containing the given list of rows
+ */
+ public TupleStreamTransactional getRows(List<String> rows) throws IOException;
+
+ /**
+ * Returns a tuplestream containing the rows matching the given rowmatcher
+ */
+ public TupleStreamTransactional getRows(RowMatcherTransactional matcher) throws IOException;
+
+ /**
+ * Returns a tuplestream containing those rows in the given list that matches the given RowMatcher
+ */
+ public TupleStreamTransactional getRows(List<String> rows,RowMatcherTransactional matcher) throws IOException;
+
+ /**
+ * Returns a tuplestream of all rows in this table.
+ */
+ public TupleStreamTransactional getRows() throws IOException;
+
+ /**
+ * Returns a single row stored in this table.
+ * If the row does not exist in the table, null will be returned.
+ */
+ public RowTransactional getRow(String id) throws IOException;
+ }
\ No newline at end of file
--- /dev/null
+/*
+ * To change this template, choose Tools | Templates
+ * and open the template in the editor.
+ */
+
+package com.solidosystems.tuplesoup.core;
+
+import java.io.IOException;
+
+/**
+ *
+ * @author navid
+ */
+public abstract class TupleStreamTransactional {
+ /** Returns true if at least one more row can be read from this stream; may perform I/O. */
+ public abstract boolean hasNext() throws IOException;
+ /** Returns the next row in the stream; behaviour when no row remains is implementation-defined — TODO confirm. */
+ public abstract RowTransactional next() throws IOException;
+}