--- /dev/null
+((nil . ((indent-tabs-mode . t))))
\ No newline at end of file
--- /dev/null
+#include "Abort.h"
+#include "ByteBuffer.h"
+
+Abort::Abort(Slot * slot, int64_t _transactionClientLocalSequenceNumber, int64_t _transactionSequenceNumber, int64_t _transactionMachineId, int64_t _transactionArbitrator, int64_t _arbitratorLocalSequenceNumber) :
+	Entry(slot),
+	transactionClientLocalSequenceNumber(_transactionClientLocalSequenceNumber),
+	transactionSequenceNumber(_transactionSequenceNumber),
+	sequenceNumber(-1),
+	transactionMachineId(_transactionMachineId),
+	transactionArbitrator(_transactionArbitrator),
+	arbitratorLocalSequenceNumber(_arbitratorLocalSequenceNumber),
+	abortId(new Pair<int64_t, int64_t>(transactionMachineId, transactionClientLocalSequenceNumber)) {
+}
+
+Abort::Abort(Slot * slot, int64_t _transactionClientLocalSequenceNumber, int64_t _transactionSequenceNumber, int64_t _sequenceNumber, int64_t _transactionMachineId, int64_t _transactionArbitrator, int64_t _arbitratorLocalSequenceNumber) :
+	Entry(slot),
+	transactionClientLocalSequenceNumber(_transactionClientLocalSequenceNumber),
+	transactionSequenceNumber(_transactionSequenceNumber),
+	sequenceNumber(_sequenceNumber),
+	transactionMachineId(_transactionMachineId),
+	transactionArbitrator(_transactionArbitrator),
+	arbitratorLocalSequenceNumber(_arbitratorLocalSequenceNumber),
+	abortId(new Pair<int64_t, int64_t>(transactionMachineId, transactionClientLocalSequenceNumber)) {
+}
+
+Entry * Abort_decode(Slot * slot, ByteBuffer * bb) {
+ int64_t transactionClientLocalSequenceNumber = bb->getLong();
+ int64_t transactionSequenceNumber = bb->getLong();
+ int64_t sequenceNumber = bb->getLong();
+ int64_t transactionMachineId = bb->getLong();
+ int64_t transactionArbitrator = bb->getLong();
+ int64_t arbitratorLocalSequenceNumber = bb->getLong();
+
+ return new Abort(slot, transactionClientLocalSequenceNumber, transactionSequenceNumber, sequenceNumber, transactionMachineId, transactionArbitrator, arbitratorLocalSequenceNumber);
+}
+
+void Abort::encode(ByteBuffer * bb) {
+	bb->put(TypeAbort);
+ bb->putLong(transactionClientLocalSequenceNumber);
+ bb->putLong(transactionSequenceNumber);
+ bb->putLong(sequenceNumber);
+ bb->putLong(transactionMachineId);
+ bb->putLong(transactionArbitrator);
+ bb->putLong(arbitratorLocalSequenceNumber);
+}
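
A minimal usage sketch for the encode path above (editorial addition, not part of the change itself): it uses only the Abort constructor, getSize(), and encode() from this file plus a caller-supplied ByteBuffer with the put/putLong interface seen above; the function name and the argument values are invented for illustration.

#include "Abort.h"
#include "ByteBuffer.h"

// Sketch only: writes one Abort into a caller-supplied buffer that must have
// at least getSize() = sizeof(char) + 6 * sizeof(uint64_t) = 49 bytes free.
void writeAbortSketch(Slot * slot, ByteBuffer * bb) {
	Abort * abort = new Abort(slot,
	                          1,   // transactionClientLocalSequenceNumber
	                          2,   // transactionSequenceNumber
	                          3,   // sequenceNumber
	                          4,   // transactionMachineId
	                          5,   // transactionArbitrator
	                          6);  // arbitratorLocalSequenceNumber
	// encode() emits the TypeAbort tag byte followed by the six 64-bit fields,
	// in the same order Abort_decode() reads them back.
	abort->encode(bb);
}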
--- /dev/null
+#ifndef ABORT_H
+#define ABORT_H
+#include "common.h"
+#include "Entry.h"
+#include "Pair.h"
+
+class Abort : public Entry {
+ private:
+ int64_t transactionClientLocalSequenceNumber;
+ int64_t transactionSequenceNumber;
+ int64_t sequenceNumber;
+ int64_t transactionMachineId;
+ int64_t transactionArbitrator;
+ int64_t arbitratorLocalSequenceNumber;
+ Pair<int64_t, int64_t> * abortId;
+
+ public:
+	Abort(Slot * slot, int64_t _transactionClientLocalSequenceNumber, int64_t _transactionSequenceNumber, int64_t _transactionMachineId, int64_t _transactionArbitrator, int64_t _arbitratorLocalSequenceNumber);
+	Abort(Slot * slot, int64_t _transactionClientLocalSequenceNumber, int64_t _transactionSequenceNumber, int64_t _sequenceNumber, int64_t _transactionMachineId, int64_t _transactionArbitrator, int64_t _arbitratorLocalSequenceNumber);
+
+ Pair<int64_t, int64_t> * getAbortId() {return abortId;}
+
+ int64_t getTransactionMachineId() { return transactionMachineId; }
+ int64_t getTransactionSequenceNumber() { return transactionSequenceNumber; }
+ int64_t getTransactionClientLocalSequenceNumber() { return transactionClientLocalSequenceNumber; }
+ int64_t getArbitratorLocalSequenceNumber() { return arbitratorLocalSequenceNumber; }
+ void setSlot(Slot * s) { parentslot = s; }
+ int64_t getSequenceNumber() { return sequenceNumber; }
+ void setSequenceNumber(int64_t _sequenceNumber) { sequenceNumber = _sequenceNumber; }
+ int64_t getTransactionArbitrator() { return transactionArbitrator; }
+
+	void encode(ByteBuffer * bb);
+ int getSize() { return (6 * sizeof(uint64_t)) + sizeof(char); }
+ char getType() { return TypeAbort; }
+ Entry * getCopy(Slot * s) { return new Abort(s, transactionClientLocalSequenceNumber, transactionSequenceNumber, sequenceNumber, transactionMachineId, transactionArbitrator, arbitratorLocalSequenceNumber); }
+};
+
+Entry * Abort_decode(Slot * slot, ByteBuffer * bb);
+#endif
--- /dev/null
+#include "ArbitrationRound.h"
+
+ArbitrationRound::ArbitrationRound(Commit * _commit, Set<Abort *> * _abortsBefore) {
+	parts = new ArrayList<Entry *>();
+	commit = _commit;
+	abortsBefore = _abortsBefore;
+
+	if (commit != NULL) {
+		commit->createCommitParts();
+		currentSize += commit->getNumberOfParts();
+	}
+
+	currentSize += abortsBefore->size();
+}
+
+void ArbitrationRound::generateParts() {
+	if (didGenerateParts) {
+		return;
+	}
+	didGenerateParts = true;
+	parts = new ArrayList<Entry *>(abortsBefore);
+	if (commit != NULL) {
+		parts->addAll(commit->getParts()->values());
+	}
+}
+
+List<Entry *> * ArbitrationRound::getParts() {
+ return parts;
+}
+
+void ArbitrationRound::removeParts(List<Entry *> * removeParts) {
+	parts->removeAll(removeParts);
+	didSendPart = true;
+}
+
+bool ArbitrationRound::isDoneSending() {
+	if ((commit == NULL) && abortsBefore->isEmpty()) {
+		return true;
+	}
+
+	return parts->isEmpty();
+}
+
+Commit * ArbitrationRound::getCommit() {
+ return commit;
+}
+
+void ArbitrationRound::setCommit(Commit * _commit) {
+	if (commit != NULL) {
+		currentSize -= commit->getNumberOfParts();
+	}
+	commit = _commit;
+
+	if (commit != NULL) {
+		currentSize += commit->getNumberOfParts();
+	}
+}
+
+void ArbitrationRound::addAbort(Abort * abort) {
+	abortsBefore->add(abort);
+	currentSize++;
+}
+
+void ArbitrationRound::addAborts(Set<Abort *> * aborts) {
+	abortsBefore->addAll(aborts);
+	currentSize += aborts->size();
+}
+
+Set<Abort *> * ArbitrationRound::getAborts() {
+	return abortsBefore;
+}
+
+int ArbitrationRound::getAbortsCount() {
+	return abortsBefore->size();
+}
+
+int ArbitrationRound::getCurrentSize() {
+ return currentSize;
+}
+
+bool ArbitrationRound::isFull() {
+ return currentSize >= MAX_PARTS;
+}
+
+bool ArbitrationRound::getDidSendPart() {
+	return didSendPart;
+}
+
--- /dev/null
+#ifndef ARBITRATIONROUND_H
+#define ARBITRATIONROUND_H
+
+#include "common.h"
+
+#define MAX_PARTS 10
+
+class ArbitrationRound {
+ private:
+ Set<Abort *> * abortsBefore = NULL;
+ List<Entry *> * parts = NULL;
+	Commit * commit = NULL;
+ int currentSize = 0;
+ bool didSendPart = false;
+ bool didGenerateParts = false;
+
+ public:
+ ArbitrationRound(Commit * _commit, Set<Abort *> * _abortsBefore);
+	void generateParts();
+	List<Entry *> * getParts();
+	void removeParts(List<Entry *> * removeParts);
+	bool isDoneSending();
+	Commit * getCommit();
+	void setCommit(Commit * _commit);
+ void addAbort(Abort * abort);
+ void addAborts(Set<Abort *> * aborts);
+ Set<Abort *> * getAborts();
+ int getAbortsCount();
+ int getCurrentSize();
+ bool isFull();
+	bool getDidSendPart();
+};
+
+#endif
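
A usage sketch for the round lifecycle declared above, added for illustration: it assumes HashSet is the project's Set implementation and that the remaining types come in through common.h; the loop stands in for a sender that would normally push at most MAX_PARTS entries per outgoing slot.

#include "ArbitrationRound.h"

// Illustrative driver: build a round from one commit and one abort, then
// drain it the way a sender would between slot transmissions.
void arbitrationRoundSketch(Commit * commit, Abort * abort) {
	Set<Abort *> * aborts = new HashSet<Abort *>();   // HashSet is an assumed Set implementation
	aborts->add(abort);

	ArbitrationRound * round = new ArbitrationRound(commit, aborts);
	round->generateParts();

	while (!round->isDoneSending()) {
		// A real sender would take at most MAX_PARTS entries here and only
		// remove the ones that actually fit into the outgoing slot.
		List<Entry *> * sent = round->getParts();
		round->removeParts(sent);
	}
}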
--- /dev/null
+
+
+
+
+/**
+ * This class provides a communication API to the webserver. It also
+ * validates the HMACs on the slots and handles encryption.
+ * @author Brian Demsky <bdemsky@uci.edu>
+ * @version 1.0
+ */
+
+
+class CloudComm {
+ static final int SALT_SIZE = 8;
+ static final int TIMEOUT_MILLIS = 5000; // 100
+ static final int IV_SIZE = 16;
+
+ /** Sets the size for the HMAC. */
+ static final int HMAC_SIZE = 32;
+
+ String baseurl;
+ SecretKeySpec key;
+ Mac mac;
+ String password;
+ SecureRandom random;
+ char salt[];
+ Table table;
+ int listeningPort = -1;
+ Thread localServerThread = NULL;
+ bool doEnd = false;
+
+ TimingSingleton timer = NULL;
+
+ /**
+ * Empty Constructor needed for child class.
+ */
+ CloudComm() {
+ timer = TimingSingleton.getInstance();
+ }
+
+ /**
+ * Constructor for actual use. Takes in the url and password.
+ */
+ CloudComm(Table _table, String _baseurl, String _password, int _listeningPort) {
+ timer = TimingSingleton.getInstance();
+ this.table = _table;
+ this.baseurl = _baseurl;
+ this.password = _password;
+ this.random = new SecureRandom();
+ this.listeningPort = _listeningPort;
+
+ if (this.listeningPort > 0) {
+ localServerThread = new Thread(new Runnable() {
+ void run() {
+ localServerWorkerFunction();
+ }
+ });
+ localServerThread.start();
+ }
+ }
+
+ /**
+ * Generates Key from password.
+ */
+ SecretKeySpec initKey() {
+ try {
+ PBEKeySpec keyspec = new PBEKeySpec(password.toCharArray(),
+ salt,
+ 65536,
+ 128);
+ SecretKey tmpkey = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA256").generateSecret(keyspec);
+ return new SecretKeySpec(tmpkey.getEncoded(), "AES");
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Failed generating key.");
+ }
+ }
+
+ /**
+ * Inits all the security stuff
+ */
+ void initSecurity() throws ServerException {
+ // try to get the salt and if one does not exist set one
+ if (!getSalt()) {
+ //Set the salt
+ setSalt();
+ }
+
+ initCrypt();
+ }
+
+ /**
+ * Inits the HMAC generator.
+ */
+ void initCrypt() {
+
+ if (password == NULL) {
+ return;
+ }
+
+ try {
+ key = initKey();
+ password = NULL; // drop password
+ mac = Mac.getInstance("HmacSHA256");
+ mac.init(key);
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Failed To Initialize Ciphers");
+ }
+ }
+
+ /*
+ * Builds the URL for the given request.
+ */
+ URL buildRequest(bool isput, int64_t sequencenumber, int64_t maxentries) throws IOException {
+ String reqstring = isput ? "req=putslot" : "req=getslot";
+ String urlstr = baseurl + "?" + reqstring + "&seq=" + sequencenumber;
+ if (maxentries != 0)
+ urlstr += "&max=" + maxentries;
+ return new URL(urlstr);
+ }
+
+ void setSalt() throws ServerException {
+
+ if (salt != NULL) {
+			// Salt already sent to server so don't set it again
+ return;
+ }
+
+ try {
+ char[] saltTmp = new char[SALT_SIZE];
+ random.nextBytes(saltTmp);
+
+ for (int i = 0; i < SALT_SIZE; i++) {
+ System.out.println((int)saltTmp[i] & 255);
+ }
+
+
+ URL url = new URL(baseurl + "?req=setsalt");
+
+ timer.startTime();
+ URLConnection con = url.openConnection();
+ HttpURLConnection http = (HttpURLConnection) con;
+
+ http.setRequestMethod("POST");
+ http.setFixedLengthStreamingMode(saltTmp.length);
+ http.setDoOutput(true);
+ http.setConnectTimeout(TIMEOUT_MILLIS);
+
+
+ http.connect();
+
+ OutputStream os = http.getOutputStream();
+ os.write(saltTmp);
+ os.flush();
+
+ int responsecode = http.getResponseCode();
+ if (responsecode != HttpURLConnection.HTTP_OK) {
+ // TODO: Remove this print
+ System.out.println(responsecode);
+ throw new Error("Invalid response");
+ }
+
+ timer.endTime();
+
+ salt = saltTmp;
+ } catch (Exception e) {
+ // e.printStackTrace();
+ timer.endTime();
+ throw new ServerException("Failed setting salt", ServerException.TypeConnectTimeout);
+ }
+ }
+
+ bool getSalt() throws ServerException {
+ URL url = NULL;
+ URLConnection con = NULL;
+ HttpURLConnection http = NULL;
+
+ try {
+ url = new URL(baseurl + "?req=getsalt");
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("getSlot failed");
+ }
+ try {
+
+ timer.startTime();
+ con = url.openConnection();
+ http = (HttpURLConnection) con;
+ http.setRequestMethod("POST");
+ http.setConnectTimeout(TIMEOUT_MILLIS);
+ http.setReadTimeout(TIMEOUT_MILLIS);
+
+
+ http.connect();
+ timer.endTime();
+ } catch (SocketTimeoutException e) {
+ timer.endTime();
+ throw new ServerException("getSalt failed", ServerException.TypeConnectTimeout);
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("getSlot failed");
+ }
+
+ try {
+
+ timer.startTime();
+
+ int responsecode = http.getResponseCode();
+ if (responsecode != HttpURLConnection.HTTP_OK) {
+ // TODO: Remove this print
+ // System.out.println(responsecode);
+ throw new Error("Invalid response");
+ }
+
+ InputStream is = http.getInputStream();
+ if (is.available() > 0) {
+ DataInputStream dis = new DataInputStream(is);
+ int salt_length = dis.readInt();
+ char [] tmp = new char[salt_length];
+ dis.readFully(tmp);
+ salt = tmp;
+ timer.endTime();
+
+ return true;
+ } else {
+ timer.endTime();
+
+ return false;
+ }
+ } catch (SocketTimeoutException e) {
+ timer.endTime();
+
+ throw new ServerException("getSalt failed", ServerException.TypeInputTimeout);
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("getSlot failed");
+ }
+ }
+
+ char[] createIV(int64_t machineId, int64_t localSequenceNumber) {
+ ByteBuffer buffer = ByteBuffer.allocate(IV_SIZE);
+ buffer.putLong(machineId);
+ int64_t localSequenceNumberShifted = localSequenceNumber << 16;
+ buffer.putLong(localSequenceNumberShifted);
+ return buffer.array();
+
+ }
+
+ char[] encryptSlotAndPrependIV(char[] rawData, char[] ivBytes) {
+ try {
+ IvParameterSpec ivSpec = new IvParameterSpec(ivBytes);
+ Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
+ cipher.init(Cipher.ENCRYPT_MODE, key, ivSpec);
+
+ char[] encryptedBytes = cipher.doFinal(rawData);
+
+ char[] chars = new char[encryptedBytes.length + IV_SIZE];
+ System.arraycopy(ivBytes, 0, chars, 0, ivBytes.length);
+ System.arraycopy(encryptedBytes, 0, chars, IV_SIZE, encryptedBytes.length);
+
+ return chars;
+
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Failed To Encrypt");
+ }
+ }
+
+
+ char[] stripIVAndDecryptSlot(char[] rawData) {
+ try {
+ char[] ivBytes = new char[IV_SIZE];
+ char[] encryptedBytes = new char[rawData.length - IV_SIZE];
+ System.arraycopy(rawData, 0, ivBytes, 0, IV_SIZE);
+ System.arraycopy(rawData, IV_SIZE, encryptedBytes, 0 , encryptedBytes.length);
+
+ IvParameterSpec ivSpec = new IvParameterSpec(ivBytes);
+
+ Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
+ cipher.init(Cipher.DECRYPT_MODE, key, ivSpec);
+ return cipher.doFinal(encryptedBytes);
+
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Failed To Decrypt");
+ }
+ }
+
+
+ /*
+ * API for putting a slot into the queue. Returns NULL on success.
+ * On failure, the server will send slots with newer sequence
+ * numbers.
+ */
+ Slot[] putSlot(Slot slot, int max) throws ServerException {
+ URL url = NULL;
+ URLConnection con = NULL;
+ HttpURLConnection http = NULL;
+
+ try {
+ if (salt == NULL) {
+ if (!getSalt()) {
+ throw new ServerException("putSlot failed", ServerException.TypeSalt);
+ }
+ initCrypt();
+ }
+
+ int64_t sequencenumber = slot.getSequenceNumber();
+ char[] slotBytes = slot.encode(mac);
+ // slotBytes = encryptCipher.doFinal(slotBytes);
+
+ // char[] iVBytes = slot.getSlotCryptIV();
+
+ // char[] chars = new char[slotBytes.length + IV_SIZE];
+ // System.arraycopy(iVBytes, 0, chars, 0, iVBytes.length);
+ // System.arraycopy(slotBytes, 0, chars, IV_SIZE, slotBytes.length);
+
+
+ char[] chars = encryptSlotAndPrependIV(slotBytes, slot.getSlotCryptIV());
+
+ url = buildRequest(true, sequencenumber, max);
+
+ timer.startTime();
+ con = url.openConnection();
+ http = (HttpURLConnection) con;
+
+ http.setRequestMethod("POST");
+ http.setFixedLengthStreamingMode(chars.length);
+ http.setDoOutput(true);
+ http.setConnectTimeout(TIMEOUT_MILLIS);
+ http.setReadTimeout(TIMEOUT_MILLIS);
+ http.connect();
+
+ OutputStream os = http.getOutputStream();
+ os.write(chars);
+ os.flush();
+
+ timer.endTime();
+
+
+ // System.out.println("Bytes Sent: " + chars.length);
+ } catch (ServerException e) {
+ timer.endTime();
+
+ throw e;
+ } catch (SocketTimeoutException e) {
+ timer.endTime();
+
+ throw new ServerException("putSlot failed", ServerException.TypeConnectTimeout);
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("putSlot failed");
+ }
+
+
+
+ try {
+ timer.startTime();
+ InputStream is = http.getInputStream();
+ DataInputStream dis = new DataInputStream(is);
+ char[] resptype = new char[7];
+ dis.readFully(resptype);
+ timer.endTime();
+
+ if (Arrays.equals(resptype, "getslot".getBytes())) {
+ return processSlots(dis);
+ } else if (Arrays.equals(resptype, "putslot".getBytes())) {
+ return NULL;
+ } else
+ throw new Error("Bad response to putslot");
+
+ } catch (SocketTimeoutException e) {
+ timer.endTime();
+ throw new ServerException("putSlot failed", ServerException.TypeInputTimeout);
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("putSlot failed");
+ }
+ }
+
+ /**
+ * Request the server to send all slots with the given
+ * sequencenumber or newer.
+ */
+ Slot[] getSlots(int64_t sequencenumber) throws ServerException {
+ URL url = NULL;
+ URLConnection con = NULL;
+ HttpURLConnection http = NULL;
+
+ try {
+ if (salt == NULL) {
+ if (!getSalt()) {
+ throw new ServerException("getSlots failed", ServerException.TypeSalt);
+ }
+ initCrypt();
+ }
+
+ url = buildRequest(false, sequencenumber, 0);
+ timer.startTime();
+ con = url.openConnection();
+ http = (HttpURLConnection) con;
+ http.setRequestMethod("POST");
+ http.setConnectTimeout(TIMEOUT_MILLIS);
+ http.setReadTimeout(TIMEOUT_MILLIS);
+
+
+
+ http.connect();
+ timer.endTime();
+
+ } catch (SocketTimeoutException e) {
+ timer.endTime();
+
+ throw new ServerException("getSlots failed", ServerException.TypeConnectTimeout);
+ } catch (ServerException e) {
+ timer.endTime();
+
+ throw e;
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("getSlots failed");
+ }
+
+ try {
+
+ timer.startTime();
+ InputStream is = http.getInputStream();
+ DataInputStream dis = new DataInputStream(is);
+ char[] resptype = new char[7];
+
+ dis.readFully(resptype);
+ timer.endTime();
+
+ if (!Arrays.equals(resptype, "getslot".getBytes()))
+ throw new Error("Bad Response: " + new String(resptype));
+
+ return processSlots(dis);
+ } catch (SocketTimeoutException e) {
+ timer.endTime();
+
+ throw new ServerException("getSlots failed", ServerException.TypeInputTimeout);
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("getSlots failed");
+ }
+ }
+
+ /**
+ * Method that actually handles building Slot objects from the
+ * server response. Shared by both putSlot and getSlots.
+ */
+ Slot[] processSlots(DataInputStream dis) throws Exception {
+ int numberofslots = dis.readInt();
+ int[] sizesofslots = new int[numberofslots];
+
+ Slot[] slots = new Slot[numberofslots];
+ for (int i = 0; i < numberofslots; i++)
+ sizesofslots[i] = dis.readInt();
+
+ for (int i = 0; i < numberofslots; i++) {
+
+ char[] rawData = new char[sizesofslots[i]];
+ dis.readFully(rawData);
+
+
+ // char[] data = new char[rawData.length - IV_SIZE];
+ // System.arraycopy(rawData, IV_SIZE, data, 0, data.length);
+
+
+ char[] data = stripIVAndDecryptSlot(rawData);
+
+ // data = decryptCipher.doFinal(data);
+
+ slots[i] = Slot.decode(table, data, mac);
+ }
+ dis.close();
+ return slots;
+ }
+
+ char[] sendLocalData(char[] sendData, int64_t localSequenceNumber, String host, int port) {
+
+ if (salt == NULL) {
+ return NULL;
+ }
+ try {
+ System.out.println("Passing Locally");
+
+ mac.update(sendData);
+ char[] genmac = mac.doFinal();
+ char[] totalData = new char[sendData.length + genmac.length];
+ System.arraycopy(sendData, 0, totalData, 0, sendData.length);
+ System.arraycopy(genmac, 0, totalData, sendData.length, genmac.length);
+
+ // Encrypt the data for sending
+ // char[] encryptedData = encryptCipher.doFinal(totalData);
+ // char[] encryptedData = encryptCipher.doFinal(totalData);
+ char[] iv = createIV(table.getMachineId(), table.getLocalSequenceNumber());
+ char[] encryptedData = encryptSlotAndPrependIV(totalData, iv);
+
+ // Open a TCP socket connection to a local device
+ Socket socket = new Socket(host, port);
+ socket.setReuseAddress(true);
+ DataOutputStream output = new DataOutputStream(socket.getOutputStream());
+ DataInputStream input = new DataInputStream(socket.getInputStream());
+
+
+ timer.startTime();
+ // Send data to output (length of data, the data)
+ output.writeInt(encryptedData.length);
+ output.write(encryptedData, 0, encryptedData.length);
+ output.flush();
+
+ int lengthOfReturnData = input.readInt();
+ char[] returnData = new char[lengthOfReturnData];
+ input.readFully(returnData);
+
+ timer.endTime();
+
+ // returnData = decryptCipher.doFinal(returnData);
+ returnData = stripIVAndDecryptSlot(returnData);
+ // returnData = decryptCipher.doFinal(returnData);
+
+ // We are done with this socket
+ socket.close();
+
+ mac.update(returnData, 0, returnData.length - HMAC_SIZE);
+ char[] realmac = mac.doFinal();
+ char[] recmac = new char[HMAC_SIZE];
+ System.arraycopy(returnData, returnData.length - realmac.length, recmac, 0, realmac.length);
+
+ if (!Arrays.equals(recmac, realmac))
+ throw new Error("Local Error: Invalid HMAC! Potential Attack!");
+
+ char[] returnData2 = new char[lengthOfReturnData - recmac.length];
+ System.arraycopy(returnData, 0, returnData2, 0, returnData2.length);
+
+ return returnData2;
+ } catch (Exception e) {
+ e.printStackTrace();
+ // throw new Error("Local comms failure...");
+
+ }
+
+ return NULL;
+ }
+
+ void localServerWorkerFunction() {
+
+ ServerSocket inputSocket = NULL;
+
+ try {
+ // Local server socket
+ inputSocket = new ServerSocket(listeningPort);
+ inputSocket.setReuseAddress(true);
+ inputSocket.setSoTimeout(TIMEOUT_MILLIS);
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Local server setup failure...");
+ }
+
+ while (!doEnd) {
+
+ try {
+ // Accept incoming socket
+ Socket socket = inputSocket.accept();
+
+ DataInputStream input = new DataInputStream(socket.getInputStream());
+ DataOutputStream output = new DataOutputStream(socket.getOutputStream());
+
+ // Get the encrypted data from the server
+ int dataSize = input.readInt();
+ char[] readData = new char[dataSize];
+ input.readFully(readData);
+
+ timer.endTime();
+
+ // Decrypt the data
+ // readData = decryptCipher.doFinal(readData);
+ readData = stripIVAndDecryptSlot(readData);
+
+ mac.update(readData, 0, readData.length - HMAC_SIZE);
+ char[] genmac = mac.doFinal();
+ char[] recmac = new char[HMAC_SIZE];
+ System.arraycopy(readData, readData.length - recmac.length, recmac, 0, recmac.length);
+
+ if (!Arrays.equals(recmac, genmac))
+ throw new Error("Local Error: Invalid HMAC! Potential Attack!");
+
+ char[] returnData = new char[readData.length - recmac.length];
+ System.arraycopy(readData, 0, returnData, 0, returnData.length);
+
+ // Process the data
+ // char[] sendData = table.acceptDataFromLocal(readData);
+ char[] sendData = table.acceptDataFromLocal(returnData);
+
+
+ mac.update(sendData);
+ char[] realmac = mac.doFinal();
+ char[] totalData = new char[sendData.length + realmac.length];
+ System.arraycopy(sendData, 0, totalData, 0, sendData.length);
+ System.arraycopy(realmac, 0, totalData, sendData.length, realmac.length);
+
+ // Encrypt the data for sending
+ // char[] encryptedData = encryptCipher.doFinal(totalData);
+ char[] iv = createIV(table.getMachineId(), table.getLocalSequenceNumber());
+ char[] encryptedData = encryptSlotAndPrependIV(totalData, iv);
+
+
+ timer.startTime();
+ // Send data to output (length of data, the data)
+ output.writeInt(encryptedData.length);
+ output.write(encryptedData, 0, encryptedData.length);
+ output.flush();
+
+ // close the socket
+ socket.close();
+ } catch (Exception e) {
+
+ }
+ }
+
+ if (inputSocket != NULL) {
+ try {
+ inputSocket.close();
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Local server close failure...");
+ }
+ }
+ }
+
+ void close() {
+ doEnd = true;
+
+ if (localServerThread != NULL) {
+ try {
+ localServerThread.join();
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Local Server thread join issue...");
+ }
+ }
+
+ // System.out.println("Done Closing Cloud Comm");
+ }
+
+ protected void finalize() throws Throwable {
+ try {
+ close(); // close open files
+ } finally {
+ super.finalize();
+ }
+ }
+}
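
A standalone C++ sketch of the 16-byte IV that createIV() assembles above, assuming ByteBuffer's default big-endian byte order; the function names below are invented for the example and are not part of CloudComm.

#include <stdint.h>

// Big-endian pack, matching ByteBuffer.putLong's default byte order.
static void packBigEndian64(uint8_t * out, uint64_t v) {
	for (int i = 0; i < 8; i++)
		out[i] = (uint8_t) (v >> (56 - 8 * i));   // most significant byte first
}

// Builds the same 16-byte IV as createIV(): machineId in the first 8 bytes,
// (localSequenceNumber << 16) in the last 8. The payload that actually goes
// over the wire is then iv || AES/CTR ciphertext, as produced by
// encryptSlotAndPrependIV() and undone by stripIVAndDecryptSlot().
void buildIVSketch(uint8_t iv[16], int64_t machineId, int64_t localSequenceNumber) {
	packBigEndian64(iv, (uint64_t) machineId);
	packBigEndian64(iv + 8, ((uint64_t) localSequenceNumber) << 16);
}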
--- /dev/null
+
+
+
+
+/**
+ * This class provides a communication API to the webserver. It also
+ * validates the HMACs on the slots and handles encryption.
+ * @author Brian Demsky <bdemsky@uci.edu>
+ * @version 1.0
+ */
+
+
+class CloudComm {
+ private static final int SALT_SIZE = 8;
+ private static final int TIMEOUT_MILLIS = 5000; // 100
+ public static final int IV_SIZE = 16;
+
+ /** Sets the size for the HMAC. */
+ static final int HMAC_SIZE = 32;
+
+ private String baseurl;
+ private SecretKeySpec key;
+ private Mac mac;
+ private String password;
+ private SecureRandom random;
+ private char salt[];
+ private Table table;
+ private int listeningPort = -1;
+ private Thread localServerThread = NULL;
+ private bool doEnd = false;
+
+ private TimingSingleton timer = NULL;
+
+ /**
+ * Empty Constructor needed for child class.
+ */
+ CloudComm() {
+ timer = TimingSingleton.getInstance();
+ }
+
+ /**
+ * Constructor for actual use. Takes in the url and password.
+ */
+ CloudComm(Table _table, String _baseurl, String _password, int _listeningPort) {
+ timer = TimingSingleton.getInstance();
+ this.table = _table;
+ this.baseurl = _baseurl;
+ this.password = _password;
+ this.random = new SecureRandom();
+ this.listeningPort = _listeningPort;
+
+ if (this.listeningPort > 0) {
+ localServerThread = new Thread(new Runnable() {
+ public void run() {
+ localServerWorkerFunction();
+ }
+ });
+ localServerThread.start();
+ }
+ }
+
+ /**
+ * Generates Key from password.
+ */
+ private SecretKeySpec initKey() {
+ try {
+ PBEKeySpec keyspec = new PBEKeySpec(password.toCharArray(),
+ salt,
+ 65536,
+ 128);
+ SecretKey tmpkey = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA256").generateSecret(keyspec);
+ return new SecretKeySpec(tmpkey.getEncoded(), "AES");
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Failed generating key.");
+ }
+ }
+
+ /**
+ * Inits all the security stuff
+ */
+ public void initSecurity() throws ServerException {
+ // try to get the salt and if one does not exist set one
+ if (!getSalt()) {
+ //Set the salt
+ setSalt();
+ }
+
+ initCrypt();
+ }
+
+ /**
+ * Inits the HMAC generator.
+ */
+ private void initCrypt() {
+
+ if (password == NULL) {
+ return;
+ }
+
+ try {
+ key = initKey();
+ password = NULL; // drop password
+ mac = Mac.getInstance("HmacSHA256");
+ mac.init(key);
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Failed To Initialize Ciphers");
+ }
+ }
+
+ /*
+ * Builds the URL for the given request.
+ */
+ private URL buildRequest(bool isput, int64_t sequencenumber, int64_t maxentries) throws IOException {
+ String reqstring = isput ? "req=putslot" : "req=getslot";
+ String urlstr = baseurl + "?" + reqstring + "&seq=" + sequencenumber;
+ if (maxentries != 0)
+ urlstr += "&max=" + maxentries;
+ return new URL(urlstr);
+ }
+
+ private void setSalt() throws ServerException {
+
+ if (salt != NULL) {
+			// Salt already sent to server so don't set it again
+ return;
+ }
+
+ try {
+ char[] saltTmp = new char[SALT_SIZE];
+ random.nextBytes(saltTmp);
+
+ for (int i = 0; i < SALT_SIZE; i++) {
+ System.out.println((int)saltTmp[i] & 255);
+ }
+
+
+ URL url = new URL(baseurl + "?req=setsalt");
+
+ timer.startTime();
+ URLConnection con = url.openConnection();
+ HttpURLConnection http = (HttpURLConnection) con;
+
+ http.setRequestMethod("POST");
+ http.setFixedLengthStreamingMode(saltTmp.length);
+ http.setDoOutput(true);
+ http.setConnectTimeout(TIMEOUT_MILLIS);
+
+
+ http.connect();
+
+ OutputStream os = http.getOutputStream();
+ os.write(saltTmp);
+ os.flush();
+
+ int responsecode = http.getResponseCode();
+ if (responsecode != HttpURLConnection.HTTP_OK) {
+ // TODO: Remove this print
+ System.out.println(responsecode);
+ throw new Error("Invalid response");
+ }
+
+ timer.endTime();
+
+ salt = saltTmp;
+ } catch (Exception e) {
+ // e.printStackTrace();
+ timer.endTime();
+ throw new ServerException("Failed setting salt", ServerException.TypeConnectTimeout);
+ }
+ }
+
+ private bool getSalt() throws ServerException {
+ URL url = NULL;
+ URLConnection con = NULL;
+ HttpURLConnection http = NULL;
+
+ try {
+ url = new URL(baseurl + "?req=getsalt");
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("getSlot failed");
+ }
+ try {
+
+ timer.startTime();
+ con = url.openConnection();
+ http = (HttpURLConnection) con;
+ http.setRequestMethod("POST");
+ http.setConnectTimeout(TIMEOUT_MILLIS);
+ http.setReadTimeout(TIMEOUT_MILLIS);
+
+
+ http.connect();
+ timer.endTime();
+ } catch (SocketTimeoutException e) {
+ timer.endTime();
+ throw new ServerException("getSalt failed", ServerException.TypeConnectTimeout);
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("getSlot failed");
+ }
+
+ try {
+
+ timer.startTime();
+
+ int responsecode = http.getResponseCode();
+ if (responsecode != HttpURLConnection.HTTP_OK) {
+ // TODO: Remove this print
+ // System.out.println(responsecode);
+ throw new Error("Invalid response");
+ }
+
+ InputStream is = http.getInputStream();
+ if (is.available() > 0) {
+ DataInputStream dis = new DataInputStream(is);
+ int salt_length = dis.readInt();
+ char [] tmp = new char[salt_length];
+ dis.readFully(tmp);
+ salt = tmp;
+ timer.endTime();
+
+ return true;
+ } else {
+ timer.endTime();
+
+ return false;
+ }
+ } catch (SocketTimeoutException e) {
+ timer.endTime();
+
+ throw new ServerException("getSalt failed", ServerException.TypeInputTimeout);
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("getSlot failed");
+ }
+ }
+
+ private char[] createIV(int64_t machineId, int64_t localSequenceNumber) {
+ ByteBuffer buffer = ByteBuffer.allocate(IV_SIZE);
+ buffer.putLong(machineId);
+ int64_t localSequenceNumberShifted = localSequenceNumber << 16;
+ buffer.putLong(localSequenceNumberShifted);
+ return buffer.array();
+
+ }
+
+ private char[] encryptSlotAndPrependIV(char[] rawData, char[] ivBytes) {
+ try {
+ IvParameterSpec ivSpec = new IvParameterSpec(ivBytes);
+ Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
+ cipher.init(Cipher.ENCRYPT_MODE, key, ivSpec);
+
+ char[] encryptedBytes = cipher.doFinal(rawData);
+
+ char[] chars = new char[encryptedBytes.length + IV_SIZE];
+ System.arraycopy(ivBytes, 0, chars, 0, ivBytes.length);
+ System.arraycopy(encryptedBytes, 0, chars, IV_SIZE, encryptedBytes.length);
+
+ return chars;
+
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Failed To Encrypt");
+ }
+ }
+
+
+ private char[] stripIVAndDecryptSlot(char[] rawData) {
+ try {
+ char[] ivBytes = new char[IV_SIZE];
+ char[] encryptedBytes = new char[rawData.length - IV_SIZE];
+ System.arraycopy(rawData, 0, ivBytes, 0, IV_SIZE);
+ System.arraycopy(rawData, IV_SIZE, encryptedBytes, 0 , encryptedBytes.length);
+
+ IvParameterSpec ivSpec = new IvParameterSpec(ivBytes);
+
+ Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
+ cipher.init(Cipher.DECRYPT_MODE, key, ivSpec);
+ return cipher.doFinal(encryptedBytes);
+
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Failed To Decrypt");
+ }
+ }
+
+
+ /*
+ * API for putting a slot into the queue. Returns NULL on success.
+ * On failure, the server will send slots with newer sequence
+ * numbers.
+ */
+ public Slot[] putSlot(Slot slot, int max) throws ServerException {
+ URL url = NULL;
+ URLConnection con = NULL;
+ HttpURLConnection http = NULL;
+
+ try {
+ if (salt == NULL) {
+ if (!getSalt()) {
+ throw new ServerException("putSlot failed", ServerException.TypeSalt);
+ }
+ initCrypt();
+ }
+
+ int64_t sequencenumber = slot.getSequenceNumber();
+ char[] slotBytes = slot.encode(mac);
+ // slotBytes = encryptCipher.doFinal(slotBytes);
+
+ // char[] iVBytes = slot.getSlotCryptIV();
+
+ // char[] chars = new char[slotBytes.length + IV_SIZE];
+ // System.arraycopy(iVBytes, 0, chars, 0, iVBytes.length);
+ // System.arraycopy(slotBytes, 0, chars, IV_SIZE, slotBytes.length);
+
+
+ char[] chars = encryptSlotAndPrependIV(slotBytes, slot.getSlotCryptIV());
+
+ url = buildRequest(true, sequencenumber, max);
+
+ timer.startTime();
+ con = url.openConnection();
+ http = (HttpURLConnection) con;
+
+ http.setRequestMethod("POST");
+ http.setFixedLengthStreamingMode(chars.length);
+ http.setDoOutput(true);
+ http.setConnectTimeout(TIMEOUT_MILLIS);
+ http.setReadTimeout(TIMEOUT_MILLIS);
+ http.connect();
+
+ OutputStream os = http.getOutputStream();
+ os.write(chars);
+ os.flush();
+
+ timer.endTime();
+
+
+ // System.out.println("Bytes Sent: " + chars.length);
+ } catch (ServerException e) {
+ timer.endTime();
+
+ throw e;
+ } catch (SocketTimeoutException e) {
+ timer.endTime();
+
+ throw new ServerException("putSlot failed", ServerException.TypeConnectTimeout);
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("putSlot failed");
+ }
+
+
+
+ try {
+ timer.startTime();
+ InputStream is = http.getInputStream();
+ DataInputStream dis = new DataInputStream(is);
+ char[] resptype = new char[7];
+ dis.readFully(resptype);
+ timer.endTime();
+
+ if (Arrays.equals(resptype, "getslot".getBytes())) {
+ return processSlots(dis);
+ } else if (Arrays.equals(resptype, "putslot".getBytes())) {
+ return NULL;
+ } else
+ throw new Error("Bad response to putslot");
+
+ } catch (SocketTimeoutException e) {
+ timer.endTime();
+ throw new ServerException("putSlot failed", ServerException.TypeInputTimeout);
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("putSlot failed");
+ }
+ }
+
+ /**
+ * Request the server to send all slots with the given
+ * sequencenumber or newer.
+ */
+ public Slot[] getSlots(int64_t sequencenumber) throws ServerException {
+ URL url = NULL;
+ URLConnection con = NULL;
+ HttpURLConnection http = NULL;
+
+ try {
+ if (salt == NULL) {
+ if (!getSalt()) {
+ throw new ServerException("getSlots failed", ServerException.TypeSalt);
+ }
+ initCrypt();
+ }
+
+ url = buildRequest(false, sequencenumber, 0);
+ timer.startTime();
+ con = url.openConnection();
+ http = (HttpURLConnection) con;
+ http.setRequestMethod("POST");
+ http.setConnectTimeout(TIMEOUT_MILLIS);
+ http.setReadTimeout(TIMEOUT_MILLIS);
+
+
+
+ http.connect();
+ timer.endTime();
+
+ } catch (SocketTimeoutException e) {
+ timer.endTime();
+
+ throw new ServerException("getSlots failed", ServerException.TypeConnectTimeout);
+ } catch (ServerException e) {
+ timer.endTime();
+
+ throw e;
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("getSlots failed");
+ }
+
+ try {
+
+ timer.startTime();
+ InputStream is = http.getInputStream();
+ DataInputStream dis = new DataInputStream(is);
+ char[] resptype = new char[7];
+
+ dis.readFully(resptype);
+ timer.endTime();
+
+ if (!Arrays.equals(resptype, "getslot".getBytes()))
+ throw new Error("Bad Response: " + new String(resptype));
+
+ return processSlots(dis);
+ } catch (SocketTimeoutException e) {
+ timer.endTime();
+
+ throw new ServerException("getSlots failed", ServerException.TypeInputTimeout);
+ } catch (Exception e) {
+ // e.printStackTrace();
+ throw new Error("getSlots failed");
+ }
+ }
+
+ /**
+ * Method that actually handles building Slot objects from the
+ * server response. Shared by both putSlot and getSlots.
+ */
+ private Slot[] processSlots(DataInputStream dis) throws Exception {
+ int numberofslots = dis.readInt();
+ int[] sizesofslots = new int[numberofslots];
+
+ Slot[] slots = new Slot[numberofslots];
+ for (int i = 0; i < numberofslots; i++)
+ sizesofslots[i] = dis.readInt();
+
+ for (int i = 0; i < numberofslots; i++) {
+
+ char[] rawData = new char[sizesofslots[i]];
+ dis.readFully(rawData);
+
+
+ // char[] data = new char[rawData.length - IV_SIZE];
+ // System.arraycopy(rawData, IV_SIZE, data, 0, data.length);
+
+
+ char[] data = stripIVAndDecryptSlot(rawData);
+
+ // data = decryptCipher.doFinal(data);
+
+ slots[i] = Slot.decode(table, data, mac);
+ }
+ dis.close();
+ return slots;
+ }
+
+ public char[] sendLocalData(char[] sendData, int64_t localSequenceNumber, String host, int port) {
+
+ if (salt == NULL) {
+ return NULL;
+ }
+ try {
+ System.out.println("Passing Locally");
+
+ mac.update(sendData);
+ char[] genmac = mac.doFinal();
+ char[] totalData = new char[sendData.length + genmac.length];
+ System.arraycopy(sendData, 0, totalData, 0, sendData.length);
+ System.arraycopy(genmac, 0, totalData, sendData.length, genmac.length);
+
+ // Encrypt the data for sending
+ // char[] encryptedData = encryptCipher.doFinal(totalData);
+ // char[] encryptedData = encryptCipher.doFinal(totalData);
+ char[] iv = createIV(table.getMachineId(), table.getLocalSequenceNumber());
+ char[] encryptedData = encryptSlotAndPrependIV(totalData, iv);
+
+ // Open a TCP socket connection to a local device
+ Socket socket = new Socket(host, port);
+ socket.setReuseAddress(true);
+ DataOutputStream output = new DataOutputStream(socket.getOutputStream());
+ DataInputStream input = new DataInputStream(socket.getInputStream());
+
+
+ timer.startTime();
+ // Send data to output (length of data, the data)
+ output.writeInt(encryptedData.length);
+ output.write(encryptedData, 0, encryptedData.length);
+ output.flush();
+
+ int lengthOfReturnData = input.readInt();
+ char[] returnData = new char[lengthOfReturnData];
+ input.readFully(returnData);
+
+ timer.endTime();
+
+ // returnData = decryptCipher.doFinal(returnData);
+ returnData = stripIVAndDecryptSlot(returnData);
+ // returnData = decryptCipher.doFinal(returnData);
+
+ // We are done with this socket
+ socket.close();
+
+ mac.update(returnData, 0, returnData.length - HMAC_SIZE);
+ char[] realmac = mac.doFinal();
+ char[] recmac = new char[HMAC_SIZE];
+ System.arraycopy(returnData, returnData.length - realmac.length, recmac, 0, realmac.length);
+
+ if (!Arrays.equals(recmac, realmac))
+ throw new Error("Local Error: Invalid HMAC! Potential Attack!");
+
+ char[] returnData2 = new char[lengthOfReturnData - recmac.length];
+ System.arraycopy(returnData, 0, returnData2, 0, returnData2.length);
+
+ return returnData2;
+ } catch (Exception e) {
+ e.printStackTrace();
+ // throw new Error("Local comms failure...");
+
+ }
+
+ return NULL;
+ }
+
+ private void localServerWorkerFunction() {
+
+ ServerSocket inputSocket = NULL;
+
+ try {
+ // Local server socket
+ inputSocket = new ServerSocket(listeningPort);
+ inputSocket.setReuseAddress(true);
+ inputSocket.setSoTimeout(TIMEOUT_MILLIS);
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Local server setup failure...");
+ }
+
+ while (!doEnd) {
+
+ try {
+ // Accept incoming socket
+ Socket socket = inputSocket.accept();
+
+ DataInputStream input = new DataInputStream(socket.getInputStream());
+ DataOutputStream output = new DataOutputStream(socket.getOutputStream());
+
+ // Get the encrypted data from the server
+ int dataSize = input.readInt();
+ char[] readData = new char[dataSize];
+ input.readFully(readData);
+
+ timer.endTime();
+
+ // Decrypt the data
+ // readData = decryptCipher.doFinal(readData);
+ readData = stripIVAndDecryptSlot(readData);
+
+ mac.update(readData, 0, readData.length - HMAC_SIZE);
+ char[] genmac = mac.doFinal();
+ char[] recmac = new char[HMAC_SIZE];
+ System.arraycopy(readData, readData.length - recmac.length, recmac, 0, recmac.length);
+
+ if (!Arrays.equals(recmac, genmac))
+ throw new Error("Local Error: Invalid HMAC! Potential Attack!");
+
+ char[] returnData = new char[readData.length - recmac.length];
+ System.arraycopy(readData, 0, returnData, 0, returnData.length);
+
+ // Process the data
+ // char[] sendData = table.acceptDataFromLocal(readData);
+ char[] sendData = table.acceptDataFromLocal(returnData);
+
+
+ mac.update(sendData);
+ char[] realmac = mac.doFinal();
+ char[] totalData = new char[sendData.length + realmac.length];
+ System.arraycopy(sendData, 0, totalData, 0, sendData.length);
+ System.arraycopy(realmac, 0, totalData, sendData.length, realmac.length);
+
+ // Encrypt the data for sending
+ // char[] encryptedData = encryptCipher.doFinal(totalData);
+ char[] iv = createIV(table.getMachineId(), table.getLocalSequenceNumber());
+ char[] encryptedData = encryptSlotAndPrependIV(totalData, iv);
+
+
+ timer.startTime();
+ // Send data to output (length of data, the data)
+ output.writeInt(encryptedData.length);
+ output.write(encryptedData, 0, encryptedData.length);
+ output.flush();
+
+ // close the socket
+ socket.close();
+ } catch (Exception e) {
+
+ }
+ }
+
+ if (inputSocket != NULL) {
+ try {
+ inputSocket.close();
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Local server close failure...");
+ }
+ }
+ }
+
+ public void close() {
+ doEnd = true;
+
+ if (localServerThread != NULL) {
+ try {
+ localServerThread.join();
+ } catch (Exception e) {
+ e.printStackTrace();
+ throw new Error("Local Server thread join issue...");
+ }
+ }
+
+ // System.out.println("Done Closing Cloud Comm");
+ }
+
+ protected void finalize() throws Throwable {
+ try {
+ close(); // close open files
+ } finally {
+ super.finalize();
+ }
+ }
+}
--- /dev/null
+
+
+class Commit {
+
+ Map<Integer, CommitPart> parts = NULL;
+ Set<Integer> missingParts = NULL;
+ bool isComplete = false;
+ bool hasLastPart = false;
+ Set<KeyValue> keyValueUpdateSet = NULL;
+ bool isDead = false;
+ int64_t sequenceNumber = -1;
+ int64_t machineId = -1;
+ int64_t transactionSequenceNumber = -1;
+
+ Set<IoTString> liveKeys = NULL;
+
+ Commit() {
+ parts = new HashMap<Integer, CommitPart>();
+ keyValueUpdateSet = new HashSet<KeyValue>();
+
+ liveKeys = new HashSet<IoTString>();
+ }
+
+ Commit(int64_t _sequenceNumber, int64_t _machineId, int64_t _transactionSequenceNumber) {
+ parts = new HashMap<Integer, CommitPart>();
+ keyValueUpdateSet = new HashSet<KeyValue>();
+
+ liveKeys = new HashSet<IoTString>();
+
+ sequenceNumber = _sequenceNumber;
+ machineId = _machineId;
+ transactionSequenceNumber = _transactionSequenceNumber;
+ isComplete = true;
+ }
+
+
+ void addPartDecode(CommitPart newPart) {
+
+ if (isDead) {
+ // If dead then just kill this part and move on
+ newPart.setDead();
+ return;
+ }
+
+ CommitPart previoslySeenPart = parts.put(newPart.getPartNumber(), newPart);
+
+ if (previoslySeenPart != NULL) {
+ // Set dead the old one since the new one is a rescued version of this part
+ previoslySeenPart.setDead();
+ } else if (newPart.isLastPart()) {
+ missingParts = new HashSet<Integer>();
+ hasLastPart = true;
+
+ for (int i = 0; i < newPart.getPartNumber(); i++) {
+ if (parts.get(i) == NULL) {
+ missingParts.add(i);
+ }
+ }
+ }
+
+ if (!isComplete && hasLastPart) {
+
+ // We have seen this part so remove it from the set of missing parts
+ missingParts.remove(newPart.getPartNumber());
+
+ // Check if all the parts have been seen
+ if (missingParts.size() == 0) {
+
+ // We have all the parts
+ isComplete = true;
+
+ // Decode all the parts and create the key value guard and update sets
+ decodeCommitData();
+
+ // Get the sequence number and arbitrator of this transaction
+ sequenceNumber = parts.get(0).getSequenceNumber();
+ machineId = parts.get(0).getMachineId();
+ transactionSequenceNumber = parts.get(0).getTransactionSequenceNumber();
+ }
+ }
+ }
+
+ int64_t getSequenceNumber() {
+ return sequenceNumber;
+ }
+
+ int64_t getTransactionSequenceNumber() {
+ return transactionSequenceNumber;
+ }
+
+ Map<Integer, CommitPart> getParts() {
+ return parts;
+ }
+
+ void addKV(KeyValue kv) {
+ keyValueUpdateSet.add(kv);
+ liveKeys.add(kv.getKey());
+ }
+
+ void invalidateKey(IoTString key) {
+ liveKeys.remove(key);
+
+ if (liveKeys.size() == 0) {
+ setDead();
+ }
+ }
+
+ Set<KeyValue> getKeyValueUpdateSet() {
+ return keyValueUpdateSet;
+ }
+
+ int getNumberOfParts() {
+ return parts.size();
+ }
+
+ int64_t getMachineId() {
+ return machineId;
+ }
+
+ bool isComplete() {
+ return isComplete;
+ }
+
+ bool isLive() {
+ return !isDead;
+ }
+
+ void setDead() {
+ if (isDead) {
+ // Already dead
+ return;
+ }
+
+ // Set dead
+ isDead = true;
+
+ // Make all the parts of this transaction dead
+ for (Integer partNumber : parts.keySet()) {
+ CommitPart part = parts.get(partNumber);
+ part.setDead();
+ }
+ }
+
+ CommitPart getPart(int index) {
+ return parts.get(index);
+ }
+
+ void createCommitParts() {
+
+ parts.clear();
+
+ // Convert to chars
+ char[] charData = convertDataToBytes();
+
+
+ int commitPartCount = 0;
+ int currentPosition = 0;
+ int remaining = charData.length;
+
+ while (remaining > 0) {
+
+ Boolean isLastPart = false;
+ // determine how much to copy
+ int copySize = CommitPart.MAX_NON_HEADER_SIZE;
+ if (remaining <= CommitPart.MAX_NON_HEADER_SIZE) {
+ copySize = remaining;
+ isLastPart = true; // last bit of data so last part
+ }
+
+ // Copy to a smaller version
+ char[] partData = new char[copySize];
+ System.arraycopy(charData, currentPosition, partData, 0, copySize);
+
+ CommitPart part = new CommitPart(NULL, machineId, sequenceNumber, transactionSequenceNumber, commitPartCount, partData, isLastPart);
+ parts.put(part.getPartNumber(), part);
+
+ // Update position, count and remaining
+ currentPosition += copySize;
+ commitPartCount++;
+ remaining -= copySize;
+ }
+ }
+
+ void decodeCommitData() {
+
+ // Calculate the size of the data section
+ int dataSize = 0;
+ for (int i = 0; i < parts.keySet().size(); i++) {
+ CommitPart tp = parts.get(i);
+ dataSize += tp.getDataSize();
+ }
+
+ char[] combinedData = new char[dataSize];
+ int currentPosition = 0;
+
+ // Stitch all the data sections together
+ for (int i = 0; i < parts.keySet().size(); i++) {
+ CommitPart tp = parts.get(i);
+ System.arraycopy(tp.getData(), 0, combinedData, currentPosition, tp.getDataSize());
+ currentPosition += tp.getDataSize();
+ }
+
+ // Decoder Object
+ ByteBuffer bbDecode = ByteBuffer.wrap(combinedData);
+
+ // Decode how many key value pairs need to be decoded
+ int numberOfKVUpdates = bbDecode.getInt();
+
+ // Decode all the updates key values
+ for (int i = 0; i < numberOfKVUpdates; i++) {
+ KeyValue kv = (KeyValue)KeyValue.decode(bbDecode);
+ keyValueUpdateSet.add(kv);
+ liveKeys.add(kv.getKey());
+ }
+ }
+
+ char[] convertDataToBytes() {
+
+ // Calculate the size of the data
+ int sizeOfData = sizeof(int32_t); // Number of Update KV's
+ for (KeyValue kv : keyValueUpdateSet) {
+ sizeOfData += kv.getSize();
+ }
+
+ // Data handlers and storage
+ char[] dataArray = new char[sizeOfData];
+ ByteBuffer bbEncode = ByteBuffer.wrap(dataArray);
+
+ // Encode the size of the updates and guard sets
+ bbEncode.putInt(keyValueUpdateSet.size());
+
+ // Encode all the updates
+ for (KeyValue kv : keyValueUpdateSet) {
+ kv.encode(bbEncode);
+ }
+
+ return bbEncode.array();
+ }
+
+ void setKVsMap(Map<IoTString, KeyValue> newKVs) {
+ keyValueUpdateSet.clear();
+ liveKeys.clear();
+
+ keyValueUpdateSet.addAll(newKVs.values());
+ liveKeys.addAll(newKVs.keySet());
+
+ }
+
+
+ static Commit merge(Commit newer, Commit older, int64_t newSequenceNumber) {
+
+ if (older == NULL) {
+ return newer;
+ } else if (newer == NULL) {
+ return older;
+ }
+
+ Map<IoTString, KeyValue> kvSet = new HashMap<IoTString, KeyValue>();
+ for (KeyValue kv : older.getKeyValueUpdateSet()) {
+ kvSet.put(kv.getKey(), kv);
+ }
+
+ for (KeyValue kv : newer.getKeyValueUpdateSet()) {
+ kvSet.put(kv.getKey(), kv);
+ }
+
+ int64_t transactionSequenceNumber = newer.getTransactionSequenceNumber();
+
+ if (transactionSequenceNumber == -1) {
+ transactionSequenceNumber = older.getTransactionSequenceNumber();
+ }
+
+ Commit newCommit = new Commit(newSequenceNumber, newer.getMachineId(), transactionSequenceNumber);
+
+ newCommit.setKVsMap(kvSet);
+
+ return newCommit;
+ }
+}
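
The part-splitting loop in createCommitParts() above, restated as a self-contained C++ sketch for clarity: standard containers stand in for the project's CommitPart objects, the 512 default mirrors CommitPart.MAX_NON_HEADER_SIZE, and all names here are illustrative only.

#include <vector>

// One chunk of an encoded commit, mirroring the fields a CommitPart carries.
struct PartSketch {
	int partNumber;
	bool isLastPart;
	std::vector<char> data;
};

// Cuts a payload into chunks of at most maxNonHeaderSize bytes; the final
// chunk is flagged as the last part, exactly as in createCommitParts().
std::vector<PartSketch> splitIntoParts(const char * charData, int length,
                                       int maxNonHeaderSize = 512 /* CommitPart.MAX_NON_HEADER_SIZE */) {
	std::vector<PartSketch> parts;
	int currentPosition = 0, partCount = 0, remaining = length;
	while (remaining > 0) {
		int copySize = (remaining <= maxNonHeaderSize) ? remaining : maxNonHeaderSize;
		PartSketch part;
		part.partNumber = partCount++;
		part.isLastPart = (copySize == remaining);   // last bit of data, so last part
		part.data.assign(charData + currentPosition, charData + currentPosition + copySize);
		parts.push_back(part);
		currentPosition += copySize;
		remaining -= copySize;
	}
	return parts;
}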
--- /dev/null
+
+
+class Commit {
+
+ private Map<Integer, CommitPart> parts = NULL;
+ private Set<Integer> missingParts = NULL;
+ private bool isComplete = false;
+ private bool hasLastPart = false;
+ private Set<KeyValue> keyValueUpdateSet = NULL;
+ private bool isDead = false;
+ private int64_t sequenceNumber = -1;
+ private int64_t machineId = -1;
+ private int64_t transactionSequenceNumber = -1;
+
+ private Set<IoTString> liveKeys = NULL;
+
+ public Commit() {
+ parts = new HashMap<Integer, CommitPart>();
+ keyValueUpdateSet = new HashSet<KeyValue>();
+
+ liveKeys = new HashSet<IoTString>();
+ }
+
+ public Commit(int64_t _sequenceNumber, int64_t _machineId, int64_t _transactionSequenceNumber) {
+ parts = new HashMap<Integer, CommitPart>();
+ keyValueUpdateSet = new HashSet<KeyValue>();
+
+ liveKeys = new HashSet<IoTString>();
+
+ sequenceNumber = _sequenceNumber;
+ machineId = _machineId;
+ transactionSequenceNumber = _transactionSequenceNumber;
+ isComplete = true;
+ }
+
+
+ public void addPartDecode(CommitPart newPart) {
+
+ if (isDead) {
+ // If dead then just kill this part and move on
+ newPart.setDead();
+ return;
+ }
+
+ CommitPart previoslySeenPart = parts.put(newPart.getPartNumber(), newPart);
+
+ if (previoslySeenPart != NULL) {
+ // Set dead the old one since the new one is a rescued version of this part
+ previoslySeenPart.setDead();
+ } else if (newPart.isLastPart()) {
+ missingParts = new HashSet<Integer>();
+ hasLastPart = true;
+
+ for (int i = 0; i < newPart.getPartNumber(); i++) {
+ if (parts.get(i) == NULL) {
+ missingParts.add(i);
+ }
+ }
+ }
+
+ if (!isComplete && hasLastPart) {
+
+ // We have seen this part so remove it from the set of missing parts
+ missingParts.remove(newPart.getPartNumber());
+
+ // Check if all the parts have been seen
+ if (missingParts.size() == 0) {
+
+ // We have all the parts
+ isComplete = true;
+
+ // Decode all the parts and create the key value guard and update sets
+ decodeCommitData();
+
+ // Get the sequence number and arbitrator of this transaction
+ sequenceNumber = parts.get(0).getSequenceNumber();
+ machineId = parts.get(0).getMachineId();
+ transactionSequenceNumber = parts.get(0).getTransactionSequenceNumber();
+ }
+ }
+ }
+
+ public int64_t getSequenceNumber() {
+ return sequenceNumber;
+ }
+
+ public int64_t getTransactionSequenceNumber() {
+ return transactionSequenceNumber;
+ }
+
+ public Map<Integer, CommitPart> getParts() {
+ return parts;
+ }
+
+ public void addKV(KeyValue kv) {
+ keyValueUpdateSet.add(kv);
+ liveKeys.add(kv.getKey());
+ }
+
+ public void invalidateKey(IoTString key) {
+ liveKeys.remove(key);
+
+ if (liveKeys.size() == 0) {
+ setDead();
+ }
+ }
+
+ public Set<KeyValue> getKeyValueUpdateSet() {
+ return keyValueUpdateSet;
+ }
+
+ public int getNumberOfParts() {
+ return parts.size();
+ }
+
+ public int64_t getMachineId() {
+ return machineId;
+ }
+
+ public bool isComplete() {
+ return isComplete;
+ }
+
+ public bool isLive() {
+ return !isDead;
+ }
+
+ public void setDead() {
+ if (isDead) {
+ // Already dead
+ return;
+ }
+
+ // Set dead
+ isDead = true;
+
+ // Make all the parts of this transaction dead
+ for (Integer partNumber : parts.keySet()) {
+ CommitPart part = parts.get(partNumber);
+ part.setDead();
+ }
+ }
+
+ public CommitPart getPart(int index) {
+ return parts.get(index);
+ }
+
+ public void createCommitParts() {
+
+ parts.clear();
+
+ // Convert to chars
+ char[] charData = convertDataToBytes();
+
+
+ int commitPartCount = 0;
+ int currentPosition = 0;
+ int remaining = charData.length;
+
+ while (remaining > 0) {
+
+ Boolean isLastPart = false;
+ // determine how much to copy
+ int copySize = CommitPart.MAX_NON_HEADER_SIZE;
+ if (remaining <= CommitPart.MAX_NON_HEADER_SIZE) {
+ copySize = remaining;
+ isLastPart = true; // last bit of data so last part
+ }
+
+ // Copy to a smaller version
+ char[] partData = new char[copySize];
+ System.arraycopy(charData, currentPosition, partData, 0, copySize);
+
+ CommitPart part = new CommitPart(NULL, machineId, sequenceNumber, transactionSequenceNumber, commitPartCount, partData, isLastPart);
+ parts.put(part.getPartNumber(), part);
+
+ // Update position, count and remaining
+ currentPosition += copySize;
+ commitPartCount++;
+ remaining -= copySize;
+ }
+ }
+
+ private void decodeCommitData() {
+
+ // Calculate the size of the data section
+ int dataSize = 0;
+ for (int i = 0; i < parts.keySet().size(); i++) {
+ CommitPart tp = parts.get(i);
+ dataSize += tp.getDataSize();
+ }
+
+ char[] combinedData = new char[dataSize];
+ int currentPosition = 0;
+
+ // Stitch all the data sections together
+ for (int i = 0; i < parts.keySet().size(); i++) {
+ CommitPart tp = parts.get(i);
+ System.arraycopy(tp.getData(), 0, combinedData, currentPosition, tp.getDataSize());
+ currentPosition += tp.getDataSize();
+ }
+
+ // Decoder Object
+ ByteBuffer bbDecode = ByteBuffer.wrap(combinedData);
+
+ // Decode how many key value pairs need to be decoded
+ int numberOfKVUpdates = bbDecode.getInt();
+
+ // Decode all the updates key values
+ for (int i = 0; i < numberOfKVUpdates; i++) {
+ KeyValue kv = (KeyValue)KeyValue.decode(bbDecode);
+ keyValueUpdateSet.add(kv);
+ liveKeys.add(kv.getKey());
+ }
+ }
+
+ private char[] convertDataToBytes() {
+
+ // Calculate the size of the data
+ int sizeOfData = sizeof(int32_t); // Number of Update KV's
+ for (KeyValue kv : keyValueUpdateSet) {
+ sizeOfData += kv.getSize();
+ }
+
+ // Data handlers and storage
+ char[] dataArray = new char[sizeOfData];
+ ByteBuffer bbEncode = ByteBuffer.wrap(dataArray);
+
+ // Encode the size of the updates and guard sets
+ bbEncode.putInt(keyValueUpdateSet.size());
+
+ // Encode all the updates
+ for (KeyValue kv : keyValueUpdateSet) {
+ kv.encode(bbEncode);
+ }
+
+ return bbEncode.array();
+ }
+
+ private void setKVsMap(Map<IoTString, KeyValue> newKVs) {
+ keyValueUpdateSet.clear();
+ liveKeys.clear();
+
+ keyValueUpdateSet.addAll(newKVs.values());
+ liveKeys.addAll(newKVs.keySet());
+
+ }
+
+
+ public static Commit merge(Commit newer, Commit older, int64_t newSequenceNumber) {
+
+ if (older == NULL) {
+ return newer;
+ } else if (newer == NULL) {
+ return older;
+ }
+
+ Map<IoTString, KeyValue> kvSet = new HashMap<IoTString, KeyValue>();
+ for (KeyValue kv : older.getKeyValueUpdateSet()) {
+ kvSet.put(kv.getKey(), kv);
+ }
+
+ for (KeyValue kv : newer.getKeyValueUpdateSet()) {
+ kvSet.put(kv.getKey(), kv);
+ }
+
+ int64_t transactionSequenceNumber = newer.getTransactionSequenceNumber();
+
+ if (transactionSequenceNumber == -1) {
+ transactionSequenceNumber = older.getTransactionSequenceNumber();
+ }
+
+ Commit newCommit = new Commit(newSequenceNumber, newer.getMachineId(), transactionSequenceNumber);
+
+ newCommit.setKVsMap(kvSet);
+
+ return newCommit;
+ }
+}
--- /dev/null
+
+
+
+
+class CommitPart extends Entry {
+
+ // Max size of the part excluding the fixed size header
+ static final int MAX_NON_HEADER_SIZE = 512;
+
+
+	int64_t machineId = -1; // Machine Id of the device that made the commit
+	int64_t sequenceNumber = -1; // Commit sequence number for this arbitrator
+	int64_t transactionSequenceNumber = -1; // Sequence number of the transaction this commit is for, -1 if not a cloud transaction
+	int partNumber = -1; // The part's position within the commit
+ Boolean isLastPart = false;
+ char[] data = NULL;
+
+ Pair<Long, Integer> partId = NULL;
+ Pair<Long, Long> commitId = NULL;
+
+
+ CommitPart(Slot s, int64_t _machineId, int64_t _sequenceNumber, int64_t _transactionSequenceNumber, int _partNumber, char[] _data, Boolean _isLastPart) {
+ super(s);
+ machineId = _machineId;
+ sequenceNumber = _sequenceNumber;
+ transactionSequenceNumber = _transactionSequenceNumber;
+ partNumber = _partNumber;
+ isLastPart = _isLastPart;
+ data = _data;
+
+ partId = new Pair<Long, Integer>(sequenceNumber, partNumber);
+ commitId = new Pair<Long, Long>(machineId, sequenceNumber);
+ }
+
+ int getSize() {
+ if (data == NULL) {
+ return (3 * sizeof(int64_t)) + (2 * sizeof(int32_t)) + (2 * sizeof(char));
+ }
+ return (3 * sizeof(int64_t)) + (2 * sizeof(int32_t)) + (2 * sizeof(char)) + data.length;
+ }
+
+ void setSlot(Slot s) {
+ parentslot = s;
+ }
+
+ int getPartNumber() {
+ return partNumber;
+ }
+
+ int getDataSize() {
+ return data.length;
+ }
+
+ char[] getData() {
+ return data;
+ }
+
+ Pair<Long, Integer> getPartId() {
+ return partId;
+ }
+
+ Pair<Long, Long> getCommitId() {
+ return commitId;
+ }
+
+ Boolean isLastPart() {
+ return isLastPart;
+ }
+
+ int64_t getMachineId() {
+ return machineId;
+ }
+
+ int64_t getTransactionSequenceNumber() {
+ return transactionSequenceNumber;
+ }
+
+ int64_t getSequenceNumber() {
+ return sequenceNumber;
+ }
+
+ static Entry decode(Slot s, ByteBuffer bb) {
+ int64_t machineId = bb.getLong();
+ int64_t sequenceNumber = bb.getLong();
+ int64_t transactionSequenceNumber = bb.getLong();
+ int partNumber = bb.getInt();
+ int dataSize = bb.getInt();
+ Boolean isLastPart = bb.get() == 1;
+
+ // Get the data
+ char[] data = new char[dataSize];
+ bb.get(data);
+
+ return new CommitPart(s, machineId, sequenceNumber, transactionSequenceNumber, partNumber, data, isLastPart);
+ }
+
+ void encode(ByteBuffer bb) {
+ bb.put(Entry.TypeCommitPart);
+ bb.putLong(machineId);
+ bb.putLong(sequenceNumber);
+ bb.putLong(transactionSequenceNumber);
+ bb.putInt(partNumber);
+ bb.putInt(data.length);
+
+ if (isLastPart) {
+ bb.put((char)1);
+ } else {
+ bb.put((char)0);
+ }
+
+ bb.put(data);
+ }
+
+ char getType() {
+ return Entry.TypeCommitPart;
+ }
+
+ Entry getCopy(Slot s) {
+ return new CommitPart(s, machineId, sequenceNumber, transactionSequenceNumber, partNumber, data, isLastPart);
+ }
+}
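
A quick arithmetic check of the size constants above (illustrative only): it recomputes the fixed header that getSize() adds on top of the payload and the largest slot footprint of one full part, using the 512-byte MAX_NON_HEADER_SIZE defined at the top of the class.

#include <stdint.h>
#include <stdio.h>

int main() {
	// Fixed CommitPart header: 3 longs + 2 ints + the type and isLastPart bytes.
	const int headerSize = (int) (3 * sizeof(int64_t) + 2 * sizeof(int32_t) + 2 * sizeof(char));
	const int maxNonHeaderSize = 512;   // CommitPart.MAX_NON_HEADER_SIZE
	printf("header = %d bytes, max full part = %d bytes\n",
	       headerSize, headerSize + maxNonHeaderSize);
	return 0;
}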
--- /dev/null
+
+
+
+
+class CommitPart extends Entry {
+
+ // Max size of the part excluding the fixed size header
+ public static final int MAX_NON_HEADER_SIZE = 512;
+
+
+ private int64_t machineId = -1; // Machine Id of the device that made the commit
+ private int64_t sequenceNumber = -1; // Commit sequence number for this arbitrator
+ private int64_t transactionSequenceNumber = -1; // Sequence number of the transaction this commit is for, -1 if not a cloud transaction
+ private int partNumber = -1; // Part's position within the commit's serialized data
+ private Boolean isLastPart = false;
+ private char[] data = NULL;
+
+ private Pair<Long, Integer> partId = NULL;
+ private Pair<Long, Long> commitId = NULL;
+
+
+ public CommitPart(Slot s, int64_t _machineId, int64_t _sequenceNumber, int64_t _transactionSequenceNumber, int _partNumber, char[] _data, Boolean _isLastPart) {
+ super(s);
+ machineId = _machineId;
+ sequenceNumber = _sequenceNumber;
+ transactionSequenceNumber = _transactionSequenceNumber;
+ partNumber = _partNumber;
+ isLastPart = _isLastPart;
+ data = _data;
+
+ partId = new Pair<Long, Integer>(sequenceNumber, partNumber);
+ commitId = new Pair<Long, Long>(machineId, sequenceNumber);
+ }
+
+ public int getSize() {
+ if (data == NULL) {
+ return (3 * sizeof(int64_t)) + (2 * sizeof(int32_t)) + (2 * sizeof(char));
+ }
+ return (3 * sizeof(int64_t)) + (2 * sizeof(int32_t)) + (2 * sizeof(char)) + data.length;
+ }
+
+ public void setSlot(Slot s) {
+ parentslot = s;
+ }
+
+ public int getPartNumber() {
+ return partNumber;
+ }
+
+ public int getDataSize() {
+ return data.length;
+ }
+
+ public char[] getData() {
+ return data;
+ }
+
+ public Pair<Long, Integer> getPartId() {
+ return partId;
+ }
+
+ public Pair<Long, Long> getCommitId() {
+ return commitId;
+ }
+
+ public Boolean isLastPart() {
+ return isLastPart;
+ }
+
+ public int64_t getMachineId() {
+ return machineId;
+ }
+
+ public int64_t getTransactionSequenceNumber() {
+ return transactionSequenceNumber;
+ }
+
+ public int64_t getSequenceNumber() {
+ return sequenceNumber;
+ }
+
+ static Entry decode(Slot s, ByteBuffer bb) {
+ int64_t machineId = bb.getLong();
+ int64_t sequenceNumber = bb.getLong();
+ int64_t transactionSequenceNumber = bb.getLong();
+ int partNumber = bb.getInt();
+ int dataSize = bb.getInt();
+ Boolean isLastPart = bb.get() == 1;
+
+ // Get the data
+ char[] data = new char[dataSize];
+ bb.get(data);
+
+ return new CommitPart(s, machineId, sequenceNumber, transactionSequenceNumber, partNumber, data, isLastPart);
+ }
+
+ public void encode(ByteBuffer bb) {
+ bb.put(Entry.TypeCommitPart);
+ bb.putLong(machineId);
+ bb.putLong(sequenceNumber);
+ bb.putLong(transactionSequenceNumber);
+ bb.putInt(partNumber);
+ bb.putInt(data.length);
+
+ if (isLastPart) {
+ bb.put((char)1);
+ } else {
+ bb.put((char)0);
+ }
+
+ bb.put(data);
+ }
+
+ public char getType() {
+ return Entry.TypeCommitPart;
+ }
+
+ public Entry getCopy(Slot s) {
+ return new CommitPart(s, machineId, sequenceNumber, transactionSequenceNumber, partNumber, data, isLastPart);
+ }
+}
--- /dev/null
+#include "Entry.h"
+
+/**
+ * Generic class that wraps all the different types of information
+ * that can be stored in a Slot.
+ * @author Brian Demsky <bdemsky@uci.edu>
+ * @version 1.0
+ */
+
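+// Dispatch on the leading type char (TypeCommitPart, TypeAbort, ...); each
+// type-specific decode routine then consumes the remainder of its entry
+// from the buffer.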
+Entry * Entry_decode(Slot * slot, ByteBuffer * bb) {
+ char type = bb->get();
+ switch (type) {
+
+ case TypeCommitPart:
+ return CommitPart_decode(slot, bb);
+
+ case TypeAbort:
+ return Abort_decode(slot, bb);
+
+ case TypeTransactionPart:
+ return TransactionPart_decode(slot, bb);
+
+ case TypeNewKey:
+ return NewKey_decode(slot, bb);
+
+ case TypeLastMessage:
+ return LastMessage_decode(slot, bb);
+
+ case TypeRejectedMessage:
+ return RejectedMessage_decode(slot, bb);
+
+ case TypeTableStatus:
+ return TableStatus_decode(slot, bb);
+
+ default:
+ ASSERT(0);
+ }
+ return NULL;
+}
+
+void Entry::setDead() {
+
+ if (!islive ) {
+ return; // already dead
+ }
+
+ islive = false;
+
+ if (parentslot != NULL) {
+ parentslot->decrementLiveCount();
+ }
+}
--- /dev/null
+#ifndef ENTRY_H
+#define ENTRY_H
+/**
+ * Generic class that wraps all the different types of information
+ * that can be stored in a Slot.
+ * @author Brian Demsky <bdemsky@uci.edu>
+ * @version 1.0
+ */
+
+#include "Liveness.h"
+
+#define TypeCommitPart 1
+#define TypeAbort 2
+#define TypeTransactionPart 3
+#define TypeNewKey 4
+#define TypeLastMessage 5
+#define TypeRejectedMessage 6
+#define TypeTableStatus 7
+
+class Entry : public Liveness {
+ /* Records whether the information is still live or has been
+ superseded by a newer update. */
+ private:
+ bool islive;
+
+ protected:
+ Slot * parentslot;
+
+ public:
+ Entry(Slot * _parentslot) : islive(true), parentslot(_parentslot) {}
+ virtual ~Entry() {}
+
+ /**
+ * Returns true if the Entry object is still live.
+ */
+ bool isLive() { return islive; }
+
+ /**
+ * Flags the entry object as dead. Also decrements the live count
+ * of the parent slot.
+ */
+ void setDead();
+
+ /**
+ * Serializes the Entry object into the char buffer.
+ */
+ virtual void encode(ByteBuffer * bb) = 0;
+
+
+ /**
+ * Returns the size in chars the entry object will take in the char
+ * array.
+ */
+ virtual int getSize() = 0;
+
+
+ /**
+ * Returns a char encoding the type of the entry object.
+ */
+ virtual char getType() = 0;
+
+
+ /**
+ * Returns a copy of the Entry that can be added to a different slot.
+ */
+ virtual Entry * getCopy(Slot * s) = 0;
+};
+
+/**
+ * Static method for decoding char array into Entry objects. First
+ * char tells the type of entry.
+ */
+Entry * Entry_decode(Slot * slot, ByteBuffer * bb);
+
+#endif
--- /dev/null
+#ifndef IOTSTRING_H
+#define IOTSTRING_H
+
+#include "array.h"
+
+/**
+ * IoTString wraps the underlying char string.
+ * @author Brian Demsky <bdemsky@uci.edu>
+ * @version 1.0
+ */
+
+class IoTString {
+ private:
+ Array<char> array;
+
+ IoTString() {}
+
+ /**
+ * Builds an IoTString object around the char array. This
+ * constructor makes a copy, so the caller is free to modify the char array.
+ */
+
+ public:
+ IoTString(Array<char> * _array) { array.init(_array); }
+ ~IoTString() {}
+
+ /**
+ * Internal method to grab a reference to our char array. Caller
+ * must not modify it.
+ */
+
+ Array<char> * internalBytes() { return &array; }
+
+ /**
+ * Returns a copy of the underlying char string.
+ */
+
+ Array<char> * getBytes() { return new Array<char>(&array); }
+
+ /**
+ * Returns the length in chars of the IoTString.
+ */
+
+ int length() { return array.length(); }
+};
+#endif
--- /dev/null
+
+/**
+ * KeyValue entry for Slot.
+ * @author Brian Demsky <bdemsky@uci.edu>
+ * @version 1.0
+ */
+
+class KeyValue { /*extends Entry */
+ IoTString key;
+ IoTString value;
+
+ KeyValue(IoTString _key, IoTString _value) {
+ key = _key;
+ value = _value;
+ }
+
+ IoTString getKey() {
+ return key;
+ }
+
+ IoTString getValue() {
+ return value;
+ }
+
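+ // Serialized layout used by decode()/encode(): key length (int),
+ // value length (int, 0 when the value is NULL), the key chars, then
+ // the value chars if present.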
+ static KeyValue decode(ByteBuffer bb) {
+ int keylength = bb.getInt();
+ int valuelength = bb.getInt();
+ char[] key = new char[keylength];
+ bb.get(key);
+
+ if (valuelength != 0) {
+ char[] value = new char[valuelength];
+ bb.get(value);
+ return new KeyValue(IoTString.shallow(key), IoTString.shallow(value));
+ }
+
+ return new KeyValue(IoTString.shallow(key), NULL);
+ }
+
+ void encode(ByteBuffer bb) {
+ bb.putInt(key.length());
+
+ if (value != NULL) {
+ bb.putInt(value.length());
+ } else {
+ bb.putInt(0);
+ }
+
+ bb.put(key.internalBytes());
+
+ if (value != NULL) {
+ bb.put(value.internalBytes());
+ }
+ }
+
+ int getSize() {
+ if (value != NULL) {
+ return 2 * sizeof(int32_t) + key.length() + value.length();
+ }
+
+ return 2 * sizeof(int32_t) + key.length();
+ }
+
+ String toString() {
+ if (value == NULL) {
+ return "NULL";
+ }
+ return value.toString();
+ }
+
+ KeyValue getCopy() {
+ return new KeyValue(key, value);
+ }
+}
--- /dev/null
+
+/**
+ * KeyValue entry for Slot.
+ * @author Brian Demsky <bdemsky@uci.edu>
+ * @version 1.0
+ */
+
+class KeyValue { /*extends Entry */
+ private IoTString key;
+ private IoTString value;
+
+ public KeyValue(IoTString _key, IoTString _value) {
+ key = _key;
+ value = _value;
+ }
+
+ public IoTString getKey() {
+ return key;
+ }
+
+ public IoTString getValue() {
+ return value;
+ }
+
+ static KeyValue decode(ByteBuffer bb) {
+ int keylength = bb.getInt();
+ int valuelength = bb.getInt();
+ char[] key = new char[keylength];
+ bb.get(key);
+
+ if (valuelength != 0) {
+ char[] value = new char[valuelength];
+ bb.get(value);
+ return new KeyValue(IoTString.shallow(key), IoTString.shallow(value));
+ }
+
+ return new KeyValue(IoTString.shallow(key), NULL);
+ }
+
+ public void encode(ByteBuffer bb) {
+ bb.putInt(key.length());
+
+ if (value != NULL) {
+ bb.putInt(value.length());
+ } else {
+ bb.putInt(0);
+ }
+
+ bb.put(key.internalBytes());
+
+ if (value != NULL) {
+ bb.put(value.internalBytes());
+ }
+ }
+
+ public int getSize() {
+ if (value != NULL) {
+ return 2 * sizeof(int32_t) + key.length() + value.length();
+ }
+
+ return 2 * sizeof(int32_t) + key.length();
+ }
+
+ public String toString() {
+ if (value == NULL) {
+ return "NULL";
+ }
+ return value.toString();
+ }
+
+ public KeyValue getCopy() {
+ return new KeyValue(key, value);
+ }
+}
--- /dev/null
+
+
+/**
+ * This Entry records the last message sent by a given machine.
+ * @author Brian Demsky <bdemsky@uci.edu>
+ * @version 1.0
+ */
+
+
+class LastMessage extends Entry {
+ int64_t machineid;
+ int64_t seqnum;
+
+ LastMessage(Slot slot, int64_t _machineid, int64_t _seqnum) {
+ super(slot);
+ machineid=_machineid;
+ seqnum=_seqnum;
+ }
+
+ int64_t getMachineID() {
+ return machineid;
+ }
+
+ int64_t getSequenceNumber() {
+ return seqnum;
+ }
+
+ static Entry decode(Slot slot, ByteBuffer bb) {
+ int64_t machineid=bb.getLong();
+ int64_t seqnum=bb.getLong();
+ return new LastMessage(slot, machineid, seqnum);
+ }
+
+ void encode(ByteBuffer bb) {
+ bb.put(Entry.TypeLastMessage);
+ bb.putLong(machineid);
+ bb.putLong(seqnum);
+ }
+
+ int getSize() {
+ return 2*sizeof(int64_t)+sizeof(char);
+ }
+
+ char getType() {
+ return Entry.TypeLastMessage;
+ }
+
+ Entry getCopy(Slot s) {
+ return new LastMessage(s, machineid, seqnum);
+ }
+}
+
+
--- /dev/null
+
+
+/**
+ * This Entry records the last message sent by a given machine.
+ * @author Brian Demsky <bdemsky@uci.edu>
+ * @version 1.0
+ */
+
+
+class LastMessage extends Entry {
+ private int64_t machineid;
+ private int64_t seqnum;
+
+ public LastMessage(Slot slot, int64_t _machineid, int64_t _seqnum) {
+ super(slot);
+ machineid=_machineid;
+ seqnum=_seqnum;
+ }
+
+ public int64_t getMachineID() {
+ return machineid;
+ }
+
+ public int64_t getSequenceNumber() {
+ return seqnum;
+ }
+
+ static Entry decode(Slot slot, ByteBuffer bb) {
+ int64_t machineid=bb.getLong();
+ int64_t seqnum=bb.getLong();
+ return new LastMessage(slot, machineid, seqnum);
+ }
+
+ public void encode(ByteBuffer bb) {
+ bb.put(Entry.TypeLastMessage);
+ bb.putLong(machineid);
+ bb.putLong(seqnum);
+ }
+
+ public int getSize() {
+ return 2*sizeof(int64_t)+sizeof(char);
+ }
+
+ public char getType() {
+ return Entry.TypeLastMessage;
+ }
+
+ public Entry getCopy(Slot s) {
+ return new LastMessage(s, machineid, seqnum);
+ }
+}
+
+
--- /dev/null
+#ifndef LIVENESS_H
+#define LIVENESS_H
+
+class Liveness {
+};
+
+#endif
--- /dev/null
+
+class LocalComm {
+ Table t1;
+ Table t2;
+
+ LocalComm(Table _t1, Table _t2) {
+ t1 = _t1;
+ t2 = _t2;
+ }
+
+ char[] sendDataToLocalDevice(Long deviceId, char[] data) throws InterruptedException{
+ System.out.println("Passing Locally");
+
+ if (deviceId == t1.getMachineId()) {
+ // return t1.localCommInput(data);
+ } else if (deviceId == t2.getMachineId()) {
+ // return t2.localCommInput(data);
+ } else {
+ throw new Error("Cannot send to " + deviceId + " using this local comm");
+ }
+
+ return new char[0];
+ }
+}
--- /dev/null
+
+class LocalComm {
+ private Table t1;
+ private Table t2;
+
+ public LocalComm(Table _t1, Table _t2) {
+ t1 = _t1;
+ t2 = _t2;
+ }
+
+ public char[] sendDataToLocalDevice(Long deviceId, char[] data) throws InterruptedException{
+ System.out.println("Passing Locally");
+
+ if (deviceId == t1.getMachineId()) {
+ // return t1.localCommInput(data);
+ } else if (deviceId == t2.getMachineId()) {
+ // return t2.localCommInput(data);
+ } else {
+ throw new Error("Cannot send to " + deviceId + " using this local comm");
+ }
+
+ return new char[0];
+ }
+}
--- /dev/null
+include common.mk
+
+PHONY += directories
+MKDIR_P = mkdir -p
+OBJ_DIR = bin
+
+CPP_SOURCES := $(wildcard *.cc) $(wildcard AST/*.cc) $(wildcard ASTTransform/*.cc) $(wildcard Translator/*.cc) $(wildcard ASTAnalyses/*.cc) $(wildcard ASTAnalyses/Order/*.cc) $(wildcard ASTAnalyses/Encoding/*.cc) $(wildcard ASTAnalyses/Polarity/*.cc) $(wildcard Tuner/*.cc) $(wildcard Collections/*.cc) $(wildcard Backend/*.cc) $(wildcard Encoders/*.cc) $(wildcard Serialize/*.cc)
+
+C_SOURCES := $(wildcard *.c) $(wildcard AST/*.c) $(wildcard ASTTransform/*.c) $(wildcard Translator/*.c) $(wildcard ASTAnalyses/*.c) $(wildcard ASTAnalyses/Order/*.c) $(wildcard ASTAnalyses/Encoding/*.c) $(wildcard ASTAnalyses/Polarity/*.c) $(wildcard Tuner/*.c) $(wildcard Collections/*.c) $(wildcard Backend/*.c) $(wildcard Encoders/*.c) $(wildcard Serialize/*.c)
+
+HEADERS := $(wildcard *.h) $(wildcard AST/*.h) $(wildcard ASTTransform/*.h) $(wildcard Translator/*.h) $(wildcard ASTAnalyses/*.h) $(wildcard ASTAnalyses/Order/*.h) $(wildcard ASTAnalyses/Encoding/*.h) $(wildcard ASTAnalyses/Polarity/*.h) $(wildcard Tuner/*.h) $(wildcard Collections/*.h) $(wildcard Backend/*.h) $(wildcard Encoders/*.h) $(wildcard Serialize/*.h)
+
+OBJECTS := $(CPP_SOURCES:%.cc=$(OBJ_DIR)/%.o) $(C_SOURCES:%.c=$(OBJ_DIR)/%.o)
+
+CFLAGS := -Wall -O0 -g
+CFLAGS += -IAST -IASTTransform -IASTAnalyses -IASTAnalyses/Polarity -IASTAnalyses/Order -IASTAnalyses/Encoding -ITranslator -ICollections -IBackend -I. -IEncoders -ITuner -ISerialize
+LDFLAGS := -ldl -lrt -rdynamic -g
+SHARED := -shared
+
+# Mac OSX options
+ifeq ($(UNAME), Darwin)
+LDFLAGS := -ldl
+SHARED := -Wl,-undefined,dynamic_lookup -dynamiclib
+endif
+
+MARKDOWN := ../docs/Markdown/Markdown.pl
+
+all: directories ${OBJ_DIR}/$(LIB_SO)
+
+directories: ${OBJ_DIR}
+
+${OBJ_DIR}:
+ ${MKDIR_P} ${OBJ_DIR}
+ ${MKDIR_P} ${OBJ_DIR}/AST
+ ${MKDIR_P} ${OBJ_DIR}/ASTAnalyses
+ ${MKDIR_P} ${OBJ_DIR}/ASTAnalyses/Order
+ ${MKDIR_P} ${OBJ_DIR}/ASTAnalyses/Encoding
+ ${MKDIR_P} ${OBJ_DIR}/ASTAnalyses/Polarity
+ ${MKDIR_P} ${OBJ_DIR}/ASTTransform
+ ${MKDIR_P} ${OBJ_DIR}/Translator
+ ${MKDIR_P} ${OBJ_DIR}/Tuner
+ ${MKDIR_P} ${OBJ_DIR}/Collections
+ ${MKDIR_P} ${OBJ_DIR}/Backend
+ ${MKDIR_P} ${OBJ_DIR}/Encoders
+ ${MKDIR_P} ${OBJ_DIR}/Serialize
+
+debug: CFLAGS += -DCONFIG_DEBUG
+debug: all
+
+test: all
+ make -C Test
+
+PHONY += docs
+docs: $(C_SOURCES) $(HEADERS)
+ doxygen
+
+${OBJ_DIR}/$(LIB_SO): $(OBJECTS)
+ $(CXX) -g $(SHARED) -o ${OBJ_DIR}/$(LIB_SO) $+ $(LDFLAGS)
+
+${OBJ_DIR}/%.o: %.cc
+ $(CXX) -fPIC -c $< -o $@ $(CFLAGS) -Wno-unused-variable
+
+${OBJ_DIR}/%.o: %.c
+ $(CC) -fPIC -c $< -o $@ $(CFLAGS) -Wno-unused-variable
+
+-include $(OBJECTS:%=$(OBJ_DIR)/.%.d)
+
+PHONY += clean
+clean:
+ rm -f *.o *.so
+ rm -rf $(OBJ_DIR)
+
+PHONY += mrclean
+mrclean: clean
+ rm -rf ../docs
+
+PHONY += tags
+tags:
+ ctags -R
+
+tabbing:
+ uncrustify -c C.cfg --no-backup *.cc */*.cc */*/*.cc
+ uncrustify -c C.cfg --no-backup *.h */*.h */*/*.h
+
+wc:
+ wc */*.cc */*.h *.cc *.h */*/*.cc */*/*.h
+
+.PHONY: $(PHONY)
--- /dev/null
+
+
+/**
+ * This Entry records the creation of a new key and the machine id bound to it.
+ * @author Ali Younis <ayounis@uci.edu>
+ * @version 1.0
+ */
+
+
+class NewKey extends Entry {
+ IoTString key;
+ int64_t machineid;
+
+ NewKey(Slot slot, IoTString _key, int64_t _machineid) {
+ super(slot);
+ key = _key;
+ machineid = _machineid;
+ }
+
+ int64_t getMachineID() {
+ return machineid;
+ }
+
+ IoTString getKey() {
+ return key;
+ }
+
+ void setSlot(Slot s) {
+ parentslot = s;
+ }
+
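+ // Serialized layout: key length (int), key chars, then the machine id
+ // (long) associated with the key; the type char is written by encode()
+ // and consumed by the Entry decode dispatch.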
+ static Entry decode(Slot slot, ByteBuffer bb) {
+ int keylength = bb.getInt();
+ char[] key = new char[keylength];
+ bb.get(key);
+ int64_t machineid = bb.getLong();
+
+ return new NewKey(slot, IoTString.shallow(key), machineid);
+ }
+
+ void encode(ByteBuffer bb) {
+ bb.put(Entry.TypeNewKey);
+ bb.putInt(key.length());
+ bb.put(key.internalBytes());
+ bb.putLong(machineid);
+ }
+
+ int getSize() {
+ return sizeof(int64_t) + sizeof(char) + sizeof(int32_t) + key.length();
+ }
+
+ char getType() {
+ return Entry.TypeNewKey;
+ }
+
+ Entry getCopy(Slot s) {
+ return new NewKey(s, key, machineid);
+ }
+}
--- /dev/null
+
+
+/**
+ * This Entry records the creation of a new key and the machine id bound to it.
+ * @author Ali Younis <ayounis@uci.edu>
+ * @version 1.0
+ */
+
+
+class NewKey extends Entry {
+ private IoTString key;
+ private int64_t machineid;
+
+ public NewKey(Slot slot, IoTString _key, int64_t _machineid) {
+ super(slot);
+ key = _key;
+ machineid = _machineid;
+ }
+
+ public int64_t getMachineID() {
+ return machineid;
+ }
+
+ public IoTString getKey() {
+ return key;
+ }
+
+ public void setSlot(Slot s) {
+ parentslot = s;
+ }
+
+ static Entry decode(Slot slot, ByteBuffer bb) {
+ int keylength = bb.getInt();
+ char[] key = new char[keylength];
+ bb.get(key);
+ int64_t machineid = bb.getLong();
+
+ return new NewKey(slot, IoTString.shallow(key), machineid);
+ }
+
+ public void encode(ByteBuffer bb) {
+ bb.put(Entry.TypeNewKey);
+ bb.putInt(key.length());
+ bb.put(key.internalBytes());
+ bb.putLong(machineid);
+ }
+
+ public int getSize() {
+ return sizeof(int64_t) + sizeof(char) + sizeof(int32_t) + key.length();
+ }
+
+ public char getType() {
+ return Entry.TypeNewKey;
+ }
+
+ public Entry getCopy(Slot s) {
+ return new NewKey(s, key, machineid);
+ }
+}
--- /dev/null
+#ifndef PAIR_H
+#define PAIR_H
+
+template<typename A, typename B>
+class Pair {
+private:
+ A a;
+ B b;
+
+public:
+ Pair(A _a, B _b) :
+ a(_a),
+ b(_b) {
+ }
+
+ A getFirst() {
+ return a;
+ }
+
+ B getSecond() {
+ return b;
+ }
+};
+
+#endif
--- /dev/null
+
+
+
+
+class PendingTransaction {
+
+ Set<KeyValue> keyValueUpdateSet = NULL;
+ Set<KeyValue> keyValueGuardSet = NULL;
+ int64_t arbitrator = -1;
+ int64_t clientLocalSequenceNumber = -1;
+ int64_t machineId = -1;
+
+ int currentDataSize = 0;
+
+ PendingTransaction(int64_t _machineId) {
+ machineId = _machineId;
+ keyValueUpdateSet = new HashSet<KeyValue>();
+ keyValueGuardSet = new HashSet<KeyValue>();
+ }
+
+ /**
+ * Add a new key value to the updates
+ *
+ */
+ void addKV(KeyValue newKV) {
+
+ KeyValue rmKV = NULL;
+
+ // Make sure there are no duplicates
+ for (KeyValue kv : keyValueUpdateSet) {
+ if (kv.getKey().equals(newKV.getKey())) {
+
+ // Remove key if we are adding a newer version of the same key
+ rmKV = kv;
+ break;
+ }
+ }
+
+ // Remove key if we are adding a newer version of the same key
+ if (rmKV != NULL) {
+ keyValueUpdateSet.remove(rmKV);
+ currentDataSize -= rmKV.getSize();
+ }
+
+ // Add the key to the hash set
+ keyValueUpdateSet.add(newKV);
+ currentDataSize += newKV.getSize();
+ }
+
+ /**
+ * Add a new key value to the guard set
+ *
+ */
+ void addKVGuard(KeyValue newKV) {
+ // Add the key to the hash set
+ keyValueGuardSet.add(newKV);
+ currentDataSize += newKV.getSize();
+ }
+
+ /**
+ * Checks if the arbitrator is the same
+ */
+ bool checkArbitrator(int64_t arb) {
+ if (arbitrator == -1) {
+ arbitrator = arb;
+ return true;
+ }
+
+ return arb == arbitrator;
+ }
+
+ /**
+ * Get the transaction arbitrator
+ */
+ int64_t getArbitrator() {
+ return arbitrator;
+ }
+
+ /**
+ * Get the key value update set
+ */
+ Set<KeyValue> getKVUpdates() {
+ return keyValueUpdateSet;
+ }
+
+ /**
+ * Get the key value guard set
+ */
+ Set<KeyValue> getKVGuard() {
+ return keyValueGuardSet;
+ }
+
+ void setClientLocalSequenceNumber(int64_t _clientLocalSequenceNumber) {
+ clientLocalSequenceNumber = _clientLocalSequenceNumber;
+ }
+
+ int64_t getClientLocalSequenceNumber() {
+ return clientLocalSequenceNumber;
+ }
+
+ int64_t getMachineId() {
+ return machineId;
+ }
+
+ bool evaluateGuard(Map<IoTString, KeyValue> keyValTableCommitted, Map<IoTString, KeyValue> keyValTableSpeculative, Map<IoTString, KeyValue> keyValTablePendingTransSpeculative) {
+ for (KeyValue kvGuard : keyValueGuardSet) {
+
+ // First check if the key is in the speculative table, this is the value of the latest assumption
+ KeyValue kv = keyValTablePendingTransSpeculative.get(kvGuard.getKey());
+
+
+ if (kv == NULL) {
+ // if it is not in the pending trans table then check the speculative table and use that
+ // value as our latest assumption
+ kv = keyValTableSpeculative.get(kvGuard.getKey());
+ }
+
+
+ if (kv == NULL) {
+ // if it is not in the speculative table then check the committed table and use that
+ // value as our latest assumption
+ kv = keyValTableCommitted.get(kvGuard.getKey());
+ }
+
+ if (kvGuard.getValue() != NULL) {
+ if ((kv == NULL) || (!kvGuard.getValue().equals(kv.getValue()))) {
+ return false;
+ }
+ } else {
+ if (kv != NULL) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
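+ // createTransaction() serializes the guard and update sets via
+ // convertDataToBytes() and splits the result into TransactionPart
+ // chunks of at most TransactionPart.MAX_NON_HEADER_SIZE chars; the
+ // final chunk is flagged as the last part so the receiver knows when
+ // reassembly is complete.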
+ Transaction createTransaction() {
+
+ Transaction newTransaction = new Transaction();
+ int transactionPartCount = 0;
+
+ // Convert all the data into a char array so we can start partitioning
+ char[] charData = convertDataToBytes();
+
+ int currentPosition = 0;
+ int remaining = charData.length;
+
+ while (remaining > 0) {
+
+ Boolean isLastPart = false;
+ // determine how much to copy
+ int copySize = TransactionPart.MAX_NON_HEADER_SIZE;
+ if (remaining <= TransactionPart.MAX_NON_HEADER_SIZE) {
+ copySize = remaining;
+ isLastPart = true; // last bit of data so last part
+ }
+
+ // Copy to a smaller version
+ char[] partData = new char[copySize];
+ System.arraycopy(charData, currentPosition, partData, 0, copySize);
+
+ TransactionPart part = new TransactionPart(NULL, machineId, arbitrator, clientLocalSequenceNumber, transactionPartCount, partData, isLastPart);
+ newTransaction.addPartEncode(part);
+
+ // Update position, count and remaining
+ currentPosition += copySize;
+ transactionPartCount++;
+ remaining -= copySize;
+ }
+
+ // Add the Guard Conditions
+ for (KeyValue kv : keyValueGuardSet) {
+ newTransaction.addGuardKV(kv);
+ }
+
+ // Add the updates
+ for (KeyValue kv : keyValueUpdateSet) {
+ newTransaction.addUpdateKV(kv);
+ }
+
+ return newTransaction;
+ }
+
+ char[] convertDataToBytes() {
+
+ // Calculate the size of the data
+ int sizeOfData = 2 * sizeof(int32_t); // Number of Update KV's and Guard KV's
+ sizeOfData += currentDataSize;
+
+ // Data handlers and storage
+ char[] dataArray = new char[sizeOfData];
+ ByteBuffer bbEncode = ByteBuffer.wrap(dataArray);
+
+ // Encode the size of the updates and guard sets
+ bbEncode.putInt(keyValueGuardSet.size());
+ bbEncode.putInt(keyValueUpdateSet.size());
+
+ // Encode all the guard conditions
+ for (KeyValue kv : keyValueGuardSet) {
+ kv.encode(bbEncode);
+ }
+
+ // Encode all the updates
+ for (KeyValue kv : keyValueUpdateSet) {
+ kv.encode(bbEncode);
+ }
+
+ return bbEncode.array();
+ }
+}
--- /dev/null
+
+
+
+
+class PendingTransaction {
+
+ private Set<KeyValue> keyValueUpdateSet = NULL;
+ private Set<KeyValue> keyValueGuardSet = NULL;
+ private int64_t arbitrator = -1;
+ private int64_t clientLocalSequenceNumber = -1;
+ private int64_t machineId = -1;
+
+ private int currentDataSize = 0;
+
+ public PendingTransaction(int64_t _machineId) {
+ machineId = _machineId;
+ keyValueUpdateSet = new HashSet<KeyValue>();
+ keyValueGuardSet = new HashSet<KeyValue>();
+ }
+
+ /**
+ * Add a new key value to the updates
+ *
+ */
+ public void addKV(KeyValue newKV) {
+
+ KeyValue rmKV = NULL;
+
+ // Make sure there are no duplicates
+ for (KeyValue kv : keyValueUpdateSet) {
+ if (kv.getKey().equals(newKV.getKey())) {
+
+ // Remove key if we are adding a newer version of the same key
+ rmKV = kv;
+ break;
+ }
+ }
+
+ // Remove key if we are adding a newer version of the same key
+ if (rmKV != NULL) {
+ keyValueUpdateSet.remove(rmKV);
+ currentDataSize -= rmKV.getSize();
+ }
+
+ // Add the key to the hash set
+ keyValueUpdateSet.add(newKV);
+ currentDataSize += newKV.getSize();
+ }
+
+ /**
+ * Add a new key value to the guard set
+ *
+ */
+ public void addKVGuard(KeyValue newKV) {
+ // Add the key to the hash set
+ keyValueGuardSet.add(newKV);
+ currentDataSize += newKV.getSize();
+ }
+
+ /**
+ * Checks if the arbitrator is the same
+ */
+ public bool checkArbitrator(int64_t arb) {
+ if (arbitrator == -1) {
+ arbitrator = arb;
+ return true;
+ }
+
+ return arb == arbitrator;
+ }
+
+ /**
+ * Get the transaction arbitrator
+ */
+ public int64_t getArbitrator() {
+ return arbitrator;
+ }
+
+ /**
+ * Get the key value update set
+ */
+ public Set<KeyValue> getKVUpdates() {
+ return keyValueUpdateSet;
+ }
+
+ /**
+ * Get the key value guard set
+ */
+ public Set<KeyValue> getKVGuard() {
+ return keyValueGuardSet;
+ }
+
+ public void setClientLocalSequenceNumber(int64_t _clientLocalSequenceNumber) {
+ clientLocalSequenceNumber = _clientLocalSequenceNumber;
+ }
+
+ public int64_t getClientLocalSequenceNumber() {
+ return clientLocalSequenceNumber;
+ }
+
+ public int64_t getMachineId() {
+ return machineId;
+ }
+
+ public bool evaluateGuard(Map<IoTString, KeyValue> keyValTableCommitted, Map<IoTString, KeyValue> keyValTableSpeculative, Map<IoTString, KeyValue> keyValTablePendingTransSpeculative) {
+ for (KeyValue kvGuard : keyValueGuardSet) {
+
+ // First check if the key is in the speculative table, this is the value of the latest assumption
+ KeyValue kv = keyValTablePendingTransSpeculative.get(kvGuard.getKey());
+
+
+ if (kv == NULL) {
+ // if it is not in the pending trans table then check the speculative table and use that
+ // value as our latest assumption
+ kv = keyValTableSpeculative.get(kvGuard.getKey());
+ }
+
+
+ if (kv == NULL) {
+ // if it is not in the speculative table then check the committed table and use that
+ // value as our latest assumption
+ kv = keyValTableCommitted.get(kvGuard.getKey());
+ }
+
+ if (kvGuard.getValue() != NULL) {
+ if ((kv == NULL) || (!kvGuard.getValue().equals(kv.getValue()))) {
+ return false;
+ }
+ } else {
+ if (kv != NULL) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+
+ public Transaction createTransaction() {
+
+ Transaction newTransaction = new Transaction();
+ int transactionPartCount = 0;
+
+ // Convert all the data into a char array so we can start partitioning
+ char[] charData = convertDataToBytes();
+
+ int currentPosition = 0;
+ int remaining = charData.length;
+
+ while (remaining > 0) {
+
+ Boolean isLastPart = false;
+ // determine how much to copy
+ int copySize = TransactionPart.MAX_NON_HEADER_SIZE;
+ if (remaining <= TransactionPart.MAX_NON_HEADER_SIZE) {
+ copySize = remaining;
+ isLastPart = true; // last bit of data so last part
+ }
+
+ // Copy to a smaller version
+ char[] partData = new char[copySize];
+ System.arraycopy(charData, currentPosition, partData, 0, copySize);
+
+ TransactionPart part = new TransactionPart(NULL, machineId, arbitrator, clientLocalSequenceNumber, transactionPartCount, partData, isLastPart);
+ newTransaction.addPartEncode(part);
+
+ // Update position, count and remaining
+ currentPosition += copySize;
+ transactionPartCount++;
+ remaining -= copySize;
+ }
+
+ // Add the Guard Conditions
+ for (KeyValue kv : keyValueGuardSet) {
+ newTransaction.addGuardKV(kv);
+ }
+
+ // Add the updates
+ for (KeyValue kv : keyValueUpdateSet) {
+ newTransaction.addUpdateKV(kv);
+ }
+
+ return newTransaction;
+ }
+
+ private char[] convertDataToBytes() {
+
+ // Calculate the size of the data
+ int sizeOfData = 2 * sizeof(int32_t); // Number of Update KV's and Guard KV's
+ sizeOfData += currentDataSize;
+
+ // Data handlers and storage
+ char[] dataArray = new char[sizeOfData];
+ ByteBuffer bbEncode = ByteBuffer.wrap(dataArray);
+
+ // Encode the size of the updates and guard sets
+ bbEncode.putInt(keyValueGuardSet.size());
+ bbEncode.putInt(keyValueUpdateSet.size());
+
+ // Encode all the guard conditions
+ for (KeyValue kv : keyValueGuardSet) {
+ kv.encode(bbEncode);
+ }
+
+ // Encode all the updates
+ for (KeyValue kv : keyValueUpdateSet) {
+ kv.encode(bbEncode);
+ }
+
+ return bbEncode.array();
+ }
+}
--- /dev/null
+
+/**
+ * Entry for tracking messages that the server rejected. We have to
+ * make sure that all clients know that this message was rejected to
+ * prevent the server from reusing these messages in an attack.
+ * @author Brian Demsky
+ * @version 1.0
+ */
+
+
+class RejectedMessage extends Entry {
+ /* Sequence number */
+ int64_t sequencenum;
+
+
+ /* Machine identifier */
+ int64_t machineid;
+ /* Oldest sequence number in range */
+ int64_t oldseqnum;
+ /* Newest sequence number in range */
+ int64_t newseqnum;
+ /* Is the machine identifier of the relevant slots equal to (or not
+ * equal to) the specified machine identifier. */
+ bool equalto;
+ /* Set of machines that have not received notification. */
+ HashSet<Long> watchset;
+
+ RejectedMessage(Slot slot, int64_t _sequencenum, int64_t _machineid, int64_t _oldseqnum, int64_t _newseqnum, bool _equalto) {
+ super(slot);
+ sequencenum = _sequencenum;
+ machineid=_machineid;
+ oldseqnum=_oldseqnum;
+ newseqnum=_newseqnum;
+ equalto=_equalto;
+ }
+
+ int64_t getOldSeqNum() {
+ return oldseqnum;
+ }
+
+ int64_t getNewSeqNum() {
+ return newseqnum;
+ }
+
+ bool getEqual() {
+ return equalto;
+ }
+
+ int64_t getMachineID() {
+ return machineid;
+ }
+
+
+ int64_t getSequenceNumber() {
+ return sequencenum;
+ }
+
+ static Entry decode(Slot slot, ByteBuffer bb) {
+ int64_t sequencenum=bb.getLong();
+ int64_t machineid=bb.getLong();
+ int64_t oldseqnum=bb.getLong();
+ int64_t newseqnum=bb.getLong();
+ char equalto=bb.get();
+ return new RejectedMessage(slot,sequencenum, machineid, oldseqnum, newseqnum, equalto==1);
+ }
+
+ void setWatchSet(HashSet<Long> _watchset) {
+ watchset=_watchset;
+ }
+
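+ // Once every machine in the watch set has acknowledged this rejection,
+ // the entry no longer needs to be kept live, so it is marked dead and
+ // can be garbage collected from its slot.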
+ void removeWatcher(int64_t machineid) {
+ if (watchset.remove(machineid))
+ if (watchset.isEmpty())
+ setDead();
+ }
+
+ void encode(ByteBuffer bb) {
+ bb.put(Entry.TypeRejectedMessage);
+ bb.putLong(sequencenum);
+ bb.putLong(machineid);
+ bb.putLong(oldseqnum);
+ bb.putLong(newseqnum);
+ bb.put(equalto?(char)1:(char)0);
+ }
+
+ int getSize() {
+ return 4*sizeof(int64_t) + 2*sizeof(char);
+ }
+
+ char getType() {
+ return Entry.TypeRejectedMessage;
+ }
+
+ Entry getCopy(Slot s) {
+ return new RejectedMessage(s,sequencenum, machineid, oldseqnum, newseqnum, equalto);
+ }
+}
--- /dev/null
+
+/**
+ * Entry for tracking messages that the server rejected. We have to
+ * make sure that all clients know that this message was rejected to
+ * prevent the server from reusing these messages in an attack.
+ * @author Brian Demsky
+ * @version 1.0
+ */
+
+
+class RejectedMessage extends Entry {
+ /* Sequence number */
+ private int64_t sequencenum;
+
+
+ /* Machine identifier */
+ private int64_t machineid;
+ /* Oldest sequence number in range */
+ private int64_t oldseqnum;
+ /* Newest sequence number in range */
+ private int64_t newseqnum;
+ /* Is the machine identifier of the relevant slots equal to (or not
+ * equal to) the specified machine identifier. */
+ private bool equalto;
+ /* Set of machines that have not received notification. */
+ private HashSet<Long> watchset;
+
+ RejectedMessage(Slot slot, int64_t _sequencenum, int64_t _machineid, int64_t _oldseqnum, int64_t _newseqnum, bool _equalto) {
+ super(slot);
+ sequencenum = _sequencenum;
+ machineid=_machineid;
+ oldseqnum=_oldseqnum;
+ newseqnum=_newseqnum;
+ equalto=_equalto;
+ }
+
+ int64_t getOldSeqNum() {
+ return oldseqnum;
+ }
+
+ int64_t getNewSeqNum() {
+ return newseqnum;
+ }
+
+ bool getEqual() {
+ return equalto;
+ }
+
+ int64_t getMachineID() {
+ return machineid;
+ }
+
+
+ int64_t getSequenceNumber() {
+ return sequencenum;
+ }
+
+ static Entry decode(Slot slot, ByteBuffer bb) {
+ int64_t sequencenum=bb.getLong();
+ int64_t machineid=bb.getLong();
+ int64_t oldseqnum=bb.getLong();
+ int64_t newseqnum=bb.getLong();
+ char equalto=bb.get();
+ return new RejectedMessage(slot,sequencenum, machineid, oldseqnum, newseqnum, equalto==1);
+ }
+
+ void setWatchSet(HashSet<Long> _watchset) {
+ watchset=_watchset;
+ }
+
+ void removeWatcher(int64_t machineid) {
+ if (watchset.remove(machineid))
+ if (watchset.isEmpty())
+ setDead();
+ }
+
+ void encode(ByteBuffer bb) {
+ bb.put(Entry.TypeRejectedMessage);
+ bb.putLong(sequencenum);
+ bb.putLong(machineid);
+ bb.putLong(oldseqnum);
+ bb.putLong(newseqnum);
+ bb.put(equalto?(char)1:(char)0);
+ }
+
+ int getSize() {
+ return 4*sizeof(int64_t) + 2*sizeof(char);
+ }
+
+ char getType() {
+ return Entry.TypeRejectedMessage;
+ }
+
+ Entry getCopy(Slot s) {
+ return new RejectedMessage(s,sequencenum, machineid, oldseqnum, newseqnum, equalto);
+ }
+}
--- /dev/null
+
+class ServerException extends Exception {
+
+ static final char TypeConnectTimeout = 1;
+ static final char TypeInputTimeout = 2;
+ static final char TypeIncorrectResponseCode = 3;
+ static final char TypeSalt = 4;
+ char type = -1;
+
+ ServerException(String message, char _type) {
+ super(message);
+ type = _type;
+ }
+
+ char getType() {
+ return type;
+ }
+}
--- /dev/null
+
+public class ServerException extends Exception {
+
+ public static final char TypeConnectTimeout = 1;
+ public static final char TypeInputTimeout = 2;
+ public static final char TypeIncorrectResponseCode = 3;
+ public static final char TypeSalt = 4;
+ private char type = -1;
+
+ public ServerException(String message, char _type) {
+ super(message);
+ type = _type;
+ }
+
+ public char getType() {
+ return type;
+ }
+}
--- /dev/null
+#include "Slot.h"
+
+Slot::Slot(Table *_table, int64_t _seqnum, int64_t _machineid, char* _prevhmac, char* _hmac, int64_t _localSequenceNumber) {
+ seqnum = _seqnum;
+ machineid = _machineid;
+ prevhmac = _prevhmac;
+ hmac = _hmac;
+ entries = new Vector<Entry*>();
+ livecount = 1;
+ seqnumlive = true;
+ freespace = SLOT_SIZE - getBaseSize();
+ table = _table;
+ localSequenceNumber = _localSequenceNumber;
+}
+
+Slot::Slot(Table *_table, int64_t _seqnum, int64_t _machineid, char* _prevhmac, int64_t _localSequenceNumber) :
+ Slot(_table, _seqnum, _machineid, _prevhmac, NULL, _localSequenceNumber) {
+}
+
+Slot::Slot(Table *_table, int64_t _seqnum, int64_t _machineid, int64_t _localSequenceNumber) :
+ Slot(_table, _seqnum, _machineid, new char[HMAC_SIZE], NULL, _localSequenceNumber) {
+}
+
+Entry * Slot::addEntry(Entry * e) {
+ e = e->getCopy(this);
+ entries->add(e);
+ livecount++;
+ freespace -= e->getSize();
+ return e;
+}
+
+void Slot::removeEntry(Entry *e) {
+ entries->remove(e);
+ livecount--;
+ freespace += e->getSize();
+}
+
+void Slot::addShallowEntry(Entry *e) {
+ entries->add(e);
+ livecount++;
+ freespace -= e->getSize();
+}
+
+/**
+ * Returns true if the slot has free space to hold the entry without
+ * using its reserved space. */
+
+bool Slot::hasSpace(Entry *e) {
+ int newfreespace = freespace - e->getSize();
+ return newfreespace >= 0;
+}
+
+Vector<Entry*> * Slot::getEntries() {
+ return entries;
+}
+
+Slot * Slotdecode(Table * table, char* array, Mac * mac) {
+ mac->update(array, HMAC_SIZE, SLOT_SIZE - HMAC_SIZE);
+ char* realmac = mac->doFinal();
+
+ ByteBuffer * bb = ByteBuffer_wrap(array);
+ char* hmac = new char[HMAC_SIZE];
+ char* prevhmac = new char[HMAC_SIZE];
+ bb->get(hmac);
+ bb->get(prevhmac);
+ if (memcmp(realmac, hmac, HMAC_SIZE) != 0)
+ throw new Error("Server Error: Invalid HMAC! Potential Attack!");
+
+ int64_t seqnum = bb->getLong();
+ int64_t machineid = bb->getLong();
+ int numentries = bb->getInt();
+ Slot * slot = new Slot(table, seqnum, machineid, prevhmac, hmac, -1);
+
+ for (int i = 0; i < numentries; i++) {
+ slot->addShallowEntry(Entry_decode(slot, bb));
+ }
+
+ return slot;
+}
+
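+/* Serialized layout of a slot (sizes in Slot.h): [HMAC | prev HMAC |
+ * seqnum | machineid | entry count | entries...]. The HMAC is computed
+ * over everything after the HMAC field itself, and each slot carries the
+ * previous slot's HMAC, so the sequence of slots forms a chain that
+ * clients can verify. */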
+char* Slot::encode(Mac * mac) {
+ char* array = new char[SLOT_SIZE];
+ ByteBuffer * bb = ByteBuffer_wrap(array);
+ /* Leave space for the slot HMAC. */
+ bb->position(HMAC_SIZE);
+ bb->put(prevhmac);
+ bb->putLong(seqnum);
+ bb->putLong(machineid);
+ bb->putInt(entries->size());
+ for (Entry * entry : *entries) {
+ entry->encode(bb);
+ }
+ /* Compute our HMAC */
+ mac->update(array, HMAC_SIZE, SLOT_SIZE - HMAC_SIZE);
+ char* realmac = mac->doFinal();
+ hmac = realmac;
+ bb->position(0);
+ bb->put(realmac);
+ return array;
+}
+
+
+/**
+ * Returns the live set of entries for this Slot. Generates a fake
+ * LastMessage entry to represent the information stored by the slot
+ * itself.
+ */
+
+Vector<Entry*> *Slot::getLiveEntries(bool resize) {
+ Vector<Entry*> *liveEntries = new Vector<Entry*>();
+ for (Entry *entry : *entries) {
+ if (entry->isLive()) {
+ if (!resize || entry->getType() != TypeTableStatus)
+ liveEntries->add(entry);
+ }
+ }
+
+ if (seqnumlive && !resize)
+ liveEntries->add(new LastMessage(this, machineid, seqnum));
+
+ return liveEntries;
+}
+
+
+/**
+ * Called once a newer slot records the fact that this slot was sent
+ * by its machine, so this slot no longer needs to stay live for that
+ * purpose.
+ */
+
+void Slot::setDead() {
+ seqnumlive = false;
+ decrementLiveCount();
+}
+
+/**
+ * Update the count of live entries.
+ */
+
+void Slot::decrementLiveCount() {
+ livecount--;
+ if (livecount == 0) {
+ table->decrementLiveCount();
+ }
+}
+
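+/* Builds the per-slot encryption IV from the machine id followed by the
+ * local sequence number shifted left 16 bits, written into a
+ * CloudComm::IV_SIZE char buffer. */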
+char* Slot::getSlotCryptIV() {
+ ByteBuffer * buffer = ByteBuffer_allocate(CloudComm::IV_SIZE);
+ buffer->putLong(machineid);
+ int64_t localSequenceNumberShift = localSequenceNumber << 16;
+ buffer->putLong(localSequenceNumberShift);
+ return buffer->array();
+}
--- /dev/null
+#ifndef SLOT_H
+#define SLOT_H
+#include "common.h"
+#include "Liveness.h"
+
+class Entry;
+class Table;
+class Mac;
+#define SLOT_SIZE 2048
+#define HMAC_SIZE 32
+
+class Slot : public Liveness {
+ private:
+ /** Sequence number of the slot. */
+ int64_t seqnum;
+ /** HMAC of previous slot. */
+ char* prevhmac;
+ /** HMAC of this slot. */
+ char* hmac;
+ /** Machine that sent this slot. */
+ int64_t machineid;
+ /** Vector of entries in this slot. */
+ Vector<Entry *> * entries;
+ /** Pieces of information that are live. */
+ int livecount;
+ /** Flag that indicates whether this slot is still live for
+ * recording the machine that sent it. */
+ bool seqnumlive;
+ /** Number of chars of free space. */
+ int freespace;
+ /** Reference to Table */
+ Table * table;
+
+ int64_t localSequenceNumber;
+ void addShallowEntry(Entry * e);
+
+ public:
+ Slot(Table * _table, int64_t _seqnum, int64_t _machineid, char* _prevhmac, char* _hmac, int64_t _localSequenceNumber);
+ Slot(Table * _table, int64_t _seqnum, int64_t _machineid, char* _prevhmac, int64_t _localSequenceNumber);
+ Slot(Table * _table, int64_t _seqnum, int64_t _machineid, int64_t _localSequenceNumber);
+
+ char* getHMAC() { return hmac; }
+ char* getPrevHMAC() { return prevhmac; }
+ Entry * addEntry(Entry * e);
+ void removeEntry(Entry * e);
+ bool hasSpace(Entry * e);
+ Vector<Entry *> * getEntries();
+ char* encode(Mac * mac);
+ int getBaseSize() { return 2 * HMAC_SIZE + 2 * sizeof(int64_t) + sizeof(int); }
+ Vector<Entry *> * getLiveEntries(bool resize);
+ int64_t getSequenceNumber() { return seqnum; }
+ int64_t getMachineID() { return machineid; }
+ void setDead();
+ void decrementLiveCount();
+ bool isLive() { return livecount > 0; }
+ char* getSlotCryptIV();
+};
+
+Slot * Slotdecode(Table * table, char* array, Mac *mac);
+#endif
--- /dev/null
+
+/**
+ * Circular buffer that holds the live set of slots.
+ * @author Brian Demsky
+ * @version 1.0
+ */
+
+class SlotBuffer {
+ static final int DEFAULT_SIZE = 16;
+
+ Slot[] array;
+ int head;
+ int tail;
+ int64_t oldestseqn;
+
+ SlotBuffer() {
+ array = new Slot[DEFAULT_SIZE + 1];
+ head = tail = 0;
+ oldestseqn = 0;
+ }
+
+ int size() {
+ if (head >= tail)
+ return head - tail;
+ return (array.length + head) - tail;
+ }
+
+ int capacity() {
+ return array.length - 1;
+ }
+
+ void resize(int newsize) {
+ if (newsize == (array.length - 1))
+ return;
+
+ Slot[] newarray = new Slot[newsize + 1];
+ int currsize = size();
+ int index = tail;
+ for (int i = 0; i < currsize; i++) {
+ newarray[i] = array[index];
+ if ((++index) == array.length)
+ index = 0;
+ }
+ array = newarray;
+ tail = 0;
+ head = currsize;
+ }
+
+ void incrementHead() {
+ head++;
+ if (head >= array.length)
+ head = 0;
+ }
+
+ void incrementTail() {
+ tail++;
+ if (tail >= array.length)
+ tail = 0;
+ }
+
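+ // putSlot() assumes slots arrive in sequence-number order. If the new
+ // slot is not contiguous with the newest buffered one, everything held
+ // predates a gap, so the buffer is reset to contain only the new slot;
+ // otherwise the slot is appended and, when the ring is full, the oldest
+ // slot is dropped.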
+ void putSlot(Slot s) {
+
+ int64_t checkNum = (getNewestSeqNum() + 1);
+
+ if (checkNum != s.getSequenceNumber()) {
+ // We have a gap so expunge all our slots
+ oldestseqn = s.getSequenceNumber();
+ tail = 0;
+ head = 1;
+ array[0] = s;
+ return;
+ }
+
+ array[head] = s;
+ incrementHead();
+
+ if (oldestseqn == 0) {
+ oldestseqn = s.getSequenceNumber();
+ }
+
+ if (head == tail) {
+ incrementTail();
+ oldestseqn++;
+ }
+ }
+
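+ // Index math: a sequence number maps to tail + (seqnum - oldestseqn),
+ // wrapping around the end of the array; anything outside the buffered
+ // [oldest, newest] window returns NULL.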
+ Slot getSlot(int64_t seqnum) {
+ int diff = (int) (seqnum - oldestseqn);
+ int index = diff + tail;
+
+ if (index < 0) {
+ // Really old message, so we don't have it anymore
+ return NULL;
+ }
+
+ if (index >= array.length) {
+ if (head >= tail) {
+ return NULL;
+ }
+ index -= array.length;
+ }
+
+ if (index >= array.length) {
+
+ return NULL;
+ }
+ if (head >= tail && index >= head) {
+ return NULL;
+ }
+
+ return array[index];
+ }
+
+ int64_t getOldestSeqNum() {
+ return oldestseqn;
+ }
+
+ int64_t getNewestSeqNum() {
+ return oldestseqn + size() - 1;
+ }
+}
--- /dev/null
+
+/**
+ * Circular buffer that holds the live set of slots.
+ * @author Brian Demsky
+ * @version 1.0
+ */
+
+class SlotBuffer {
+ static final int DEFAULT_SIZE = 16;
+
+ private Slot[] array;
+ private int head;
+ private int tail;
+ public int64_t oldestseqn;
+
+ SlotBuffer() {
+ array = new Slot[DEFAULT_SIZE + 1];
+ head = tail = 0;
+ oldestseqn = 0;
+ }
+
+ int size() {
+ if (head >= tail)
+ return head - tail;
+ return (array.length + head) - tail;
+ }
+
+ int capacity() {
+ return array.length - 1;
+ }
+
+ void resize(int newsize) {
+ if (newsize == (array.length - 1))
+ return;
+
+ Slot[] newarray = new Slot[newsize + 1];
+ int currsize = size();
+ int index = tail;
+ for (int i = 0; i < currsize; i++) {
+ newarray[i] = array[index];
+ if ((++index) == array.length)
+ index = 0;
+ }
+ array = newarray;
+ tail = 0;
+ head = currsize;
+ }
+
+ private void incrementHead() {
+ head++;
+ if (head >= array.length)
+ head = 0;
+ }
+
+ private void incrementTail() {
+ tail++;
+ if (tail >= array.length)
+ tail = 0;
+ }
+
+ void putSlot(Slot s) {
+
+ int64_t checkNum = (getNewestSeqNum() + 1);
+
+ if (checkNum != s.getSequenceNumber()) {
+ // We have a gap so expunge all our slots
+ oldestseqn = s.getSequenceNumber();
+ tail = 0;
+ head = 1;
+ array[0] = s;
+ return;
+ }
+
+ array[head] = s;
+ incrementHead();
+
+ if (oldestseqn == 0) {
+ oldestseqn = s.getSequenceNumber();
+ }
+
+ if (head == tail) {
+ incrementTail();
+ oldestseqn++;
+ }
+ }
+
+ Slot getSlot(int64_t seqnum) {
+ int diff = (int) (seqnum - oldestseqn);
+ int index = diff + tail;
+
+ if (index < 0) {
+ // Really old message, so we don't have it anymore
+ return NULL;
+ }
+
+ if (index >= array.length) {
+ if (head >= tail) {
+ return NULL;
+ }
+ index -= array.length;
+ }
+
+ if (index >= array.length) {
+
+ return NULL;
+ }
+ if (head >= tail && index >= head) {
+ return NULL;
+ }
+
+ return array[index];
+ }
+
+ int64_t getOldestSeqNum() {
+ return oldestseqn;
+ }
+
+ int64_t getNewestSeqNum() {
+ return oldestseqn + size() - 1;
+ }
+}
--- /dev/null
+
+/**
+ * Slot indexer allows slots in both the slot buffer and the new
+ * server response to be looked up in a consistent fashion.
+ * @author Brian Demsky
+ * @version 1.0
+ */
+
+class SlotIndexer {
+ Slot[] updates;
+ SlotBuffer buffer;
+ int64_t firstslotseqnum;
+
+ SlotIndexer(Slot[] _updates, SlotBuffer _buffer) {
+ buffer = _buffer;
+ updates = _updates;
+ firstslotseqnum = updates[0].getSequenceNumber();
+ }
+
+ Slot getSlot(int64_t seqnum) {
+ if (seqnum >= firstslotseqnum) {
+ int offset = (int) (seqnum - firstslotseqnum);
+ if (offset >= updates.length)
+ throw new Error("Invalid Slot Sequence Number Reference");
+ else
+ return updates[offset];
+ } else
+ return buffer.getSlot(seqnum);
+ }
+}
--- /dev/null
+
+/**
+ * Slot indexer allows slots in both the slot buffer and the new
+ * server response to be looked up in a consistent fashion.
+ * @author Brian Demsky
+ * @version 1.0
+ */
+
+class SlotIndexer {
+ private Slot[] updates;
+ private SlotBuffer buffer;
+ private int64_t firstslotseqnum;
+
+ SlotIndexer(Slot[] _updates, SlotBuffer _buffer) {
+ buffer = _buffer;
+ updates = _updates;
+ firstslotseqnum = updates[0].getSequenceNumber();
+ }
+
+ Slot getSlot(int64_t seqnum) {
+ if (seqnum >= firstslotseqnum) {
+ int offset = (int) (seqnum - firstslotseqnum);
+ if (offset >= updates.length)
+ throw new Error("Invalid Slot Sequence Number Reference");
+ else
+ return updates[offset];
+ } else
+ return buffer.getSlot(seqnum);
+ }
+}
--- /dev/null
+
+
+/**
+ * IoTTable data structure. Provides client interface.
+ * @author Brian Demsky
+ * @version 1.0
+ */
+
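+/* Typical client flow, sketched from the methods in this class (error
+ * handling and exact argument types elided; see the method signatures
+ * below for the authoritative API):
+ *
+ *   Table t = new Table(baseurl, password, localMachineId, port);
+ *   t.initTable();                        // first client sets up the block chain
+ *   t.createNewKey(key, arbitratorId);    // register the key's arbitrator
+ *   t.startTransaction();
+ *   t.addKV(key, value);
+ *   TransactionStatus status = t.commitTransaction();
+ *   t.update();                           // pull and validate new slots
+ */
+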
+final class Table {
+
+ /* Constants */
+ static final int FREE_SLOTS = 2; // Number of slots that should be kept free // 10
+ static final int SKIP_THRESHOLD = 10;
+ static final double RESIZE_MULTIPLE = 1.2;
+ static final double RESIZE_THRESHOLD = 0.75;
+ static final int REJECTED_THRESHOLD = 5;
+
+ /* Helper Objects */
+ SlotBuffer buffer = NULL;
+ CloudComm cloud = NULL;
+ Random random = NULL;
+ TableStatus liveTableStatus = NULL;
+ PendingTransaction pendingTransactionBuilder = NULL; // Pending Transaction used in building a Pending Transaction
+ Transaction lastPendingTransactionSpeculatedOn = NULL; // Last transaction that was speculated on from the pending transaction
+ Transaction firstPendingTransaction = NULL; // first transaction in the pending transaction list
+
+ /* Variables */
+ int numberOfSlots = 0; // Number of slots stored in buffer
+ int bufferResizeThreshold = 0; // Threshold on the number of live slots before a resize is needed
+ int64_t liveSlotCount = 0; // Number of currently live slots
+ int64_t oldestLiveSlotSequenceNumver = 0; // Smallest sequence number of the slot with a live entry
+ int64_t localMachineId = 0; // Machine ID of this client device
+ int64_t sequenceNumber = 0; // Largest sequence number a client has received
+ int64_t localSequenceNumber = 0;
+
+ // int smallestTableStatusSeen = -1; // Smallest Table Status that was seen in the latest slots sent from the server
+ // int largestTableStatusSeen = -1; // Largest Table Status that was seen in the latest slots sent from the server
+ int64_t localTransactionSequenceNumber = 0; // Local sequence number counter for transactions
+ int64_t lastTransactionSequenceNumberSpeculatedOn = -1; // the last transaction that was speculated on
+ int64_t oldestTransactionSequenceNumberSpeculatedOn = -1; // the oldest transaction that was speculated on
+ int64_t localArbitrationSequenceNumber = 0;
+ bool hadPartialSendToServer = false;
+ bool attemptedToSendToServer = false;
+ int64_t expectedsize;
+ bool didFindTableStatus = false;
+ int64_t currMaxSize = 0;
+
+ Slot lastSlotAttemptedToSend = NULL;
+ bool lastIsNewKey = false;
+ int lastNewSize = 0;
+ Map<Transaction, List<Integer>> lastTransactionPartsSent = NULL;
+ List<Entry> lastPendingSendArbitrationEntriesToDelete = NULL;
+ NewKey lastNewKey = NULL;
+
+
+ /* Data Structures */
+ Map<IoTString, KeyValue> committedKeyValueTable = NULL; // Table of committed key value pairs
+ Map<IoTString, KeyValue> speculatedKeyValueTable = NULL; // Table of speculated key value pairs, if there is a speculative value
+ Map<IoTString, KeyValue> pendingTransactionSpeculatedKeyValueTable = NULL; // Table of speculated key value pairs, if there is a speculative value from the pending transactions
+ Map<IoTString, NewKey> liveNewKeyTable = NULL; // Table of live new keys
+ HashMap<Long, Pair<Long, Liveness>> lastMessageTable = NULL; // Last message sent by a client machine id -> (Seq Num, Slot or LastMessage);
+ HashMap<Long, HashSet<RejectedMessage>> rejectedMessageWatchListTable = NULL; // Table of machine Ids and the set of rejected messages they have not seen yet
+ Map<IoTString, Long> arbitratorTable = NULL; // Table of keys and their arbitrators
+ Map<Pair<Long, Long>, Abort> liveAbortTable = NULL; // Table live abort messages
+ Map<Long, Map<Pair<Long, Integer>, TransactionPart>> newTransactionParts = NULL; // transaction parts that are seen in this latest round of slots from the server
+ Map<Long, Map<Pair<Long, Integer>, CommitPart>> newCommitParts = NULL; // commit parts that are seen in this latest round of slots from the server
+ Map<Long, Long> lastArbitratedTransactionNumberByArbitratorTable = NULL; // Last transaction sequence number that an arbitrator arbitrated on
+ Map<Long, Transaction> liveTransactionBySequenceNumberTable = NULL; // live transaction grouped by the sequence number
+ Map<Pair<Long, Long>, Transaction> liveTransactionByTransactionIdTable = NULL; // live transaction grouped by the transaction ID
+ Map<Long, Map<Long, Commit>> liveCommitsTable = NULL;
+ Map<IoTString, Commit> liveCommitsByKeyTable = NULL;
+ Map<Long, Long> lastCommitSeenSequenceNumberByArbitratorTable = NULL;
+ Vector<Long> rejectedSlotList = NULL; // List of rejected slots that have yet to be sent to the server
+ List<Transaction> pendingTransactionQueue = NULL;
+ List<ArbitrationRound> pendingSendArbitrationRounds = NULL;
+ List<Entry> pendingSendArbitrationEntriesToDelete = NULL;
+ Map<Transaction, List<Integer>> transactionPartsSent = NULL;
+ Map<Long, TransactionStatus> outstandingTransactionStatus = NULL;
+ Map<Long, Abort> liveAbortsGeneratedByLocal = NULL;
+ Set<Pair<Long, Long>> offlineTransactionsCommittedAndAtServer = NULL;
+ Map<Long, Pair<String, Integer>> localCommunicationTable = NULL;
+ Map<Long, Long> lastTransactionSeenFromMachineFromServer = NULL;
+ Map<Long, Long> lastArbitrationDataLocalSequenceNumberSeenFromArbitrator = NULL;
+
+
+ Table(String baseurl, String password, int64_t _localMachineId, int listeningPort) {
+ localMachineId = _localMachineId;
+ cloud = new CloudComm(this, baseurl, password, listeningPort);
+
+ init();
+ }
+
+ Table(CloudComm _cloud, int64_t _localMachineId) {
+ localMachineId = _localMachineId;
+ cloud = _cloud;
+
+ init();
+ }
+
+ /**
+ * Init all the state needed for table usage.
+ */
+ void init() {
+
+ // Init helper objects
+ random = new Random();
+ buffer = new SlotBuffer();
+
+ // Set Variables
+ oldestLiveSlotSequenceNumver = 1;
+
+ // init data structs
+ committedKeyValueTable = new HashMap<IoTString, KeyValue>();
+ speculatedKeyValueTable = new HashMap<IoTString, KeyValue>();
+ pendingTransactionSpeculatedKeyValueTable = new HashMap<IoTString, KeyValue>();
+ liveNewKeyTable = new HashMap<IoTString, NewKey>();
+ lastMessageTable = new HashMap<Long, Pair<Long, Liveness>>();
+ rejectedMessageWatchListTable = new HashMap<Long, HashSet<RejectedMessage>>();
+ arbitratorTable = new HashMap<IoTString, Long>();
+ liveAbortTable = new HashMap<Pair<Long, Long>, Abort>();
+ newTransactionParts = new HashMap<Long, Map<Pair<Long, Integer>, TransactionPart>>();
+ newCommitParts = new HashMap<Long, Map<Pair<Long, Integer>, CommitPart>>();
+ lastArbitratedTransactionNumberByArbitratorTable = new HashMap<Long, Long>();
+ liveTransactionBySequenceNumberTable = new HashMap<Long, Transaction>();
+ liveTransactionByTransactionIdTable = new HashMap<Pair<Long, Long>, Transaction>();
+ liveCommitsTable = new HashMap<Long, Map<Long, Commit>>();
+ liveCommitsByKeyTable = new HashMap<IoTString, Commit>();
+ lastCommitSeenSequenceNumberByArbitratorTable = new HashMap<Long, Long>();
+ rejectedSlotList = new Vector<Long>();
+ pendingTransactionQueue = new ArrayList<Transaction>();
+ pendingSendArbitrationEntriesToDelete = new ArrayList<Entry>();
+ transactionPartsSent = new HashMap<Transaction, List<Integer>>();
+ outstandingTransactionStatus = new HashMap<Long, TransactionStatus>();
+ liveAbortsGeneratedByLocal = new HashMap<Long, Abort>();
+ offlineTransactionsCommittedAndAtServer = new HashSet<Pair<Long, Long>>();
+ localCommunicationTable = new HashMap<Long, Pair<String, Integer>>();
+ lastTransactionSeenFromMachineFromServer = new HashMap<Long, Long>();
+ pendingSendArbitrationRounds = new ArrayList<ArbitrationRound>();
+ lastArbitrationDataLocalSequenceNumberSeenFromArbitrator = new HashMap<Long, Long>();
+
+
+ // Other init stuff
+ numberOfSlots = buffer.capacity();
+ setResizeThreshold();
+ }
+
+ // TODO: delete method
+ synchronized void printSlots() {
+ int64_t o = buffer.getOldestSeqNum();
+ int64_t n = buffer.getNewestSeqNum();
+
+ int[] types = new int[10];
+
+ int num = 0;
+
+ int livec = 0;
+ int deadc = 0;
+
+ int casdasd = 0;
+
+ int liveslo = 0;
+
+ for (int64_t i = o; i < (n + 1); i++) {
+ Slot s = buffer.getSlot(i);
+
+
+ if (s.isLive()) {
+ liveslo++;
+ }
+
+ Vector<Entry> entries = s.getEntries();
+
+ for (Entry e : entries) {
+ if (e.isLive()) {
+ int type = e.getType();
+
+
+ if (type == 6) {
+ RejectedMessage rej = (RejectedMessage)e;
+ casdasd++;
+
+ System.out.println(rej.getMachineID());
+ }
+
+
+ types[type] = types[type] + 1;
+ num++;
+ livec++;
+ } else {
+ deadc++;
+ }
+ }
+ }
+
+ for (int i = 0; i < 10; i++) {
+ System.out.println(i + " " + types[i]);
+ }
+ System.out.println("Live count: " + livec);
+ System.out.println("Live Slot count: " + liveslo);
+
+ System.out.println("Dead count: " + deadc);
+ System.out.println("Old: " + o);
+ System.out.println("New: " + n);
+ System.out.println("Size: " + buffer.size());
+ // System.out.println("Commits: " + liveCommitsTable.size());
+ System.out.println("pendingTrans: " + pendingTransactionQueue.size());
+ System.out.println("Trans Status Out: " + outstandingTransactionStatus.size());
+
+ for (Long k : lastArbitratedTransactionNumberByArbitratorTable.keySet()) {
+ System.out.println(k + ": " + lastArbitratedTransactionNumberByArbitratorTable.get(k));
+ }
+
+
+ for (Long a : liveCommitsTable.keySet()) {
+ for (Long b : liveCommitsTable.get(a).keySet()) {
+ for (KeyValue kv : liveCommitsTable.get(a).get(b).getKeyValueUpdateSet()) {
+ System.out.print(kv + " ");
+ }
+ System.out.print("|| ");
+ }
+ System.out.println();
+ }
+
+ }
+
+ /**
+ * Initialize the table by inserting a table status entry as the first
+ * entry in the block chain; also initializes the crypto state.
+ */
+ synchronized void initTable() throws ServerException {
+ cloud.initSecurity();
+
+ // Create the first insertion into the block chain which is the table status
+ Slot s = new Slot(this, 1, localMachineId, localSequenceNumber);
+ localSequenceNumber++;
+ TableStatus status = new TableStatus(s, numberOfSlots);
+ s.addEntry(status);
+ Slot[] array = cloud.putSlot(s, numberOfSlots);
+
+ if (array == NULL) {
+ array = new Slot[] {s};
+ // update local block chain
+ validateAndUpdate(array, true);
+ } else if (array.length == 1) {
+ // in case we did push the slot BUT we failed to init it
+ validateAndUpdate(array, true);
+ } else {
+ throw new Error("Error on initialization");
+ }
+ }
+
+ /**
+ * Rebuild the table from scratch by pulling the latest block chain from the server.
+ */
+ synchronized void rebuild() throws ServerException {
+ // Just pull the latest slots from the server
+ Slot[] newslots = cloud.getSlots(sequenceNumber + 1);
+ validateAndUpdate(newslots, true);
+ sendToServer(NULL);
+ updateLiveTransactionsAndStatus();
+
+ }
+
+ // String toString() {
+ // String retString = " Committed Table: \n";
+ // retString += "---------------------------\n";
+ // retString += commitedTable.toString();
+
+ // retString += "\n\n";
+
+ // retString += " Speculative Table: \n";
+ // retString += "---------------------------\n";
+ // retString += speculativeTable.toString();
+
+ // return retString;
+ // }
+
+ synchronized void addLocalCommunication(int64_t arbitrator, String hostName, int portNumber) {
+ localCommunicationTable.put(arbitrator, new Pair<String, Integer>(hostName, portNumber));
+ }
+
+ synchronized Long getArbitrator(IoTString key) {
+ return arbitratorTable.get(key);
+ }
+
+ synchronized void close() {
+ cloud.close();
+ }
+
+ synchronized IoTString getCommitted(IoTString key) {
+ KeyValue kv = committedKeyValueTable.get(key);
+
+ if (kv != NULL) {
+ return kv.getValue();
+ } else {
+ return NULL;
+ }
+ }
+
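+ // Speculative reads resolve in order: the pending-transaction
+ // speculation table first, then the speculated table, then the
+ // committed table.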
+ synchronized IoTString getSpeculative(IoTString key) {
+ KeyValue kv = pendingTransactionSpeculatedKeyValueTable.get(key);
+
+ if (kv == NULL) {
+ kv = speculatedKeyValueTable.get(key);
+ }
+
+ if (kv == NULL) {
+ kv = committedKeyValueTable.get(key);
+ }
+
+ if (kv != NULL) {
+ return kv.getValue();
+ } else {
+ return NULL;
+ }
+ }
+
+ synchronized IoTString getCommittedAtomic(IoTString key) {
+ KeyValue kv = committedKeyValueTable.get(key);
+
+ if (arbitratorTable.get(key) == NULL) {
+ throw new Error("Key not Found.");
+ }
+
+ // Make sure new key value pair matches the current arbitrator
+ if (!pendingTransactionBuilder.checkArbitrator(arbitratorTable.get(key))) {
+ // TODO: Maybe not throw an error
+ throw new Error("Not all Key Values Match Arbitrator.");
+ }
+
+ if (kv != NULL) {
+ pendingTransactionBuilder.addKVGuard(new KeyValue(key, kv.getValue()));
+ return kv.getValue();
+ } else {
+ pendingTransactionBuilder.addKVGuard(new KeyValue(key, NULL));
+ return NULL;
+ }
+ }
+
+ synchronized IoTString getSpeculativeAtomic(IoTString key) {
+ if (arbitratorTable.get(key) == NULL) {
+ throw new Error("Key not Found.");
+ }
+
+ // Make sure new key value pair matches the current arbitrator
+ if (!pendingTransactionBuilder.checkArbitrator(arbitratorTable.get(key))) {
+ // TODO: Maybe not throw an error
+ throw new Error("Not all Key Values Match Arbitrator.");
+ }
+
+ KeyValue kv = pendingTransactionSpeculatedKeyValueTable.get(key);
+
+ if (kv == NULL) {
+ kv = speculatedKeyValueTable.get(key);
+ }
+
+ if (kv == NULL) {
+ kv = committedKeyValueTable.get(key);
+ }
+
+ if (kv != NULL) {
+ pendingTransactionBuilder.addKVGuard(new KeyValue(key, kv.getValue()));
+ return kv.getValue();
+ } else {
+ pendingTransactionBuilder.addKVGuard(new KeyValue(key, NULL));
+ return NULL;
+ }
+ }
+
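+ /**
+ * Pull the latest slots from the server, apply them to the local block chain and push any
+ * pending local data back to the server. If the server cannot be reached, fall back to
+ * pulling arbitration data from each locally reachable arbitrator. Returns true only if
+ * the server update succeeded.
+ */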
+ synchronized bool update() {
+ try {
+ Slot[] newSlots = cloud.getSlots(sequenceNumber + 1);
+ validateAndUpdate(newSlots, false);
+ sendToServer(NULL);
+
+
+ updateLiveTransactionsAndStatus();
+
+ return true;
+ } catch (Exception e) {
+ // e.printStackTrace();
+
+ for (Long m : localCommunicationTable.keySet()) {
+ updateFromLocal(m);
+ }
+ }
+
+ return false;
+ }
+
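+ /**
+ * Create a new key with the given machine as its arbitrator. Returns false if the key
+ * already has an arbitrator; otherwise retries until the NewKey entry is accepted by the
+ * server and then returns true.
+ */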
+ synchronized bool createNewKey(IoTString keyName, int64_t machineId) throws ServerException {
+ while (true) {
+ if (arbitratorTable.get(keyName) != NULL) {
+ // There is already an arbitrator
+ return false;
+ }
+
+ NewKey newKey = new NewKey(NULL, keyName, machineId);
+
+ if (sendToServer(newKey)) {
+ // If successfully inserted
+ return true;
+ }
+ }
+ }
+
+ synchronized void startTransaction() {
+ // Create a new transaction, invalidates any old pending transactions.
+ pendingTransactionBuilder = new PendingTransaction(localMachineId);
+ }
+
+ synchronized void addKV(IoTString key, IoTString value) {
+
+ // Make sure it is a valid key
+ if (arbitratorTable.get(key) == NULL) {
+ throw new Error("Key not Found.");
+ }
+
+ // Make sure new key value pair matches the current arbitrator
+ if (!pendingTransactionBuilder.checkArbitrator(arbitratorTable.get(key))) {
+ // TODO: Maybe not throw an error
+ throw new Error("Not all Key Values Match Arbitrator.");
+ }
+
+ // Add the key value to this transaction
+ KeyValue kv = new KeyValue(key, value);
+ pendingTransactionBuilder.addKV(kv);
+ }
+
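+ /**
+ * Commit the pending transaction: assign it a client-local sequence number, arbitrate on it
+ * immediately if this machine is its arbitrator or queue it for the remote arbitrator, then
+ * try to push it to the server (falling back to local connections if the server fails).
+ * Returns the TransactionStatus object for the new transaction.
+ */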
+ synchronized TransactionStatus commitTransaction() {
+
+ if (pendingTransactionBuilder.getKVUpdates().size() == 0) {
+ // transaction with no updates will have no effect on the system
+ return new TransactionStatus(TransactionStatus.StatusNoEffect, -1);
+ }
+
+ // Set the local transaction sequence number and increment
+ pendingTransactionBuilder.setClientLocalSequenceNumber(localTransactionSequenceNumber);
+ localTransactionSequenceNumber++;
+
+ // Create the transaction status
+ TransactionStatus transactionStatus = new TransactionStatus(TransactionStatus.StatusPending, pendingTransactionBuilder.getArbitrator());
+
+ // Create the new transaction
+ Transaction newTransaction = pendingTransactionBuilder.createTransaction();
+ newTransaction.setTransactionStatus(transactionStatus);
+
+ if (pendingTransactionBuilder.getArbitrator() != localMachineId) {
+ // Add it to the queue and invalidate the builder for safety
+ pendingTransactionQueue.add(newTransaction);
+ } else {
+ arbitrateOnLocalTransaction(newTransaction);
+ updateLiveStateFromLocal();
+ }
+
+ pendingTransactionBuilder = new PendingTransaction(localMachineId);
+
+ try {
+ sendToServer(NULL);
+ } catch (ServerException e) {
+
+ Set<Long> arbitratorTriedAndFailed = new HashSet<Long>();
+ for (Iterator<Transaction> iter = pendingTransactionQueue.iterator(); iter.hasNext(); ) {
+ Transaction transaction = iter.next();
+
+ if (arbitratorTriedAndFailed.contains(transaction.getArbitrator())) {
+ // Already failed to contact this arbitrator, so skip its remaining transactions
+ // to preserve per-arbitrator ordering
+ continue;
+ }
+
+ Pair<Boolean, Boolean> sendReturn = sendTransactionToLocal(transaction);
+
+ if (sendReturn.getFirst()) {
+ // Failed to contact over local
+ arbitratorTriedAndFailed.add(transaction.getArbitrator());
+ } else {
+ // Successful contact or should not contact
+
+ if (sendReturn.getSecond()) {
+ // did arbitrate
+ iter.remove();
+ }
+ }
+ }
+ }
+
+ updateLiveStateFromLocal();
+
+ return transactionStatus;
+ }
+
+ /**
+ * Get the machine ID for this client
+ */
+ int64_t getMachineId() {
+ return localMachineId;
+ }
+
+ /**
+ * Decrement the number of live slots that we currently have
+ */
+ void decrementLiveCount() {
+ liveSlotCount--;
+ }
+
+ /**
+ * Recalculate the new resize threshold
+ */
+ void setResizeThreshold() {
+ int resizeLower = (int) (RESIZE_THRESHOLD * numberOfSlots);
+ bufferResizeThreshold = resizeLower - 1 + random.nextInt(numberOfSlots - resizeLower);
+ }
+
+ int64_t getLocalSequenceNumber() {
+ return localSequenceNumber;
+ }
+
+
+ bool lastInsertedNewKey = false;
+
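+ /**
+ * Push pending transaction parts, pending arbitration rounds and an optional NewKey entry
+ * to the server, one slot at a time. If a previous send only partially completed, first
+ * determine whether that slot actually made it into the block chain before sending anything
+ * new. Returns true when no NewKey remains to be inserted and false if the requested key
+ * already has an arbitrator.
+ */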
+ bool sendToServer(NewKey newKey) throws ServerException {
+
+ bool fromRetry = false;
+
+ try {
+ if (hadPartialSendToServer) {
+ Slot[] newSlots = cloud.getSlots(sequenceNumber + 1);
+ if (newSlots.length == 0) {
+ fromRetry = true;
+ ThreeTuple<Boolean, Boolean, Slot[]> sendSlotsReturn = sendSlotsToServer(lastSlotAttemptedToSend, lastNewSize, lastIsNewKey);
+
+ if (sendSlotsReturn.getFirst()) {
+ if (newKey != NULL) {
+ if (lastInsertedNewKey && (lastNewKey.getKey() == newKey.getKey()) && (lastNewKey.getMachineID() == newKey.getMachineID())) {
+ newKey = NULL;
+ }
+ }
+
+ for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ transaction.resetServerFailure();
+
+ // Update which transaction parts still need to be sent
+ transaction.removeSentParts(lastTransactionPartsSent.get(transaction));
+
+ // Add the transaction status to the outstanding list
+ outstandingTransactionStatus.put(transaction.getSequenceNumber(), transaction.getTransactionStatus());
+
+ // Update the transaction status
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentPartial);
+
+ // Check if all the transaction parts were successfully sent and if so then remove it from pending
+ if (transaction.didSendAllParts()) {
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentFully);
+ pendingTransactionQueue.remove(transaction);
+ }
+ }
+ } else {
+
+ newSlots = sendSlotsReturn.getThird();
+
+ bool isInserted = false;
+ for (Slot s : newSlots) {
+ if ((s.getSequenceNumber() == lastSlotAttemptedToSend.getSequenceNumber()) && (s.getMachineID() == localMachineId)) {
+ isInserted = true;
+ break;
+ }
+ }
+
+ for (Slot s : newSlots) {
+ if (isInserted) {
+ break;
+ }
+
+ // Process each entry in the slot
+ for (Entry entry : s.getEntries()) {
+
+ if (entry.getType() == Entry.TypeLastMessage) {
+ LastMessage lastMessage = (LastMessage)entry;
+ if ((lastMessage.getMachineID() == localMachineId) && (lastMessage.getSequenceNumber() == lastSlotAttemptedToSend.getSequenceNumber())) {
+ isInserted = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (isInserted) {
+ if (newKey != NULL) {
+ if (lastInsertedNewKey && (lastNewKey.getKey() == newKey.getKey()) && (lastNewKey.getMachineID() == newKey.getMachineID())) {
+ newKey = NULL;
+ }
+ }
+
+ for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ transaction.resetServerFailure();
+
+ // Update which transaction parts still need to be sent
+ transaction.removeSentParts(lastTransactionPartsSent.get(transaction));
+
+ // Add the transaction status to the outstanding list
+ outstandingTransactionStatus.put(transaction.getSequenceNumber(), transaction.getTransactionStatus());
+
+ // Update the transaction status
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentPartial);
+
+ // Check if all the transaction parts were successfully sent and if so then remove it from pending
+ if (transaction.didSendAllParts()) {
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentFully);
+ pendingTransactionQueue.remove(transaction);
+ } else {
+ transaction.resetServerFailure();
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+ }
+ }
+ }
+
+ for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ transaction.resetServerFailure();
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+
+ if (sendSlotsReturn.getThird().length != 0) {
+ // insert into the local block chain
+ validateAndUpdate(sendSlotsReturn.getThird(), true);
+ }
+ // continue;
+ } else {
+ bool isInserted = false;
+ for (Slot s : newSlots) {
+ if ((s.getSequenceNumber() == lastSlotAttemptedToSend.getSequenceNumber()) && (s.getMachineID() == localMachineId)) {
+ isInserted = true;
+ break;
+ }
+ }
+
+ for (Slot s : newSlots) {
+ if (isInserted) {
+ break;
+ }
+
+ // Process each entry in the slot
+ for (Entry entry : s.getEntries()) {
+
+ if (entry.getType() == Entry.TypeLastMessage) {
+ LastMessage lastMessage = (LastMessage)entry;
+ if ((lastMessage.getMachineID() == localMachineId) && (lastMessage.getSequenceNumber() == lastSlotAttemptedToSend.getSequenceNumber())) {
+ isInserted = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (isInserted) {
+ if (newKey != NULL) {
+ if (lastInsertedNewKey && (lastNewKey.getKey() == newKey.getKey()) && (lastNewKey.getMachineID() == newKey.getMachineID())) {
+ newKey = NULL;
+ }
+ }
+
+ for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ transaction.resetServerFailure();
+
+ // Update which transaction parts still need to be sent
+ transaction.removeSentParts(lastTransactionPartsSent.get(transaction));
+
+ // Add the transaction status to the outstanding list
+ outstandingTransactionStatus.put(transaction.getSequenceNumber(), transaction.getTransactionStatus());
+
+ // Update the transaction status
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentPartial);
+
+ // Check if all the transaction parts were successfully sent and if so then remove it from pending
+ if (transaction.didSendAllParts()) {
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentFully);
+ pendingTransactionQueue.remove(transaction);
+ } else {
+ transaction.resetServerFailure();
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+ }
+ } else {
+ for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ transaction.resetServerFailure();
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+ }
+
+ // insert into the local block chain
+ validateAndUpdate(newSlots, true);
+ }
+ }
+ } catch (ServerException e) {
+ throw e;
+ }
+
+
+
+ try {
+ // While we have stuff that needs inserting into the block chain
+ while ((pendingTransactionQueue.size() > 0) || (pendingSendArbitrationRounds.size() > 0) || (newKey != NULL)) {
+
+ fromRetry = false;
+
+ if (hadPartialSendToServer) {
+ throw new Error("Should Be error free");
+ }
+
+
+
+ // If a key with this name already has an arbitrator then stop
+ if ((newKey != NULL) && (arbitratorTable.get(newKey.getKey()) != NULL)) {
+ return false;
+ }
+
+ // Create the slot
+ Slot slot = new Slot(this, sequenceNumber + 1, localMachineId, buffer.getSlot(sequenceNumber).getHMAC(), localSequenceNumber);
+ localSequenceNumber++;
+
+ // Try to fill the slot with data
+ ThreeTuple<Boolean, Integer, Boolean> fillSlotsReturn = fillSlot(slot, false, newKey);
+ bool needsResize = fillSlotsReturn.getFirst();
+ int newSize = fillSlotsReturn.getSecond();
+ Boolean insertedNewKey = fillSlotsReturn.getThird();
+
+ if (needsResize) {
+ // Reset which transaction to send
+ for (Transaction transaction : transactionPartsSent.keySet()) {
+ transaction.resetNextPartToSend();
+
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer() && !transaction.getServerFailure()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+
+ // Clear the sent data since we are trying again
+ pendingSendArbitrationEntriesToDelete.clear();
+ transactionPartsSent.clear();
+
+ // We needed a resize so try again
+ fillSlot(slot, true, newKey);
+ }
+
+ lastSlotAttemptedToSend = slot;
+ lastIsNewKey = (newKey != NULL);
+ lastInsertedNewKey = insertedNewKey;
+ lastNewSize = newSize;
+ lastNewKey = newKey;
+ lastTransactionPartsSent = new HashMap<Transaction, List<Integer>>(transactionPartsSent);
+ lastPendingSendArbitrationEntriesToDelete = new ArrayList<Entry>(pendingSendArbitrationEntriesToDelete);
+
+
+ ThreeTuple<Boolean, Boolean, Slot[]> sendSlotsReturn = sendSlotsToServer(slot, newSize, newKey != NULL);
+
+ if (sendSlotsReturn.getFirst()) {
+
+ // Did insert into the block chain
+
+ if (insertedNewKey) {
+ // This slot was what was inserted not a previous slot
+
+ // The new key was successfully inserted into the block chain, so don't insert it again
+ newKey = NULL;
+ }
+
+ // Remove the aborts and commit parts that were sent from the pending to send queue
+ for (Iterator<ArbitrationRound> iter = pendingSendArbitrationRounds.iterator(); iter.hasNext(); ) {
+ ArbitrationRound round = iter.next();
+ round.removeParts(pendingSendArbitrationEntriesToDelete);
+
+ if (round.isDoneSending()) {
+ // Sent all the parts
+ iter.remove();
+ }
+ }
+
+ for (Transaction transaction : transactionPartsSent.keySet()) {
+ transaction.resetServerFailure();
+
+ // Update which transaction parts still need to be sent
+ transaction.removeSentParts(transactionPartsSent.get(transaction));
+
+ // Add the transaction status to the outstanding list
+ outstandingTransactionStatus.put(transaction.getSequenceNumber(), transaction.getTransactionStatus());
+
+ // Update the transaction status
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentPartial);
+
+ // Check if all the transaction parts were successfully sent and if so then remove it from pending
+ if (transaction.didSendAllParts()) {
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentFully);
+ pendingTransactionQueue.remove(transaction);
+ }
+ }
+ } else {
+
+ // if (!sendSlotsReturn.getSecond()) {
+ // for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ // transaction.resetServerFailure();
+ // }
+ // } else {
+ // for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ // transaction.resetServerFailure();
+
+ // // Update which transactions parts still need to be sent
+ // transaction.removeSentParts(transactionPartsSent.get(transaction));
+
+ // // Add the transaction status to the outstanding list
+ // outstandingTransactionStatus.put(transaction.getSequenceNumber(), transaction.getTransactionStatus());
+
+ // // Update the transaction status
+ // transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentPartial);
+
+ // // Check if all the transaction parts were successfully sent and if so then remove it from pending
+ // if (transaction.didSendAllParts()) {
+ // transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentFully);
+ // pendingTransactionQueue.remove(transaction);
+
+ // for (KeyValue kv : transaction.getKeyValueUpdateSet()) {
+ // System.out.println("Sent: " + kv + " from: " + localMachineId + " Slot:" + lastSlotAttemptedToSend.getSequenceNumber() + " Claimed:" + transaction.getSequenceNumber());
+ // }
+ // }
+ // }
+ // }
+
+ // Reset which transaction to send
+ for (Transaction transaction : transactionPartsSent.keySet()) {
+ transaction.resetNextPartToSend();
+ // transaction.resetNextPartToSend();
+
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer() && !transaction.getServerFailure()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+ }
+
+ // Clear the sent data in preparation for next send
+ pendingSendArbitrationEntriesToDelete.clear();
+ transactionPartsSent.clear();
+
+ if (sendSlotsReturn.getThird().length != 0) {
+ // insert into the local block chain
+ validateAndUpdate(sendSlotsReturn.getThird(), true);
+ }
+ }
+
+ } catch (ServerException e) {
+
+ if (e.getType() != ServerException.TypeInputTimeout) {
+ // e.printStackTrace();
+
+ // Nothing was able to be sent to the server so just clear these data structures
+ for (Transaction transaction : transactionPartsSent.keySet()) {
+ transaction.resetNextPartToSend();
+
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer() && !transaction.getServerFailure()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+ } else {
+ // There was a partial send to the server
+ hadPartialSendToServer = true;
+
+
+ // if (!fromRetry) {
+ // lastTransactionPartsSent = new HashMap<Transaction, List<Integer>>(transactionPartsSent);
+ // lastPendingSendArbitrationEntriesToDelete = new ArrayList<Entry>(pendingSendArbitrationEntriesToDelete);
+ // }
+
+ // Nothing was able to be sent to the server so just clear these data structures
+ for (Transaction transaction : transactionPartsSent.keySet()) {
+ transaction.resetNextPartToSend();
+ transaction.setServerFailure();
+ }
+ }
+
+ pendingSendArbitrationEntriesToDelete.clear();
+ transactionPartsSent.clear();
+
+ throw e;
+ }
+
+ return newKey == NULL;
+ }
+
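+ /**
+ * Contact the given machine over its local connection and pull any aborts and commit parts
+ * generated since the last arbitration data we have seen from it. Returns false if the
+ * machine is not locally reachable or does not respond.
+ */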
+ synchronized bool updateFromLocal(int64_t machineId) {
+ Pair<String, Integer> localCommunicationInformation = localCommunicationTable.get(machineId);
+ if (localCommunicationInformation == NULL) {
+ // Can't talk to that device locally, so do nothing
+ return false;
+ }
+
+ // Get the size of the send data
+ int sendDataSize = sizeof(int32_t) + sizeof(int64_t);
+
+ Long lastArbitrationDataLocalSequenceNumber = (int64_t) - 1;
+ if (lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(machineId) != NULL) {
+ lastArbitrationDataLocalSequenceNumber = lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(machineId);
+ }
+
+ char[] sendData = new char[sendDataSize];
+ ByteBuffer bbEncode = ByteBuffer.wrap(sendData);
+
+ // Encode the data
+ bbEncode.putLong(lastArbitrationDataLocalSequenceNumber);
+ bbEncode.putInt(0);
+
+ // Send by local
+ char[] returnData = cloud.sendLocalData(sendData, localSequenceNumber, localCommunicationInformation.getFirst(), localCommunicationInformation.getSecond());
+ localSequenceNumber++;
+
+ if (returnData == NULL) {
+ // Could not contact server
+ return false;
+ }
+
+ // Decode the data
+ ByteBuffer bbDecode = ByteBuffer.wrap(returnData);
+ int numberOfEntries = bbDecode.getInt();
+
+ for (int i = 0; i < numberOfEntries; i++) {
+ char type = bbDecode.get();
+ if (type == Entry.TypeAbort) {
+ Abort abort = (Abort)Abort.decode(NULL, bbDecode);
+ processEntry(abort);
+ } else if (type == Entry.TypeCommitPart) {
+ CommitPart commitPart = (CommitPart)CommitPart.decode(NULL, bbDecode);
+ processEntry(commitPart);
+ }
+ }
+
+ updateLiveStateFromLocal();
+
+ return true;
+ }
+
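+ /**
+ * Send a transaction directly to its arbitrator over the local connection and process the
+ * aborts and commit parts that come back. The returned pair is
+ * (failed to contact locally, transaction was arbitrated).
+ */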
+ Pair<Boolean, Boolean> sendTransactionToLocal(Transaction transaction) {
+
+ // Get the devices local communications
+ Pair<String, Integer> localCommunicationInformation = localCommunicationTable.get(transaction.getArbitrator());
+
+ if (localCommunicationInformation == NULL) {
+ // Can't talk to that device locally, so do nothing
+ return new Pair<Boolean, Boolean>(true, false);
+ }
+
+ // Get the size of the send data
+ int sendDataSize = sizeof(int32_t) + sizeof(int64_t);
+ for (TransactionPart part : transaction.getParts().values()) {
+ sendDataSize += part.getSize();
+ }
+
+ Long lastArbitrationDataLocalSequenceNumber = (int64_t) - 1;
+ if (lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(transaction.getArbitrator()) != NULL) {
+ lastArbitrationDataLocalSequenceNumber = lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(transaction.getArbitrator());
+ }
+
+ // Make the send data size
+ char[] sendData = new char[sendDataSize];
+ ByteBuffer bbEncode = ByteBuffer.wrap(sendData);
+
+ // Encode the data
+ bbEncode.putLong(lastArbitrationDataLocalSequenceNumber);
+ bbEncode.putInt(transaction.getParts().size());
+ for (TransactionPart part : transaction.getParts().values()) {
+ part.encode(bbEncode);
+ }
+
+
+ // Send by local
+ char[] returnData = cloud.sendLocalData(sendData, localSequenceNumber, localCommunicationInformation.getFirst(), localCommunicationInformation.getSecond());
+ localSequenceNumber++;
+
+ if (returnData == NULL) {
+ // Could not contact server
+ return new Pair<Boolean, Boolean>(true, false);
+ }
+
+ // Decode the data
+ ByteBuffer bbDecode = ByteBuffer.wrap(returnData);
+ bool didCommit = bbDecode.get() == 1;
+ bool couldArbitrate = bbDecode.get() == 1;
+ int numberOfEntries = bbDecode.getInt();
+ bool foundAbort = false;
+
+ for (int i = 0; i < numberOfEntries; i++) {
+ char type = bbDecode.get();
+ if (type == Entry.TypeAbort) {
+ Abort abort = (Abort)Abort.decode(NULL, bbDecode);
+
+ if ((abort.getTransactionMachineId() == localMachineId) && (abort.getTransactionClientLocalSequenceNumber() == transaction.getClientLocalSequenceNumber())) {
+ foundAbort = true;
+ }
+
+ processEntry(abort);
+ } else if (type == Entry.TypeCommitPart) {
+ CommitPart commitPart = (CommitPart)CommitPart.decode(NULL, bbDecode);
+ processEntry(commitPart);
+ }
+ }
+
+ updateLiveStateFromLocal();
+
+ if (couldArbitrate) {
+ TransactionStatus status = transaction.getTransactionStatus();
+ if (didCommit) {
+ status.setStatus(TransactionStatus.StatusCommitted);
+ } else {
+ status.setStatus(TransactionStatus.StatusAborted);
+ }
+ } else {
+ TransactionStatus status = transaction.getTransactionStatus();
+ if (foundAbort) {
+ status.setStatus(TransactionStatus.StatusAborted);
+ } else {
+ status.setStatus(TransactionStatus.StatusCommitted);
+ }
+ }
+
+ return new Pair<Boolean, Boolean>(false, true);
+ }
+
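+ /**
+ * Handle a request that arrived over the local connection: decode and arbitrate on the
+ * transaction it carries (if any), then reply with the arbitration outcome plus any aborts
+ * and commits the caller has not seen yet.
+ */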
+ synchronized char[] acceptDataFromLocal(char[] data) {
+
+ // Decode the data
+ ByteBuffer bbDecode = ByteBuffer.wrap(data);
+ int64_t lastArbitratedSequenceNumberSeen = bbDecode.getLong();
+ int numberOfParts = bbDecode.getInt();
+
+ // If we did commit a transaction or not
+ bool didCommit = false;
+ bool couldArbitrate = false;
+
+ if (numberOfParts != 0) {
+
+ // decode the transaction
+ Transaction transaction = new Transaction();
+ for (int i = 0; i < numberOfParts; i++) {
+ bbDecode.get();
+ TransactionPart newPart = (TransactionPart)TransactionPart.decode(NULL, bbDecode);
+ transaction.addPartDecode(newPart);
+ }
+
+ // Arbitrate on transaction and pull relevant return data
+ Pair<Boolean, Boolean> localArbitrateReturn = arbitrateOnLocalTransaction(transaction);
+ couldArbitrate = localArbitrateReturn.getFirst();
+ didCommit = localArbitrateReturn.getSecond();
+
+ updateLiveStateFromLocal();
+
+ // Transaction was sent to the server so keep track of it to prevent double commit
+ if (transaction.getSequenceNumber() != -1) {
+ offlineTransactionsCommittedAndAtServer.add(transaction.getId());
+ }
+ }
+
+ // The data to send back
+ int returnDataSize = 0;
+ List<Entry> unseenArbitrations = new ArrayList<Entry>();
+
+ // Get the aborts to send back
+ List<Long> abortLocalSequenceNumbers = new ArrayList<Long >(liveAbortsGeneratedByLocal.keySet());
+ Collections.sort(abortLocalSequenceNumbers);
+ for (Long localSequenceNumber : abortLocalSequenceNumbers) {
+ if (localSequenceNumber <= lastArbitratedSequenceNumberSeen) {
+ continue;
+ }
+
+ Abort abort = liveAbortsGeneratedByLocal.get(localSequenceNumber);
+ unseenArbitrations.add(abort);
+ returnDataSize += abort.getSize();
+ }
+
+ // Get the commits to send back
+ Map<Long, Commit> commitForClientTable = liveCommitsTable.get(localMachineId);
+ if (commitForClientTable != NULL) {
+ List<Long> commitLocalSequenceNumbers = new ArrayList<Long>(commitForClientTable.keySet());
+ Collections.sort(commitLocalSequenceNumbers);
+
+ for (Long localSequenceNumber : commitLocalSequenceNumbers) {
+ Commit commit = commitForClientTable.get(localSequenceNumber);
+
+ if (localSequenceNumber <= lastArbitratedSequenceNumberSeen) {
+ continue;
+ }
+
+ unseenArbitrations.addAll(commit.getParts().values());
+
+ for (CommitPart commitPart : commit.getParts().values()) {
+ returnDataSize += commitPart.getSize();
+ }
+ }
+ }
+
+ // Number of arbitration entries to decode
+ returnDataSize += 2 * sizeof(int32_t);
+
+ // Boolean of did commit or not
+ if (numberOfParts != 0) {
+ returnDataSize += sizeof(char);
+ }
+
+ // Data to send Back
+ char[] returnData = new char[returnDataSize];
+ ByteBuffer bbEncode = ByteBuffer.wrap(returnData);
+
+ if (numberOfParts != 0) {
+ if (didCommit) {
+ bbEncode.put((char)1);
+ } else {
+ bbEncode.put((char)0);
+ }
+ if (couldArbitrate) {
+ bbEncode.put((char)1);
+ } else {
+ bbEncode.put((char)0);
+ }
+ }
+
+ bbEncode.putInt(unseenArbitrations.size());
+ for (Entry entry : unseenArbitrations) {
+ entry.encode(bbEncode);
+ }
+
+
+ localSequenceNumber++;
+ return returnData;
+ }
+
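+ /**
+ * Attempt to put a slot on the server. Returns (inserted, last try inserted, returned slots):
+ * inserted is true only when the server accepted this slot outright; otherwise, if there had
+ * been a partial send, the returned slots are scanned to determine whether this slot made it
+ * into the block chain anyway.
+ */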
+ ThreeTuple<Boolean, Boolean, Slot[]> sendSlotsToServer(Slot slot, int newSize, bool isNewKey) throws ServerException {
+
+ bool attemptedToSendToServerTmp = attemptedToSendToServer;
+ attemptedToSendToServer = true;
+
+ bool inserted = false;
+ bool lastTryInserted = false;
+
+ Slot[] array = cloud.putSlot(slot, newSize);
+ if (array == NULL) {
+ array = new Slot[] {slot};
+ rejectedSlotList.clear();
+ inserted = true;
+ } else {
+ if (array.length == 0) {
+ throw new Error("Server Error: Did not send any slots");
+ }
+
+ // if (attemptedToSendToServerTmp) {
+ if (hadPartialSendToServer) {
+
+ bool isInserted = false;
+ for (Slot s : array) {
+ if ((s.getSequenceNumber() == slot.getSequenceNumber()) && (s.getMachineID() == localMachineId)) {
+ isInserted = true;
+ break;
+ }
+ }
+
+ for (Slot s : array) {
+ if (isInserted) {
+ break;
+ }
+
+ // Process each entry in the slot
+ for (Entry entry : s.getEntries()) {
+
+ if (entry.getType() == Entry.TypeLastMessage) {
+ LastMessage lastMessage = (LastMessage)entry;
+
+ if ((lastMessage.getMachineID() == localMachineId) && (lastMessage.getSequenceNumber() == slot.getSequenceNumber())) {
+ isInserted = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!isInserted) {
+ rejectedSlotList.add(slot.getSequenceNumber());
+ lastTryInserted = false;
+ } else {
+ lastTryInserted = true;
+ }
+ } else {
+ rejectedSlotList.add(slot.getSequenceNumber());
+ lastTryInserted = false;
+ }
+ }
+
+ return new ThreeTuple<Boolean, Boolean, Slot[]>(inserted, lastTryInserted, array);
+ }
+
+ /**
+ * Fill the slot with rescued live entries, pending arbitration data and transaction parts.
+ * The first element of the returned tuple is true if a resize is needed (in which case the
+ * slot was not filled), the second is the new buffer size, and the third is whether the
+ * new key entry was inserted.
+ */
+ ThreeTuple<Boolean, Integer, Boolean> fillSlot(Slot slot, bool resize, NewKey newKeyEntry) {
+
+
+ int newSize = 0;
+ if (liveSlotCount > bufferResizeThreshold) {
+ resize = true; //Resize is forced
+
+ }
+
+ if (resize) {
+ newSize = (int) (numberOfSlots * RESIZE_MULTIPLE);
+ TableStatus status = new TableStatus(slot, newSize);
+ slot.addEntry(status);
+ }
+
+ // Fill with rejected slots first before doing anything else
+ doRejectedMessages(slot);
+
+ // Do mandatory rescue of entries
+ ThreeTuple<Boolean, Boolean, Long> mandatoryRescueReturn = doMandatoryResuce(slot, resize);
+
+ // Extract working variables
+ bool needsResize = mandatoryRescueReturn.getFirst();
+ bool seenLiveSlot = mandatoryRescueReturn.getSecond();
+ int64_t currentRescueSequenceNumber = mandatoryRescueReturn.getThird();
+
+ if (needsResize && !resize) {
+ // We need to resize but we are not resizing so return false
+ return new ThreeTuple<Boolean, Integer, Boolean>(true, NULL, NULL);
+ }
+
+ bool inserted = false;
+ if (newKeyEntry != NULL) {
+ newKeyEntry.setSlot(slot);
+ if (slot.hasSpace(newKeyEntry)) {
+
+ slot.addEntry(newKeyEntry);
+ inserted = true;
+ }
+ }
+
+ // Clear the transactions, aborts and commits that were sent previously
+ transactionPartsSent.clear();
+ pendingSendArbitrationEntriesToDelete.clear();
+
+ for (ArbitrationRound round : pendingSendArbitrationRounds) {
+ bool isFull = false;
+ round.generateParts();
+ List<Entry> parts = round.getParts();
+
+ // Insert pending arbitration data
+ for (Entry arbitrationData : parts) {
+
+ // If it is an abort then we need to set some information
+ if (arbitrationData instanceof Abort) {
+ ((Abort)arbitrationData).setSequenceNumber(slot.getSequenceNumber());
+ }
+
+ if (!slot.hasSpace(arbitrationData)) {
+ // No space left, so we can't add any more of these data entries
+ isFull = true;
+ break;
+ }
+
+ // Add to this current slot and add it to entries to delete
+ slot.addEntry(arbitrationData);
+ pendingSendArbitrationEntriesToDelete.add(arbitrationData);
+ }
+
+ if (isFull) {
+ break;
+ }
+ }
+
+ if (pendingTransactionQueue.size() > 0) {
+
+ Transaction transaction = pendingTransactionQueue.get(0);
+
+ // Set the transaction sequence number if it has yet to be inserted into the block chain
+ // if ((!transaction.didSendAPartToServer() && !transaction.getServerFailure()) || (transaction.getSequenceNumber() == -1)) {
+ // transaction.setSequenceNumber(slot.getSequenceNumber());
+ // }
+
+ if ((!transaction.didSendAPartToServer()) || (transaction.getSequenceNumber() == -1)) {
+ transaction.setSequenceNumber(slot.getSequenceNumber());
+ }
+
+
+ while (true) {
+ TransactionPart part = transaction.getNextPartToSend();
+
+ if (part == NULL) {
+ // Ran out of parts to send for this transaction so move on
+ break;
+ }
+
+ if (slot.hasSpace(part)) {
+ slot.addEntry(part);
+ List<Integer> partsSent = transactionPartsSent.get(transaction);
+ if (partsSent == NULL) {
+ partsSent = new ArrayList<Integer>();
+ transactionPartsSent.put(transaction, partsSent);
+ }
+ partsSent.add(part.getPartNumber());
+ transactionPartsSent.put(transaction, partsSent);
+ } else {
+ break;
+ }
+ }
+ }
+
+ // Fill the remainder of the slot with rescue data
+ doOptionalRescue(slot, seenLiveSlot, currentRescueSequenceNumber, resize);
+
+ return new ThreeTuple<Boolean, Integer, Boolean>(false, newSize, inserted);
+ }
+
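+ /**
+ * Generate RejectedMessage entries for the sequence numbers this client tried to insert but
+ * the server rejected, covering missing sequence numbers as ranges and present ones
+ * individually.
+ */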
+ void doRejectedMessages(Slot s) {
+ if (! rejectedSlotList.isEmpty()) {
+ /* TODO: We should avoid generating a rejected message entry if
+ * there is already a sufficient entry in the queue (e.g., an
+ * equal-to entry with value true and the same sequence number). */
+
+ int64_t old_seqn = rejectedSlotList.firstElement();
+ if (rejectedSlotList.size() > REJECTED_THRESHOLD) {
+ int64_t new_seqn = rejectedSlotList.lastElement();
+ RejectedMessage rm = new RejectedMessage(s, s.getSequenceNumber(), localMachineId, old_seqn, new_seqn, false);
+ s.addEntry(rm);
+ } else {
+ int64_t prev_seqn = -1;
+ int i = 0;
+ /* Go through list of missing messages */
+ for (; i < rejectedSlotList.size(); i++) {
+ int64_t curr_seqn = rejectedSlotList.get(i);
+ Slot s_msg = buffer.getSlot(curr_seqn);
+ if (s_msg != NULL)
+ break;
+ prev_seqn = curr_seqn;
+ }
+ /* Generate rejected message entry for missing messages */
+ if (prev_seqn != -1) {
+ RejectedMessage rm = new RejectedMessage(s, s.getSequenceNumber(), localMachineId, old_seqn, prev_seqn, false);
+ s.addEntry(rm);
+ }
+ /* Generate rejected message entries for present messages */
+ for (; i < rejectedSlotList.size(); i++) {
+ int64_t curr_seqn = rejectedSlotList.get(i);
+ Slot s_msg = buffer.getSlot(curr_seqn);
+ int64_t machineid = s_msg.getMachineID();
+ RejectedMessage rm = new RejectedMessage(s, s.getSequenceNumber(), machineid, curr_seqn, curr_seqn, true);
+ s.addEntry(rm);
+ }
+ }
+ }
+ }
+
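+ /**
+ * Rescue the live entries from slots that are about to be overwritten (fall off the end of
+ * the buffer) by copying them into the new slot. Returns
+ * (needs resize, seen a live slot, sequence number at which the rescue stopped).
+ */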
+ ThreeTuple<Boolean, Boolean, Long> doMandatoryResuce(Slot slot, bool resize) {
+ int64_t newestSequenceNumber = buffer.getNewestSeqNum();
+ int64_t oldestSequenceNumber = buffer.getOldestSeqNum();
+ if (oldestLiveSlotSequenceNumver < oldestSequenceNumber) {
+ oldestLiveSlotSequenceNumver = oldestSequenceNumber;
+ }
+
+ int64_t currentSequenceNumber = oldestLiveSlotSequenceNumver;
+ bool seenLiveSlot = false;
+ int64_t firstIfFull = newestSequenceNumber + 1 - numberOfSlots; // smallest seq number in the buffer if it is full
+ int64_t threshold = firstIfFull + FREE_SLOTS; // we want the buffer to be clear of live entries up to this point
+
+
+ // Mandatory Rescue
+ for (; currentSequenceNumber < threshold; currentSequenceNumber++) {
+ Slot previousSlot = buffer.getSlot(currentSequenceNumber);
+ // Push slot number forward
+ if (! seenLiveSlot) {
+ oldestLiveSlotSequenceNumver = currentSequenceNumber;
+ }
+
+ if (!previousSlot.isLive()) {
+ continue;
+ }
+
+ // We have seen a live slot
+ seenLiveSlot = true;
+
+ // Get all the live entries for a slot
+ Vector<Entry> liveEntries = previousSlot.getLiveEntries(resize);
+
+ // Iterate over all the live entries and try to rescue them
+ for (Entry liveEntry : liveEntries) {
+ if (slot.hasSpace(liveEntry)) {
+
+ // Enough space to rescue the entry
+ slot.addEntry(liveEntry);
+ } else if (currentSequenceNumber == firstIfFull) {
+ // There is no space, but the entry is about to fall off the end of the
+ // buffer, so a resize is required
+ return new ThreeTuple<Boolean, Boolean, Long>(true, seenLiveSlot, currentSequenceNumber);
+
+ }
+ }
+ }
+
+ // Did not resize
+ return new ThreeTuple<Boolean, Boolean, Long>(false, seenLiveSlot, currentSequenceNumber);
+ }
+
+ void doOptionalRescue(Slot s, bool seenliveslot, int64_t seqn, bool resize) {
+ /* now go through live entries from least to greatest sequence number until
+ * either all live slots added, or the slot doesn't have enough room
+ * for SKIP_THRESHOLD consecutive entries*/
+ int skipcount = 0;
+ int64_t newestseqnum = buffer.getNewestSeqNum();
+ search:
+ for (; seqn <= newestseqnum; seqn++) {
+ Slot prevslot = buffer.getSlot(seqn);
+ //Push slot number forward
+ if (!seenliveslot)
+ oldestLiveSlotSequenceNumver = seqn;
+
+ if (!prevslot.isLive())
+ continue;
+ seenliveslot = true;
+ Vector<Entry> liveentries = prevslot.getLiveEntries(resize);
+ for (Entry liveentry : liveentries) {
+ if (s.hasSpace(liveentry))
+ s.addEntry(liveentry);
+ else {
+ skipcount++;
+ if (skipcount > SKIP_THRESHOLD)
+ break search;
+ }
+ }
+ }
+ }
+
+ /**
+ * Checks for malicious activity and updates the local copy of the block chain.
+ */
+ void validateAndUpdate(Slot[] newSlots, bool acceptUpdatesToLocal) {
+
+ // The cloud communication layer has checked slot HMACs already before decoding
+ if (newSlots.length == 0) {
+ return;
+ }
+
+ // Make sure all slots are newer than the last largest slot this client has seen
+ int64_t firstSeqNum = newSlots[0].getSequenceNumber();
+ if (firstSeqNum <= sequenceNumber) {
+ throw new Error("Server Error: Sent older slots!");
+ }
+
+ // Create an object that can access both new slots and slots in our local chain
+ // without committing slots to our local chain
+ SlotIndexer indexer = new SlotIndexer(newSlots, buffer);
+
+ // Check that the HMAC chain is not broken
+ checkHMACChain(indexer, newSlots);
+
+ // Set to keep track of messages from clients
+ HashSet<Long> machineSet = new HashSet<Long>(lastMessageTable.keySet());
+
+ // Process each slots data
+ for (Slot slot : newSlots) {
+ processSlot(indexer, slot, acceptUpdatesToLocal, machineSet);
+
+ updateExpectedSize();
+ }
+
+ // If there is a gap, check to see if the server sent us everything.
+ if (firstSeqNum != (sequenceNumber + 1)) {
+
+ // Check the size of the slots that were sent down by the server.
+ // Can only check the size if there was a gap
+ checkNumSlots(newSlots.length);
+
+ // Since there was a gap, every machine must have pushed a slot or have a
+ // last-message entry. If not, then the server is hiding slots.
+ if (!machineSet.isEmpty()) {
+ throw new Error("Missing record for machines: " + machineSet);
+ }
+ }
+
+ // Update the size of our local block chain.
+ commitNewMaxSize();
+
+ // Commit new to slots to the local block chain.
+ for (Slot slot : newSlots) {
+
+ // Insert this slot into our local block chain copy.
+ buffer.putSlot(slot);
+
+ // Keep track of how many slots are currently live (have live data in them).
+ liveSlotCount++;
+ }
+
+ // Get the sequence number of the latest slot in the system
+ sequenceNumber = newSlots[newSlots.length - 1].getSequenceNumber();
+
+ updateLiveStateFromServer();
+
+ // No Need to remember after we pulled from the server
+ offlineTransactionsCommittedAndAtServer.clear();
+
+ // This is invalidated now
+ hadPartialSendToServer = false;
+ }
+
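+ /**
+ * Recompute the live local state after new slots have been pulled from the server: process
+ * new transaction parts, arbitrate on them, update the committed table, drop dead
+ * transactions and redo the speculative tables.
+ */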
+ void updateLiveStateFromServer() {
+ // Process the new transaction parts
+ processNewTransactionParts();
+
+ // Do arbitration on new transactions that were received
+ arbitrateFromServer();
+
+ // Update all the committed keys
+ bool didCommitOrSpeculate = updateCommittedTable();
+
+ // Delete the transactions that are now dead
+ updateLiveTransactionsAndStatus();
+
+ // Do speculations
+ didCommitOrSpeculate |= updateSpeculativeTable(didCommitOrSpeculate);
+ updatePendingTransactionSpeculativeTable(didCommitOrSpeculate);
+ }
+
+ void updateLiveStateFromLocal() {
+ // Update all the committed keys
+ bool didCommitOrSpeculate = updateCommittedTable();
+
+ // Delete the transactions that are now dead
+ updateLiveTransactionsAndStatus();
+
+ // Do speculations
+ didCommitOrSpeculate |= updateSpeculativeTable(didCommitOrSpeculate);
+ updatePendingTransactionSpeculativeTable(didCommitOrSpeculate);
+ }
+
+ void initExpectedSize(int64_t firstSequenceNumber, int64_t numberOfSlots) {
+ // if (didFindTableStatus) {
+ // return;
+ // }
+ int64_t prevslots = firstSequenceNumber;
+
+
+ if (didFindTableStatus) {
+ // expectedsize = (prevslots < ((int64_t) numberOfSlots)) ? (int) prevslots : expectedsize;
+ // System.out.println("Here2: " + expectedsize + " " + numberOfSlots + " " + prevslots);
+
+ } else {
+ expectedsize = (prevslots < ((int64_t) numberOfSlots)) ? (int) prevslots : numberOfSlots;
+ // System.out.println("Here: " + expectedsize);
+ }
+
+ // System.out.println(numberOfSlots);
+
+ didFindTableStatus = true;
+ currMaxSize = numberOfSlots;
+ }
+
+ void updateExpectedSize() {
+ expectedsize++;
+
+ if (expectedsize > currMaxSize) {
+ expectedsize = currMaxSize;
+ }
+ }
+
+
+ /**
+ * Check that the server sent back the expected number of slots. This is only called when
+ * there is a gap between the slots we have locally and the slots sent by the server, in
+ * which case the slots sent by the server must contain at least one table status entry.
+ */
+ void checkNumSlots(int numberOfSlots) {
+ if (numberOfSlots != expectedsize) {
+ throw new Error("Server Error: Server did not send all slots. Expected: " + expectedsize + " Received:" + numberOfSlots);
+ }
+ }
+
+ void updateCurrMaxSize(int newmaxsize) {
+ currMaxSize = newmaxsize;
+ }
+
+
+ /**
+ * Update the size of the local buffer if needed.
+ */
+ void commitNewMaxSize() {
+ didFindTableStatus = false;
+
+ // Resize the local slot buffer
+ if (numberOfSlots != currMaxSize) {
+ buffer.resize((int)currMaxSize);
+ }
+
+ // Change the number of local slots to the new size
+ numberOfSlots = (int)currMaxSize;
+
+
+ // Recalculate the resize threshold since the size of the local buffer has changed
+ setResizeThreshold();
+ }
+
+ /**
+ * Process the new transaction parts from this latest round of slots received from the server
+ */
+ void processNewTransactionParts() {
+
+ if (newTransactionParts.size() == 0) {
+ // Nothing new to process
+ return;
+ }
+
+ // Iterate through all the machine Ids that we received new parts for
+ for (Long machineId : newTransactionParts.keySet()) {
+ Map<Pair<Long, Integer>, TransactionPart> parts = newTransactionParts.get(machineId);
+
+ // Iterate through all the parts for that machine Id
+ for (Pair<Long, Integer> partId : parts.keySet()) {
+ TransactionPart part = parts.get(partId);
+
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(part.getArbitratorId());
+ if ((lastTransactionNumber != NULL) && (lastTransactionNumber >= part.getSequenceNumber())) {
+ // Set dead the transaction part
+ part.setDead();
+ continue;
+ }
+
+ // Get the transaction object for that sequence number
+ Transaction transaction = liveTransactionBySequenceNumberTable.get(part.getSequenceNumber());
+
+ if (transaction == NULL) {
+ // This is a new transaction that we don't have yet, so make a new one
+ transaction = new Transaction();
+
+ // Insert this new transaction into the live tables
+ liveTransactionBySequenceNumberTable.put(part.getSequenceNumber(), transaction);
+ liveTransactionByTransactionIdTable.put(part.getTransactionId(), transaction);
+ }
+
+ // Add that part to the transaction
+ transaction.addPartDecode(part);
+ }
+ }
+
+ // Clear all the new transaction parts in preparation for the next time the server sends slots
+ newTransactionParts.clear();
+ }
+
+
+ int64_t lastSeqNumArbOn = 0;
+
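+ /**
+ * Arbitrate, in sequence-number order, on the transactions received from the server for
+ * which this machine is the arbitrator: transactions whose guards hold are folded into a
+ * single batched commit, and aborts are generated for those whose guards fail.
+ */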
+ void arbitrateFromServer() {
+
+ if (liveTransactionBySequenceNumberTable.size() == 0) {
+ // Nothing to arbitrate on so move on
+ return;
+ }
+
+ // Get the transaction sequence numbers and sort from oldest to newest
+ List<Long> transactionSequenceNumbers = new ArrayList<Long>(liveTransactionBySequenceNumberTable.keySet());
+ Collections.sort(transactionSequenceNumbers);
+
+ // Collection of key value pairs updated by the transactions committed in this batch
+ Map<IoTString, KeyValue> speculativeTableTmp = new HashMap<IoTString, KeyValue>();
+
+ // The sequence number of the last transaction committed in this batch
+ int64_t lastTransactionCommitted = -1;
+ Set<Abort> generatedAborts = new HashSet<Abort>();
+
+ for (Long transactionSequenceNumber : transactionSequenceNumbers) {
+ Transaction transaction = liveTransactionBySequenceNumberTable.get(transactionSequenceNumber);
+
+
+
+ // Check whether this machine arbitrates for this transaction; if not, we can't arbitrate it
+ if (transaction.getArbitrator() != localMachineId) {
+ continue;
+ }
+
+ if (transactionSequenceNumber < lastSeqNumArbOn) {
+ continue;
+ }
+
+ if (offlineTransactionsCommittedAndAtServer.contains(transaction.getId())) {
+ // We have already seen this transaction locally, so don't commit it again
+ continue;
+ }
+
+
+ if (!transaction.isComplete()) {
+ // Arbitrating further would process transactions out of order, so stop here;
+ // most likely this transaction is still missing parts
+ break;
+ }
+
+
+ // update the largest transaction seen by arbitrator from server
+ if (lastTransactionSeenFromMachineFromServer.get(transaction.getMachineId()) == NULL) {
+ lastTransactionSeenFromMachineFromServer.put(transaction.getMachineId(), transaction.getClientLocalSequenceNumber());
+ } else {
+ Long lastTransactionSeenFromMachine = lastTransactionSeenFromMachineFromServer.get(transaction.getMachineId());
+ if (transaction.getClientLocalSequenceNumber() > lastTransactionSeenFromMachine) {
+ lastTransactionSeenFromMachineFromServer.put(transaction.getMachineId(), transaction.getClientLocalSequenceNumber());
+ }
+ }
+
+ if (transaction.evaluateGuard(committedKeyValueTable, speculativeTableTmp, NULL)) {
+ // Guard evaluated as true
+
+ // Update the local changes so we can make the commit
+ for (KeyValue kv : transaction.getKeyValueUpdateSet()) {
+ speculativeTableTmp.put(kv.getKey(), kv);
+ }
+
+ // Update what the last transaction committed was for use in batch commit
+ lastTransactionCommitted = transactionSequenceNumber;
+ } else {
+ // Guard evaluated to false, so create an abort
+
+ // Create the abort
+ Abort newAbort = new Abort(NULL,
+ transaction.getClientLocalSequenceNumber(),
+ transaction.getSequenceNumber(),
+ transaction.getMachineId(),
+ transaction.getArbitrator(),
+ localArbitrationSequenceNumber);
+ localArbitrationSequenceNumber++;
+
+ generatedAborts.add(newAbort);
+
+ // Insert the abort so we can process
+ processEntry(newAbort);
+ }
+
+ lastSeqNumArbOn = transactionSequenceNumber;
+
+ // liveTransactionBySequenceNumberTable.remove(transactionSequenceNumber);
+ }
+
+ Commit newCommit = NULL;
+
+ // If there is something to commit
+ if (speculativeTableTmp.size() != 0) {
+
+ // Create the commit and increment the commit sequence number
+ newCommit = new Commit(localArbitrationSequenceNumber, localMachineId, lastTransactionCommitted);
+ localArbitrationSequenceNumber++;
+
+ // Add all the new keys to the commit
+ for (KeyValue kv : speculativeTableTmp.values()) {
+ newCommit.addKV(kv);
+ }
+
+ // create the commit parts
+ newCommit.createCommitParts();
+
+ // Append all the commit parts to the end of the pending queue waiting for sending to the server
+
+ // Insert the commit so we can process it
+ for (CommitPart commitPart : newCommit.getParts().values()) {
+ processEntry(commitPart);
+ }
+ }
+
+ if ((newCommit != NULL) || (generatedAborts.size() > 0)) {
+ ArbitrationRound arbitrationRound = new ArbitrationRound(newCommit, generatedAborts);
+ pendingSendArbitrationRounds.add(arbitrationRound);
+
+ if (compactArbitrationData()) {
+ ArbitrationRound newArbitrationRound = pendingSendArbitrationRounds.get(pendingSendArbitrationRounds.size() - 1);
+ if (newArbitrationRound.getCommit() != NULL) {
+ for (CommitPart commitPart : newArbitrationRound.getCommit().getParts().values()) {
+ processEntry(commitPart);
+ }
+ }
+ }
+ }
+ }
+
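+ /**
+ * Arbitrate immediately on a transaction that was created locally or received over a local
+ * connection. Returns (could arbitrate, did commit).
+ */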
+ Pair<Boolean, Boolean> arbitrateOnLocalTransaction(Transaction transaction) {
+
+ // Check whether this machine arbitrates for this transaction; if not, we can't arbitrate it
+ if (transaction.getArbitrator() != localMachineId) {
+ return new Pair<Boolean, Boolean>(false, false);
+ }
+
+ if (!transaction.isComplete()) {
+ // Arbitrating would happen out of order if we continued, so just return;
+ // most likely this transaction is still missing parts
+ return new Pair<Boolean, Boolean>(false, false);
+ }
+
+ if (transaction.getMachineId() != localMachineId) {
+ // don't do this check for local transactions
+ if (lastTransactionSeenFromMachineFromServer.get(transaction.getMachineId()) != NULL) {
+ if (lastTransactionSeenFromMachineFromServer.get(transaction.getMachineId()) > transaction.getClientLocalSequenceNumber()) {
+ // We have already seen this transaction from the server
+ return new Pair<Boolean, Boolean>(false, false);
+ }
+ }
+ }
+
+ if (transaction.evaluateGuard(committedKeyValueTable, NULL, NULL)) {
+ // Guard evaluated as true
+
+ // Create the commit and increment the commit sequence number
+ Commit newCommit = new Commit(localArbitrationSequenceNumber, localMachineId, -1);
+ localArbitrationSequenceNumber++;
+
+ // Update the local changes so we can make the commit
+ for (KeyValue kv : transaction.getKeyValueUpdateSet()) {
+ newCommit.addKV(kv);
+ }
+
+ // create the commit parts
+ newCommit.createCommitParts();
+
+ // Append all the commit parts to the end of the pending queue waiting for sending to the server
+ ArbitrationRound arbitrationRound = new ArbitrationRound(newCommit, new HashSet<Abort>());
+ pendingSendArbitrationRounds.add(arbitrationRound);
+
+ if (compactArbitrationData()) {
+ ArbitrationRound newArbitrationRound = pendingSendArbitrationRounds.get(pendingSendArbitrationRounds.size() - 1);
+ for (CommitPart commitPart : newArbitrationRound.getCommit().getParts().values()) {
+ processEntry(commitPart);
+ }
+ } else {
+ // Insert the commit so we can process it
+ for (CommitPart commitPart : newCommit.getParts().values()) {
+ processEntry(commitPart);
+ }
+ }
+
+ if (transaction.getMachineId() == localMachineId) {
+ TransactionStatus status = transaction.getTransactionStatus();
+ if (status != NULL) {
+ status.setStatus(TransactionStatus.StatusCommitted);
+ }
+ }
+
+ updateLiveStateFromLocal();
+ return new Pair<Boolean, Boolean>(true, true);
+ } else {
+
+ if (transaction.getMachineId() == localMachineId) {
+ // For locally created messages update the status
+
+ // Guard evaluated to false, so create an abort
+ TransactionStatus status = transaction.getTransactionStatus();
+ if (status != NULL) {
+ status.setStatus(TransactionStatus.StatusAborted);
+ }
+ } else {
+ Set<Abort> addAbortSet = new HashSet<Abort>();
+
+
+ // Create the abort
+ Abort newAbort = new Abort(NULL,
+ transaction.getClientLocalSequenceNumber(),
+ -1,
+ transaction.getMachineId(),
+ transaction.getArbitrator(),
+ localArbitrationSequenceNumber);
+ localArbitrationSequenceNumber++;
+
+ addAbortSet.add(newAbort);
+
+
+ // Append all the commit parts to the end of the pending queue waiting for sending to the server
+ ArbitrationRound arbitrationRound = new ArbitrationRound(NULL, addAbortSet);
+ pendingSendArbitrationRounds.add(arbitrationRound);
+
+ if (compactArbitrationData()) {
+ ArbitrationRound newArbitrationRound = pendingSendArbitrationRounds.get(pendingSendArbitrationRounds.size() - 1);
+ for (CommitPart commitPart : newArbitrationRound.getCommit().getParts().values()) {
+ processEntry(commitPart);
+ }
+ }
+ }
+
+ updateLiveStateFromLocal();
+ return new Pair<Boolean, Boolean>(true, false);
+ }
+ }
+
+ /**
+ * Compacts the arbitration data by merging commits and aggregating aborts so that a single large push of commits can be done instead of many small updates.
+ */
+ bool compactArbitrationData() {
+
+ if (pendingSendArbitrationRounds.size() < 2) {
+ // Nothing to compact so do nothing
+ return false;
+ }
+
+ ArbitrationRound lastRound = pendingSendArbitrationRounds.get(pendingSendArbitrationRounds.size() - 1);
+ if (lastRound.didSendPart()) {
+ return false;
+ }
+
+ bool hadCommit = (lastRound.getCommit() == NULL);
+ bool gotNewCommit = false;
+
+ int numberToDelete = 1;
+ while (numberToDelete < pendingSendArbitrationRounds.size()) {
+ ArbitrationRound round = pendingSendArbitrationRounds.get(pendingSendArbitrationRounds.size() - numberToDelete - 1);
+
+ if (round.isFull() || round.didSendPart()) {
+ // Stop since there is a part that cannot be compacted and we need to compact in order
+ break;
+ }
+
+ if (round.getCommit() == NULL) {
+
+ // Try compacting aborts only
+ int newSize = round.getCurrentSize() + lastRound.getAbortsCount();
+ if (newSize > ArbitrationRound.MAX_PARTS) {
+ // Can't compact since the result would be too large
+ break;
+ }
+ lastRound.addAborts(round.getAborts());
+ } else {
+
+ // Create a new larger commit
+ Commit newCommit = Commit.merge(lastRound.getCommit(), round.getCommit(), localArbitrationSequenceNumber);
+ localArbitrationSequenceNumber++;
+
+ // Create the commit parts so that we can count them
+ newCommit.createCommitParts();
+
+ // Calculate the new size of the parts
+ int newSize = newCommit.getNumberOfParts();
+ newSize += lastRound.getAbortsCount();
+ newSize += round.getAbortsCount();
+
+ if (newSize > ArbitrationRound.MAX_PARTS) {
+ // Can't compact since the result would be too large
+ break;
+ }
+
+ // Set the new compacted part
+ lastRound.setCommit(newCommit);
+ lastRound.addAborts(round.getAborts());
+ gotNewCommit = true;
+ }
+
+ numberToDelete++;
+ }
+
+ if (numberToDelete != 1) {
+ // If there is a compaction
+
+ // Delete the previous pieces that are now in the new compacted piece
+ if (numberToDelete == pendingSendArbitrationRounds.size()) {
+ pendingSendArbitrationRounds.clear();
+ } else {
+ for (int i = 0; i < numberToDelete; i++) {
+ pendingSendArbitrationRounds.remove(pendingSendArbitrationRounds.size() - 1);
+ }
+ }
+
+ // Add the new compacted into the pending to send list
+ pendingSendArbitrationRounds.add(lastRound);
+
+ // Should reinsert into the commit processor
+ if (hadCommit && gotNewCommit) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+ // bool compactArbitrationData() {
+ // return false;
+ // }
+
+ /**
+ * Update all the commits and the committed tables, sets dead the dead transactions
+ */
+ bool updateCommittedTable() {
+
+ if (newCommitParts.size() == 0) {
+ // Nothing new to process
+ return false;
+ }
+
+ // Iterate through all the machine Ids that we received new parts for
+ for (Long machineId : newCommitParts.keySet()) {
+ Map<Pair<Long, Integer>, CommitPart> parts = newCommitParts.get(machineId);
+
+ // Iterate through all the parts for that machine Id
+ for (Pair<Long, Integer> partId : parts.keySet()) {
+ CommitPart part = parts.get(partId);
+
+ // Get the transaction object for that sequence number
+ Map<Long, Commit> commitForClientTable = liveCommitsTable.get(part.getMachineId());
+
+ if (commitForClientTable == NULL) {
+ // This is the first commit from this device
+ commitForClientTable = new HashMap<Long, Commit>();
+ liveCommitsTable.put(part.getMachineId(), commitForClientTable);
+ }
+
+ Commit commit = commitForClientTable.get(part.getSequenceNumber());
+
+ if (commit == NULL) {
+ // This is a new commit that we don't have yet, so make a new one
+ commit = new Commit();
+
+ // Insert this new commit into the live tables
+ commitForClientTable.put(part.getSequenceNumber(), commit);
+ }
+
+ // Add that part to the commit
+ commit.addPartDecode(part);
+ }
+ }
+
+ // Clear all the new commits parts in preparation for the next time the server sends slots
+ newCommitParts.clear();
+
+ // If we process a new commit keep track of it for future use
+ bool didProcessANewCommit = false;
+
+ // Process the commits one by one
+ for (Long arbitratorId : liveCommitsTable.keySet()) {
+
+ // Get all the commits for a specific arbitrator
+ Map<Long, Commit> commitForClientTable = liveCommitsTable.get(arbitratorId);
+
+ // Sort the commits in order
+ List<Long> commitSequenceNumbers = new ArrayList<Long>(commitForClientTable.keySet());
+ Collections.sort(commitSequenceNumbers);
+
+ // Get the last commit seen from this arbitrator
+ int64_t lastCommitSeenSequenceNumber = -1;
+ if (lastCommitSeenSequenceNumberByArbitratorTable.get(arbitratorId) != NULL) {
+ lastCommitSeenSequenceNumber = lastCommitSeenSequenceNumberByArbitratorTable.get(arbitratorId);
+ }
+
+ // Go through each new commit one by one
+ for (int i = 0; i < commitSequenceNumbers.size(); i++) {
+ Long commitSequenceNumber = commitSequenceNumbers.get(i);
+ Commit commit = commitForClientTable.get(commitSequenceNumber);
+
+ // Special processing if a commit is not complete
+ if (!commit.isComplete()) {
+ if (i == (commitSequenceNumbers.size() - 1)) {
+ // This incomplete commit is the latest one seen, so it cannot be processed yet and there are no later commits to process
+ break;
+ } else {
+ // This is a commit that was already dead but parts of it are still in the block chain (not flushed out yet).
+ // Delete it and move on
+ commit.setDead();
+ commitForClientTable.remove(commit.getSequenceNumber());
+ continue;
+ }
+ }
+
+ // Update the last transaction that was updated if we can
+ if (commit.getTransactionSequenceNumber() != -1) {
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(commit.getMachineId());
+
+ // Update the last transaction sequence number that the arbitrator arbitrated on
+ if ((lastTransactionNumber == NULL) || (lastTransactionNumber < commit.getTransactionSequenceNumber())) {
+ lastArbitratedTransactionNumberByArbitratorTable.put(commit.getMachineId(), commit.getTransactionSequenceNumber());
+ }
+ }
+
+ // Update the last arbitration data that we have seen so far
+ if (lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(commit.getMachineId()) != NULL) {
+
+ int64_t lastArbitrationSequenceNumber = lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(commit.getMachineId());
+ if (commit.getSequenceNumber() > lastArbitrationSequenceNumber) {
+ // Is larger
+ lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.put(commit.getMachineId(), commit.getSequenceNumber());
+ }
+ } else {
+ // Never seen any data from this arbitrator so record the first one
+ lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.put(commit.getMachineId(), commit.getSequenceNumber());
+ }
+
+ // We have already seen this commit before, so there is no need to do the full processing on it
+ if (commit.getSequenceNumber() <= lastCommitSeenSequenceNumber) {
+
+ // Update the last transaction that was updated if we can
+ if (commit.getTransactionSequenceNumber() != -1) {
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(commit.getMachineId());
+
+ // Update the last transaction sequence number that the arbitrator arbitrated on
+ if ((lastTransactionNumber == NULL) || (lastTransactionNumber < commit.getTransactionSequenceNumber())) {
+ lastArbitratedTransactionNumberByArbitratorTable.put(commit.getMachineId(), commit.getTransactionSequenceNumber());
+ }
+ }
+
+ continue;
+ }
+
+ // If we got here then this is a brand new commit and needs full processing
+
+ // Get what commits should be edited, these are the commits that have live values for their keys
+ Set<Commit> commitsToEdit = new HashSet<Commit>();
+ for (KeyValue kv : commit.getKeyValueUpdateSet()) {
+ commitsToEdit.add(liveCommitsByKeyTable.get(kv.getKey()));
+ }
+ commitsToEdit.remove(NULL); // remove NULL since it could be in this set
+
+ // Update each previous commit that needs to be updated
+ for (Commit previousCommit : commitsToEdit) {
+
+ // Only bother with live commits (TODO: Maybe remove this check)
+ if (previousCommit.isLive()) {
+
+ // Update which keys in the old commits are still live
+ for (KeyValue kv : commit.getKeyValueUpdateSet()) {
+ previousCommit.invalidateKey(kv.getKey());
+ }
+
+ // if the commit is now dead then remove it
+ if (!previousCommit.isLive()) {
+ commitForClientTable.remove(previousCommit.getSequenceNumber());
+ }
+ }
+ }
+
+ // Update the last seen sequence number from this arbitrator
+ if (lastCommitSeenSequenceNumberByArbitratorTable.get(commit.getMachineId()) != NULL) {
+ if (commit.getSequenceNumber() > lastCommitSeenSequenceNumberByArbitratorTable.get(commit.getMachineId())) {
+ lastCommitSeenSequenceNumberByArbitratorTable.put(commit.getMachineId(), commit.getSequenceNumber());
+ }
+ } else {
+ lastCommitSeenSequenceNumberByArbitratorTable.put(commit.getMachineId(), commit.getSequenceNumber());
+ }
+
+ // We processed a new commit that we haven't seen before
+ didProcessANewCommit = true;
+
+ // Update the committed table of keys and which commit is using which key
+ for (KeyValue kv : commit.getKeyValueUpdateSet()) {
+ committedKeyValueTable.put(kv.getKey(), kv);
+ liveCommitsByKeyTable.put(kv.getKey(), commit);
+ }
+ }
+ }
+
+ return didProcessANewCommit;
+ }
+
+ /**
+ * Create the speculative table from transactions that are still live and have come from the cloud
+ */
+ bool updateSpeculativeTable(bool didProcessNewCommits) {
+ if (liveTransactionBySequenceNumberTable.keySet().size() == 0) {
+ // There is nothing to speculate on
+ return false;
+ }
+
+ // Create a list of the transaction sequence numbers and sort them from oldest to newest
+ List<Long> transactionSequenceNumbersSorted = new ArrayList<Long>(liveTransactionBySequenceNumberTable.keySet());
+ Collections.sort(transactionSequenceNumbersSorted);
+
+ bool hasGapInTransactionSequenceNumbers = transactionSequenceNumbersSorted.get(0) != oldestTransactionSequenceNumberSpeculatedOn;
+
+
+ if (hasGapInTransactionSequenceNumbers || didProcessNewCommits) {
+ // If there is a gap in the transaction sequence numbers then there was a commit or an abort of a transaction
+ // OR there was a new commit (could be from an offline commit) so redo the speculation from scratch
+
+ // Start from scratch
+ speculatedKeyValueTable.clear();
+ lastTransactionSequenceNumberSpeculatedOn = -1;
+ oldestTransactionSequenceNumberSpeculatedOn = -1;
+
+ }
+
+ // Remember the front of the transaction list
+ oldestTransactionSequenceNumberSpeculatedOn = transactionSequenceNumbersSorted.get(0);
+
+ // Find where to start arbitration from
+ int startIndex = transactionSequenceNumbersSorted.indexOf(lastTransactionSequenceNumberSpeculatedOn) + 1;
+
+ if (startIndex >= transactionSequenceNumbersSorted.size()) {
+ // Make sure we are not out of bounds
+ return false; // did not speculate
+ }
+
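+ // Arbitrators that have an incomplete transaction in this pass; later transactions from them are skipped below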
+ Set<Long> incompleteTransactionArbitrator = new HashSet<Long>();
+ bool didSkip = true;
+
+ for (int i = startIndex; i < transactionSequenceNumbersSorted.size(); i++) {
+ int64_t transactionSequenceNumber = transactionSequenceNumbersSorted.get(i);
+ Transaction transaction = liveTransactionBySequenceNumberTable.get(transactionSequenceNumber);
+
+ if (!transaction.isComplete()) {
+ // If there is an incomplete transaction then there is nothing we can do
+ // add this transaction's arbitrator to the list of arbitrators we should ignore
+ incompleteTransactionArbitrator.add(transaction.getArbitrator());
+ didSkip = true;
+ continue;
+ }
+
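+ // An earlier transaction from this arbitrator was incomplete, so do not speculate on this one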
+ if (incompleteTransactionArbitrator.contains(transaction.getArbitrator())) {
+ continue;
+ }
+
+ lastTransactionSequenceNumberSpeculatedOn = transactionSequenceNumber;
+
+ if (transaction.evaluateGuard(committedKeyValueTable, speculatedKeyValueTable, NULL)) {
+ // Guard evaluated to true so update the speculative table
+ for (KeyValue kv : transaction.getKeyValueUpdateSet()) {
+ speculatedKeyValueTable.put(kv.getKey(), kv);
+ }
+ }
+ }
+
+ if (didSkip) {
+ // Since there was a skip we need to redo the speculation next time around
+ lastTransactionSequenceNumberSpeculatedOn = -1;
+ oldestTransactionSequenceNumberSpeculatedOn = -1;
+ }
+
+ // We did some speculation
+ return true;
+ }
+
+ /**
+ * Create the pending transaction speculative table from transactions that are still in the pending transaction buffer
+ */
+ void updatePendingTransactionSpeculativeTable(bool didProcessNewCommitsOrSpeculate) {
+ if (pendingTransactionQueue.size() == 0) {
+ // There is nothing to speculate on
+ return;
+ }
+
+
+ if (didProcessNewCommitsOrSpeculate || (firstPendingTransaction != pendingTransactionQueue.get(0))) {
+ // Need to reset the pending speculation
+ lastPendingTransactionSpeculatedOn = NULL;
+ firstPendingTransaction = pendingTransactionQueue.get(0);
+ pendingTransactionSpeculatedKeyValueTable.clear();
+ }
+
+ // Find where to start arbitration from
+ int startIndex = pendingTransactionQueue.indexOf(firstPendingTransaction) + 1;
+
+ if (startIndex >= pendingTransactionQueue.size()) {
+ // Make sure we are not out of bounds
+ return;
+ }
+
+ for (int i = startIndex; i < pendingTransactionQueue.size(); i++) {
+ Transaction transaction = pendingTransactionQueue.get(i);
+
+ lastPendingTransactionSpeculatedOn = transaction;
+
+ if (transaction.evaluateGuard(committedKeyValueTable, speculatedKeyValueTable, pendingTransactionSpeculatedKeyValueTable)) {
+ // Guard evaluated to true so update the speculative table
+ for (KeyValue kv : transaction.getKeyValueUpdateSet()) {
+ pendingTransactionSpeculatedKeyValueTable.put(kv.getKey(), kv);
+ }
+ }
+ }
+ }
+
+ /**
+ * Set dead and remove from the live transaction tables any transactions that are now dead
+ */
+ void updateLiveTransactionsAndStatus() {
+
+ // Go through each of the transactions
+ for (Iterator<Map.Entry<Long, Transaction>> iter = liveTransactionBySequenceNumberTable.entrySet().iterator(); iter.hasNext();) {
+ Transaction transaction = iter.next().getValue();
+
+ // Check if the transaction is dead
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(transaction.getArbitrator());
+ if ((lastTransactionNumber != NULL) && (lastTransactionNumber >= transaction.getSequenceNumber())) {
+
+ // Set dead the transaction
+ transaction.setDead();
+
+ // Remove the transaction from the live table
+ iter.remove();
+ liveTransactionByTransactionIdTable.remove(transaction.getId());
+ }
+ }
+
+ // Go through each of the transactions
+ for (Iterator<Map.Entry<Long, TransactionStatus>> iter = outstandingTransactionStatus.entrySet().iterator(); iter.hasNext();) {
+ TransactionStatus status = iter.next().getValue();
+
+ // Check if the transaction is dead
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(status.getTransactionArbitrator());
+ if ((lastTransactionNumber != NULL) && (lastTransactionNumber >= status.getTransactionSequenceNumber())) {
+
+ // Set committed
+ status.setStatus(TransactionStatus.StatusCommitted);
+
+ // Remove
+ iter.remove();
+ }
+ }
+ }
+
+ /**
+ * Process this slot, entry by entry. Also update the last message seen from the machine that sent this slot
+ */
+ void processSlot(SlotIndexer indexer, Slot slot, bool acceptUpdatesToLocal, HashSet<Long> machineSet) {
+
+ // Update the last message seen
+ updateLastMessage(slot.getMachineID(), slot.getSequenceNumber(), slot, acceptUpdatesToLocal, machineSet);
+
+ // Process each entry in the slot
+ for (Entry entry : slot.getEntries()) {
+ switch (entry.getType()) {
+
+ case Entry.TypeCommitPart:
+ processEntry((CommitPart)entry);
+ break;
+
+ case Entry.TypeAbort:
+ processEntry((Abort)entry);
+ break;
+
+ case Entry.TypeTransactionPart:
+ processEntry((TransactionPart)entry);
+ break;
+
+ case Entry.TypeNewKey:
+ processEntry((NewKey)entry);
+ break;
+
+ case Entry.TypeLastMessage:
+ processEntry((LastMessage)entry, machineSet);
+ break;
+
+ case Entry.TypeRejectedMessage:
+ processEntry((RejectedMessage)entry, indexer);
+ break;
+
+ case Entry.TypeTableStatus:
+ processEntry((TableStatus)entry, slot.getSequenceNumber());
+ break;
+
+ default:
+ throw new Error("Unrecognized type: " + entry.getType());
+ }
+ }
+ }
+
+ /**
+ * Update the last message that was sent for a machine Id
+ */
+ void processEntry(LastMessage entry, HashSet<Long> machineSet) {
+ // Update what the last message received by a machine was
+ updateLastMessage(entry.getMachineID(), entry.getSequenceNumber(), entry, false, machineSet);
+ }
+
+ /**
+ * Add the new key to the arbitrators table and update the set of live new keys (in case of a rescued new key message)
+ */
+ void processEntry(NewKey entry) {
+
+ // Update the arbitrator table with the new key information
+ arbitratorTable.put(entry.getKey(), entry.getMachineID());
+
+ // Update what the latest live new key is
+ NewKey oldNewKey = liveNewKeyTable.put(entry.getKey(), entry);
+ if (oldNewKey != NULL) {
+ // Delete the old new key messages
+ oldNewKey.setDead();
+ }
+ }
+
+ /**
+ * Process new table status entries and set dead the old ones as new ones come in.
+ * It keeps track of the largest and smallest table status seen in this current round
+ * of updating the local copy of the block chain.
+ */
+ void processEntry(TableStatus entry, int64_t seq) {
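+ // Track the table size reported by this table status entry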
+ int newNumSlots = entry.getMaxSlots();
+ updateCurrMaxSize(newNumSlots);
+
+ initExpectedSize(seq, newNumSlots);
+
+ if (liveTableStatus != NULL) {
+ // We have a larger table status so the old table status is no longer alive
+ liveTableStatus.setDead();
+ }
+
+ // Make this new table status the latest alive table status
+ liveTableStatus = entry;
+ }
+
+ /**
+ * Check old messages to see if there is a block chain violation. Also build the watch set of clients that still need to see this rejected message.
+ */
+ void processEntry(RejectedMessage entry, SlotIndexer indexer) {
+ int64_t oldSeqNum = entry.getOldSeqNum();
+ int64_t newSeqNum = entry.getNewSeqNum();
+ bool isequal = entry.getEqual();
+ int64_t machineId = entry.getMachineID();
+ int64_t seq = entry.getSequenceNumber();
+
+
+ // Check if we have messages that were supposed to be rejected in our local block chain
+ for (int64_t seqNum = oldSeqNum; seqNum <= newSeqNum; seqNum++) {
+
+ // Get the slot
+ Slot slot = indexer.getSlot(seqNum);
+
+ if (slot != NULL) {
+ // If we have this slot make sure that it was not supposed to be a rejected slot
+
+ int64_t slotMachineId = slot.getMachineID();
+ if (isequal != (slotMachineId == machineId)) {
+ throw new Error("Server Error: Trying to insert rejected message for slot " + seqNum);
+ }
+ }
+ }
+
+
+ // Create a list of clients to watch until they see this rejected message entry.
+ HashSet<Long> deviceWatchSet = new HashSet<Long>();
+ for (Map.Entry<Long, Pair<Long, Liveness>> lastMessageEntry : lastMessageTable.entrySet()) {
+
+ // Machine ID for the last message entry
+ int64_t lastMessageEntryMachineId = lastMessageEntry.getKey();
+
+ // We've seen it, don't need to continue to watch. Our next
+ // message will implicitly acknowledge it.
+ if (lastMessageEntryMachineId == localMachineId) {
+ continue;
+ }
+
+ Pair<Long, Liveness> lastMessageValue = lastMessageEntry.getValue();
+ int64_t entrySequenceNumber = lastMessageValue.getFirst();
+
+ if (entrySequenceNumber < seq) {
+
+ // Add this rejected message to the set of messages that this machine ID did not see yet
+ addWatchList(lastMessageEntryMachineId, entry);
+
+ // This client did not see this rejected message yet so add it to the watch set to monitor
+ deviceWatchSet.add(lastMessageEntryMachineId);
+ }
+ }
+
+ if (deviceWatchSet.isEmpty()) {
+ // This rejected message has been seen by all the clients so
+ entry.setDead();
+ } else {
+ // We need to watch this rejected message
+ entry.setWatchSet(deviceWatchSet);
+ }
+ }
+
+ /**
+ * Check if this abort is live, if not then save it so we can kill it later.
+ * Also update the last transaction number that was arbitrated on.
+ */
+ void processEntry(Abort entry) {
+
+
+ if (entry.getTransactionSequenceNumber() != -1) {
+ // update the transaction status if it was sent to the server
+ TransactionStatus status = outstandingTransactionStatus.remove(entry.getTransactionSequenceNumber());
+ if (status != NULL) {
+ status.setStatus(TransactionStatus.StatusAborted);
+ }
+ }
+
+ // Abort has not been seen by the client it is for yet so we need to keep track of it
+ Abort previouslySeenAbort = liveAbortTable.put(entry.getAbortId(), entry);
+ if (previouslySeenAbort != NULL) {
+ previouslySeenAbort.setDead(); // Delete old version of the abort since we got a rescued newer version
+ }
+
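+ // Keep track of aborts that this machine arbitrated so they can later be sent to local clients that have not seen them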
+ if (entry.getTransactionArbitrator() == localMachineId) {
+ liveAbortsGeneratedByLocal.put(entry.getArbitratorLocalSequenceNumber(), entry);
+ }
+
+ if ((entry.getSequenceNumber() != -1) && (lastMessageTable.get(entry.getTransactionMachineId()).getFirst() >= entry.getSequenceNumber())) {
+
+ // The machine already saw this so it is dead
+ entry.setDead();
+ liveAbortTable.remove(entry.getAbortId());
+
+ if (entry.getTransactionArbitrator() == localMachineId) {
+ liveAbortsGeneratedByLocal.remove(entry.getArbitratorLocalSequenceNumber());
+ }
+
+ return;
+ }
+
+
+
+
+ // Update the last arbitration data that we have seen so far
+ if (lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(entry.getTransactionArbitrator()) != NULL) {
+
+ int64_t lastArbitrationSequenceNumber = lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(entry.getTransactionArbitrator());
+ if (entry.getSequenceNumber() > lastArbitrationSequenceNumber) {
+ // Is larger
+ lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.put(entry.getTransactionArbitrator(), entry.getSequenceNumber());
+ }
+ } else {
+ // Never seen any data from this arbitrator so record the first one
+ lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.put(entry.getTransactionArbitrator(), entry.getSequenceNumber());
+ }
+
+
+ // Set dead a transaction if we can
+ Transaction transactionToSetDead = liveTransactionByTransactionIdTable.remove(new Pair<Long, Long>(entry.getTransactionMachineId(), entry.getTransactionClientLocalSequenceNumber()));
+ if (transactionToSetDead != NULL) {
+ liveTransactionBySequenceNumberTable.remove(transactionToSetDead.getSequenceNumber());
+ }
+
+ // Update the last transaction sequence number that the arbitrator arbitrated on
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(entry.getTransactionArbitrator());
+ if ((lastTransactionNumber == NULL) || (lastTransactionNumber < entry.getTransactionSequenceNumber())) {
+
+ // Is a valid one
+ if (entry.getTransactionSequenceNumber() != -1) {
+ lastArbitratedTransactionNumberByArbitratorTable.put(entry.getTransactionArbitrator(), entry.getTransactionSequenceNumber());
+ }
+ }
+ }
+
+ /**
+ * Set dead the transaction part if that transaction is dead and keep track of all new parts
+ */
+ void processEntry(TransactionPart entry) {
+ // Check if this transaction has already been arbitrated on; if so this part is dead
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(entry.getArbitratorId());
+ if ((lastTransactionNumber != NULL) && (lastTransactionNumber >= entry.getSequenceNumber())) {
+ // This transaction is dead, it was already committed or aborted
+ entry.setDead();
+ return;
+ }
+
+ // This part is still alive
+ Map<Pair<Long, Integer>, TransactionPart> transactionPart = newTransactionParts.get(entry.getMachineId());
+
+ if (transactionPart == NULL) {
+ // Don't have a table for this machine Id yet so make one
+ transactionPart = new HashMap<Pair<Long, Integer>, TransactionPart>();
+ newTransactionParts.put(entry.getMachineId(), transactionPart);
+ }
+
+ // Update the part and set dead ones we have already seen (got a rescued version)
+ TransactionPart previouslySeenPart = transactionPart.put(entry.getPartId(), entry);
+ if (previouslySeenPart != NULL) {
+ previouslySeenPart.setDead();
+ }
+ }
+
+ /**
+ * Process new commit entries and save them for future use. Delete duplicates
+ */
+ void processEntry(CommitPart entry) {
+
+
+ // Update the last transaction that was updated if we can
+ if (entry.getTransactionSequenceNumber() != -1) {
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(entry.getMachineId());
+
+ // Update the last transaction sequence number that the arbitrator arbitrated on
+ if ((lastTransactionNumber == NULL) || (lastTransactionNumber < entry.getTransactionSequenceNumber())) {
+ lastArbitratedTransactionNumberByArbitratorTable.put(entry.getMachineId(), entry.getTransactionSequenceNumber());
+ }
+ }
+
+
+
+
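+ // Group this commit part with the other parts from the same machine so the full commit can be assembled later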
+ Map<Pair<Long, Integer>, CommitPart> commitPart = newCommitParts.get(entry.getMachineId());
+
+ if (commitPart == NULL) {
+ // Don't have a table for this machine Id yet so make one
+ commitPart = new HashMap<Pair<Long, Integer>, CommitPart>();
+ newCommitParts.put(entry.getMachineId(), commitPart);
+ }
+
+ // Update the part and set dead ones we have already seen (got a rescued version)
+ CommitPart previouslySeenPart = commitPart.put(entry.getPartId(), entry);
+ if (previouslySeenPart != NULL) {
+ previouslySeenPart.setDead();
+ }
+ }
+
+ /**
+ * Update the last message seen table. Update and set dead the appropriate RejectedMessages as clients see them.
+ * Update the live aborts: set dead and remove the ones whose target client has now seen them.
+ * Check that the last message seen is consistent: there is no mismatch on our own last message and no
+ * rollback on another client's last message.
+ */
+ void updateLastMessage(int64_t machineId, int64_t seqNum, Liveness liveness, bool acceptUpdatesToLocal, HashSet<Long> machineSet) {
+
+ // We have seen this machine ID
+ machineSet.remove(machineId);
+
+ // Get the set of rejected messages that this machine Id has not seen yet
+ HashSet<RejectedMessage> watchset = rejectedMessageWatchListTable.get(machineId);
+
+ // If there is a rejected message that this machine Id has not seen yet
+ if (watchset != NULL) {
+
+ // Go through each rejected message that this machine Id has not seen yet
+ for (Iterator<RejectedMessage> rmit = watchset.iterator(); rmit.hasNext(); ) {
+
+ RejectedMessage rm = rmit.next();
+
+ // If this machine Id has seen this rejected message...
+ if (rm.getSequenceNumber() <= seqNum) {
+
+ // Remove it from our watchlist
+ rmit.remove();
+
+ // Decrement machines that need to see this notification
+ rm.removeWatcher(machineId);
+ }
+ }
+ }
+
+ // Set dead the aborts that this machine has now seen
+ for (Iterator<Map.Entry<Pair<Long, Long>, Abort>> i = liveAbortTable.entrySet().iterator(); i.hasNext();) {
+ Abort abort = i.next().getValue();
+
+ if ((abort.getTransactionMachineId() == machineId) && (abort.getSequenceNumber() <= seqNum)) {
+ abort.setDead();
+ i.remove();
+
+ if (abort.getTransactionArbitrator() == localMachineId) {
+ liveAbortsGeneratedByLocal.remove(abort.getArbitratorLocalSequenceNumber());
+ }
+ }
+ }
+
+
+
+ if (machineId == localMachineId) {
+ // Our own messages are immediately dead.
+ if (liveness instanceof LastMessage) {
+ ((LastMessage)liveness).setDead();
+ } else if (liveness instanceof Slot) {
+ ((Slot)liveness).setDead();
+ } else {
+ throw new Error("Unrecognized type");
+ }
+ }
+
+ // Get the old last message for this device
+ Pair<Long, Liveness> lastMessageEntry = lastMessageTable.put(machineId, new Pair<Long, Liveness>(seqNum, liveness));
+ if (lastMessageEntry == NULL) {
+ // If no last message then there is nothing else to process
+ return;
+ }
+
+ int64_t lastMessageSeqNum = lastMessageEntry.getFirst();
+ Liveness lastEntry = lastMessageEntry.getSecond();
+
+ // If it is not our machine Id since we already set ours to dead
+ if (machineId != localMachineId) {
+ if (lastEntry instanceof LastMessage) {
+ ((LastMessage)lastEntry).setDead();
+ } else if (lastEntry instanceof Slot) {
+ ((Slot)lastEntry).setDead();
+ } else {
+ throw new Error("Unrecognized type");
+ }
+ }
+
+ // Make sure the server is not playing any games
+ if (machineId == localMachineId) {
+
+ if (hadPartialSendToServer) {
+ // If we are not accepting updates to our own state then the sequence number must be at least what we last recorded
+ if (lastMessageSeqNum > seqNum && !acceptUpdatesToLocal) {
+ throw new Error("Server Error: Mismatch on local machine sequence number, needed at least: " + lastMessageSeqNum + " got: " + seqNum);
+ }
+
+ } else {
+ // With no partial send, if we are not accepting updates to our own state then the sequence numbers must match exactly
+ if (lastMessageSeqNum != seqNum && !acceptUpdatesToLocal) {
+ throw new Error("Server Error: Mismatch on local machine sequence number, needed: " + lastMessageSeqNum + " got: " + seqNum);
+ }
+ }
+ } else {
+ if (lastMessageSeqNum > seqNum) {
+ throw new Error("Server Error: Rollback on remote machine sequence number");
+ }
+ }
+ }
+
+ /**
+ * Add a rejected message entry to the watch set to keep track of which clients have seen that
+ * rejected message entry and which have not.
+ */
+ void addWatchList(int64_t machineId, RejectedMessage entry) {
+ HashSet<RejectedMessage> entries = rejectedMessageWatchListTable.get(machineId);
+ if (entries == NULL) {
+ // There is no set for this machine ID yet so create one
+ entries = new HashSet<RejectedMessage>();
+ rejectedMessageWatchListTable.put(machineId, entries);
+ }
+ entries.add(entry);
+ }
+
+ /**
+ * Check if the HMAC chain is not violated
+ */
+ void checkHMACChain(SlotIndexer indexer, Slot[] newSlots) {
+ for (int i = 0; i < newSlots.length; i++) {
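+ // Each slot must record the HMAC of the slot before it; any mismatch means the chain from the server is invalid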
+ Slot currSlot = newSlots[i];
+ Slot prevSlot = indexer.getSlot(currSlot.getSequenceNumber() - 1);
+ if (prevSlot != NULL &&
+ !Arrays.equals(prevSlot.getHMAC(), currSlot.getPrevHMAC()))
+ throw new Error("Server Error: Invalid HMAC Chain" + currSlot + " " + prevSlot);
+ }
+ }
+}
--- /dev/null
+
+
+/**
+ * IoTTable data structure. Provides client interface.
+ * @author Brian Demsky
+ * @version 1.0
+ */
+
+final public class Table {
+
+ /* Constants */
+ static final int FREE_SLOTS = 2; // Number of slots that should be kept free // 10
+ static final int SKIP_THRESHOLD = 10;
+ static final double RESIZE_MULTIPLE = 1.2;
+ static final double RESIZE_THRESHOLD = 0.75;
+ static final int REJECTED_THRESHOLD = 5;
+
+ /* Helper Objects */
+ private SlotBuffer buffer = NULL;
+ private CloudComm cloud = NULL;
+ private Random random = NULL;
+ private TableStatus liveTableStatus = NULL;
+ private PendingTransaction pendingTransactionBuilder = NULL; // Pending Transaction used in building a Pending Transaction
+ private Transaction lastPendingTransactionSpeculatedOn = NULL; // Last transaction that was speculated on from the pending transaction
+ private Transaction firstPendingTransaction = NULL; // first transaction in the pending transaction list
+
+ /* Variables */
+ private int numberOfSlots = 0; // Number of slots stored in buffer
+ private int bufferResizeThreshold = 0; // Threshold on the number of live slots before a resize is needed
+ private int64_t liveSlotCount = 0; // Number of currently live slots
+ private int64_t oldestLiveSlotSequenceNumver = 0; // Smallest sequence number of the slot with a live entry
+ private int64_t localMachineId = 0; // Machine ID of this client device
+ private int64_t sequenceNumber = 0; // Largest sequence number a client has received
+ private int64_t localSequenceNumber = 0;
+
+ // private int smallestTableStatusSeen = -1; // Smallest Table Status that was seen in the latest slots sent from the server
+ // private int largestTableStatusSeen = -1; // Largest Table Status that was seen in the latest slots sent from the server
+ private int64_t localTransactionSequenceNumber = 0; // Local sequence number counter for transactions
+ private int64_t lastTransactionSequenceNumberSpeculatedOn = -1; // the last transaction that was speculated on
+ private int64_t oldestTransactionSequenceNumberSpeculatedOn = -1; // the oldest transaction that was speculated on
+ private int64_t localArbitrationSequenceNumber = 0;
+ private bool hadPartialSendToServer = false; // A send to the server may have partially completed
+ private bool attemptedToSendToServer = false; // Whether a send to the server has been attempted
+ private int64_t expectedsize;
+ private bool didFindTableStatus = false;
+ private int64_t currMaxSize = 0;
+
+ private Slot lastSlotAttemptedToSend = NULL;
+ private bool lastIsNewKey = false;
+ private int lastNewSize = 0;
+ private Map<Transaction, List<Integer>> lastTransactionPartsSent = NULL;
+ private List<Entry> lastPendingSendArbitrationEntriesToDelete = NULL;
+ private NewKey lastNewKey = NULL;
+
+
+ /* Data Structures */
+ private Map<IoTString, KeyValue> committedKeyValueTable = NULL; // Table of committed key value pairs
+ private Map<IoTString, KeyValue> speculatedKeyValueTable = NULL; // Table of speculated key value pairs, if there is a speculative value
+ private Map<IoTString, KeyValue> pendingTransactionSpeculatedKeyValueTable = NULL; // Table of speculated key value pairs, if there is a speculative value from the pending transactions
+ private Map<IoTString, NewKey> liveNewKeyTable = NULL; // Table of live new keys
+ private HashMap<Long, Pair<Long, Liveness>> lastMessageTable = NULL; // Last message sent by a client machine id -> (Seq Num, Slot or LastMessage);
+ private HashMap<Long, HashSet<RejectedMessage>> rejectedMessageWatchListTable = NULL; // Table of machine Ids and the set of rejected messages they have not seen yet
+ private Map<IoTString, Long> arbitratorTable = NULL; // Table of keys and their arbitrators
+ private Map<Pair<Long, Long>, Abort> liveAbortTable = NULL; // Table of live abort messages
+ private Map<Long, Map<Pair<Long, Integer>, TransactionPart>> newTransactionParts = NULL; // transaction parts that are seen in this latest round of slots from the server
+ private Map<Long, Map<Pair<Long, Integer>, CommitPart>> newCommitParts = NULL; // commit parts that are seen in this latest round of slots from the server
+ private Map<Long, Long> lastArbitratedTransactionNumberByArbitratorTable = NULL; // Last transaction sequence number that an arbitrator arbitrated on
+ private Map<Long, Transaction> liveTransactionBySequenceNumberTable = NULL; // live transaction grouped by the sequence number
+ private Map<Pair<Long, Long>, Transaction> liveTransactionByTransactionIdTable = NULL; // live transaction grouped by the transaction ID
+ private Map<Long, Map<Long, Commit>> liveCommitsTable = NULL; // Live commits indexed by machine Id and then by commit sequence number
+ private Map<IoTString, Commit> liveCommitsByKeyTable = NULL; // Commit that currently holds the live value for each key
+ private Map<Long, Long> lastCommitSeenSequenceNumberByArbitratorTable = NULL; // Last commit sequence number seen from each arbitrator
+ private Vector<Long> rejectedSlotList = NULL; // List of rejected slots that have yet to be sent to the server
+ private List<Transaction> pendingTransactionQueue = NULL;
+ private List<ArbitrationRound> pendingSendArbitrationRounds = NULL;
+ private List<Entry> pendingSendArbitrationEntriesToDelete = NULL;
+ private Map<Transaction, List<Integer>> transactionPartsSent = NULL;
+ private Map<Long, TransactionStatus> outstandingTransactionStatus = NULL;
+ private Map<Long, Abort> liveAbortsGeneratedByLocal = NULL;
+ private Set<Pair<Long, Long>> offlineTransactionsCommittedAndAtServer = NULL;
+ private Map<Long, Pair<String, Integer>> localCommunicationTable = NULL;
+ private Map<Long, Long> lastTransactionSeenFromMachineFromServer = NULL;
+ private Map<Long, Long> lastArbitrationDataLocalSequenceNumberSeenFromArbitrator = NULL;
+
+
+ public Table(String baseurl, String password, int64_t _localMachineId, int listeningPort) {
+ localMachineId = _localMachineId;
+ cloud = new CloudComm(this, baseurl, password, listeningPort);
+
+ init();
+ }
+
+ public Table(CloudComm _cloud, int64_t _localMachineId) {
+ localMachineId = _localMachineId;
+ cloud = _cloud;
+
+ init();
+ }
+
+ /**
+ * Init all the stuff needed for table usage
+ */
+ private void init() {
+
+ // Init helper objects
+ random = new Random();
+ buffer = new SlotBuffer();
+
+ // Set Variables
+ oldestLiveSlotSequenceNumver = 1;
+
+ // init data structs
+ committedKeyValueTable = new HashMap<IoTString, KeyValue>();
+ speculatedKeyValueTable = new HashMap<IoTString, KeyValue>();
+ pendingTransactionSpeculatedKeyValueTable = new HashMap<IoTString, KeyValue>();
+ liveNewKeyTable = new HashMap<IoTString, NewKey>();
+ lastMessageTable = new HashMap<Long, Pair<Long, Liveness>>();
+ rejectedMessageWatchListTable = new HashMap<Long, HashSet<RejectedMessage>>();
+ arbitratorTable = new HashMap<IoTString, Long>();
+ liveAbortTable = new HashMap<Pair<Long, Long>, Abort>();
+ newTransactionParts = new HashMap<Long, Map<Pair<Long, Integer>, TransactionPart>>();
+ newCommitParts = new HashMap<Long, Map<Pair<Long, Integer>, CommitPart>>();
+ lastArbitratedTransactionNumberByArbitratorTable = new HashMap<Long, Long>();
+ liveTransactionBySequenceNumberTable = new HashMap<Long, Transaction>();
+ liveTransactionByTransactionIdTable = new HashMap<Pair<Long, Long>, Transaction>();
+ liveCommitsTable = new HashMap<Long, Map<Long, Commit>>();
+ liveCommitsByKeyTable = new HashMap<IoTString, Commit>();
+ lastCommitSeenSequenceNumberByArbitratorTable = new HashMap<Long, Long>();
+ rejectedSlotList = new Vector<Long>();
+ pendingTransactionQueue = new ArrayList<Transaction>();
+ pendingSendArbitrationEntriesToDelete = new ArrayList<Entry>();
+ transactionPartsSent = new HashMap<Transaction, List<Integer>>();
+ outstandingTransactionStatus = new HashMap<Long, TransactionStatus>();
+ liveAbortsGeneratedByLocal = new HashMap<Long, Abort>();
+ offlineTransactionsCommittedAndAtServer = new HashSet<Pair<Long, Long>>();
+ localCommunicationTable = new HashMap<Long, Pair<String, Integer>>();
+ lastTransactionSeenFromMachineFromServer = new HashMap<Long, Long>();
+ pendingSendArbitrationRounds = new ArrayList<ArbitrationRound>();
+ lastArbitrationDataLocalSequenceNumberSeenFromArbitrator = new HashMap<Long, Long>();
+
+
+ // Other init stuff
+ numberOfSlots = buffer.capacity();
+ setResizeThreshold();
+ }
+
+ // TODO: delete method
+ public synchronized void printSlots() {
+ int64_t o = buffer.getOldestSeqNum();
+ int64_t n = buffer.getNewestSeqNum();
+
+ int[] types = new int[10];
+
+ int num = 0;
+
+ int livec = 0;
+ int deadc = 0;
+
+ int rejectedCount = 0;
+
+ int liveslo = 0;
+
+ for (int64_t i = o; i < (n + 1); i++) {
+ Slot s = buffer.getSlot(i);
+
+
+ if (s.isLive()) {
+ liveslo++;
+ }
+
+ Vector<Entry> entries = s.getEntries();
+
+ for (Entry e : entries) {
+ if (e.isLive()) {
+ int type = e.getType();
+
+
+ if (type == 6) {
+ RejectedMessage rej = (RejectedMessage)e;
+ rejectedCount++;
+
+ System.out.println(rej.getMachineID());
+ }
+
+
+ types[type] = types[type] + 1;
+ num++;
+ livec++;
+ } else {
+ deadc++;
+ }
+ }
+ }
+
+ for (int i = 0; i < 10; i++) {
+ System.out.println(i + " " + types[i]);
+ }
+ System.out.println("Live count: " + livec);
+ System.out.println("Live Slot count: " + liveslo);
+
+ System.out.println("Dead count: " + deadc);
+ System.out.println("Old: " + o);
+ System.out.println("New: " + n);
+ System.out.println("Size: " + buffer.size());
+ // System.out.println("Commits: " + liveCommitsTable.size());
+ System.out.println("pendingTrans: " + pendingTransactionQueue.size());
+ System.out.println("Trans Status Out: " + outstandingTransactionStatus.size());
+
+ for (Long k : lastArbitratedTransactionNumberByArbitratorTable.keySet()) {
+ System.out.println(k + ": " + lastArbitratedTransactionNumberByArbitratorTable.get(k));
+ }
+
+
+ for (Long a : liveCommitsTable.keySet()) {
+ for (Long b : liveCommitsTable.get(a).keySet()) {
+ for (KeyValue kv : liveCommitsTable.get(a).get(b).getKeyValueUpdateSet()) {
+ System.out.print(kv + " ");
+ }
+ System.out.print("|| ");
+ }
+ System.out.println();
+ }
+
+ }
+
+ /**
+ * Initialize the table by inserting a table status entry as the first entry in the block chain.
+ * Also initialize the crypto state.
+ */
+ public synchronized void initTable() throws ServerException {
+ cloud.initSecurity();
+
+ // Create the first insertion into the block chain which is the table status
+ Slot s = new Slot(this, 1, localMachineId, localSequenceNumber);
+ localSequenceNumber++;
+ TableStatus status = new TableStatus(s, numberOfSlots);
+ s.addEntry(status);
+ Slot[] array = cloud.putSlot(s, numberOfSlots);
+
+ if (array == NULL) {
+ array = new Slot[] {s};
+ // update local block chain
+ validateAndUpdate(array, true);
+ } else if (array.length == 1) {
+ // in case we did push the slot BUT we failed to init it
+ validateAndUpdate(array, true);
+ } else {
+ throw new Error("Error on initialization");
+ }
+ }
+
+ /**
+ * Rebuild the table from scratch by pulling the latest block chain from the server.
+ */
+ public synchronized void rebuild() throws ServerException {
+ // Just pull the latest slots from the server
+ Slot[] newslots = cloud.getSlots(sequenceNumber + 1);
+ validateAndUpdate(newslots, true);
+ sendToServer(NULL);
+ updateLiveTransactionsAndStatus();
+
+ }
+
+ // public String toString() {
+ // String retString = " Committed Table: \n";
+ // retString += "---------------------------\n";
+ // retString += commitedTable.toString();
+
+ // retString += "\n\n";
+
+ // retString += " Speculative Table: \n";
+ // retString += "---------------------------\n";
+ // retString += speculativeTable.toString();
+
+ // return retString;
+ // }
+
+ public synchronized void addLocalCommunication(int64_t arbitrator, String hostName, int portNumber) {
+ localCommunicationTable.put(arbitrator, new Pair<String, Integer>(hostName, portNumber));
+ }
+
+ public synchronized Long getArbitrator(IoTString key) {
+ return arbitratorTable.get(key);
+ }
+
+ public synchronized void close() {
+ cloud.close();
+ }
+
+ public synchronized IoTString getCommitted(IoTString key) {
+ KeyValue kv = committedKeyValueTable.get(key);
+
+ if (kv != NULL) {
+ return kv.getValue();
+ } else {
+ return NULL;
+ }
+ }
+
+ public synchronized IoTString getSpeculative(IoTString key) {
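+ // Prefer the pending-transaction speculation, then the cloud speculation, then the committed value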
+ KeyValue kv = pendingTransactionSpeculatedKeyValueTable.get(key);
+
+ if (kv == NULL) {
+ kv = speculatedKeyValueTable.get(key);
+ }
+
+ if (kv == NULL) {
+ kv = committedKeyValueTable.get(key);
+ }
+
+ if (kv != NULL) {
+ return kv.getValue();
+ } else {
+ return NULL;
+ }
+ }
+
+ public synchronized IoTString getCommittedAtomic(IoTString key) {
+ KeyValue kv = committedKeyValueTable.get(key);
+
+ if (arbitratorTable.get(key) == NULL) {
+ throw new Error("Key not Found.");
+ }
+
+ // Make sure new key value pair matches the current arbitrator
+ if (!pendingTransactionBuilder.checkArbitrator(arbitratorTable.get(key))) {
+ // TODO: Maybe not throw an error
+ throw new Error("Not all Key Values Match Arbitrator.");
+ }
+
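+ // Record the value that was read as a guard on the pending transaction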
+ if (kv != NULL) {
+ pendingTransactionBuilder.addKVGuard(new KeyValue(key, kv.getValue()));
+ return kv.getValue();
+ } else {
+ pendingTransactionBuilder.addKVGuard(new KeyValue(key, NULL));
+ return NULL;
+ }
+ }
+
+ public synchronized IoTString getSpeculativeAtomic(IoTString key) {
+ if (arbitratorTable.get(key) == NULL) {
+ throw new Error("Key not Found.");
+ }
+
+ // Make sure new key value pair matches the current arbitrator
+ if (!pendingTransactionBuilder.checkArbitrator(arbitratorTable.get(key))) {
+ // TODO: Maybe not throw an error
+ throw new Error("Not all Key Values Match Arbitrator.");
+ }
+
+ KeyValue kv = pendingTransactionSpeculatedKeyValueTable.get(key);
+
+ if (kv == NULL) {
+ kv = speculatedKeyValueTable.get(key);
+ }
+
+ if (kv == NULL) {
+ kv = committedKeyValueTable.get(key);
+ }
+
+ if (kv != NULL) {
+ pendingTransactionBuilder.addKVGuard(new KeyValue(key, kv.getValue()));
+ return kv.getValue();
+ } else {
+ pendingTransactionBuilder.addKVGuard(new KeyValue(key, NULL));
+ return NULL;
+ }
+ }
+
+ public synchronized bool update() {
+ try {
+ Slot[] newSlots = cloud.getSlots(sequenceNumber + 1);
+ validateAndUpdate(newSlots, false);
+ sendToServer(NULL);
+
+
+ updateLiveTransactionsAndStatus();
+
+ return true;
+ } catch (Exception e) {
+ // e.printStackTrace();
+
+ for (Long m : localCommunicationTable.keySet()) {
+ updateFromLocal(m);
+ }
+ }
+
+ return false;
+ }
+
+ public synchronized bool createNewKey(IoTString keyName, int64_t machineId) throws ServerException {
+ while (true) {
+ if (arbitratorTable.get(keyName) != NULL) {
+ // There is already an arbitrator
+ return false;
+ }
+
+ NewKey newKey = new NewKey(NULL, keyName, machineId);
+
+ if (sendToServer(newKey)) {
+ // If successfully inserted
+ return true;
+ }
+ }
+ }
+
+ public synchronized void startTransaction() {
+ // Create a new transaction, invalidates any old pending transactions.
+ pendingTransactionBuilder = new PendingTransaction(localMachineId);
+ }
+
+ public synchronized void addKV(IoTString key, IoTString value) {
+
+ // Make sure it is a valid key
+ if (arbitratorTable.get(key) == NULL) {
+ throw new Error("Key not Found.");
+ }
+
+ // Make sure new key value pair matches the current arbitrator
+ if (!pendingTransactionBuilder.checkArbitrator(arbitratorTable.get(key))) {
+ // TODO: Maybe not throw an error
+ throw new Error("Not all Key Values Match Arbitrator.");
+ }
+
+ // Add the key value to this transaction
+ KeyValue kv = new KeyValue(key, value);
+ pendingTransactionBuilder.addKV(kv);
+ }
+
+ public synchronized TransactionStatus commitTransaction() {
+
+ if (pendingTransactionBuilder.getKVUpdates().size() == 0) {
+ // A transaction with no updates has no effect on the system
+ return new TransactionStatus(TransactionStatus.StatusNoEffect, -1);
+ }
+
+ // Set the local transaction sequence number and increment
+ pendingTransactionBuilder.setClientLocalSequenceNumber(localTransactionSequenceNumber);
+ localTransactionSequenceNumber++;
+
+ // Create the transaction status
+ TransactionStatus transactionStatus = new TransactionStatus(TransactionStatus.StatusPending, pendingTransactionBuilder.getArbitrator());
+
+ // Create the new transaction
+ Transaction newTransaction = pendingTransactionBuilder.createTransaction();
+ newTransaction.setTransactionStatus(transactionStatus);
+
+ if (pendingTransactionBuilder.getArbitrator() != localMachineId) {
+ // Add it to the queue and invalidate the builder for safety
+ pendingTransactionQueue.add(newTransaction);
+ } else {
+ arbitrateOnLocalTransaction(newTransaction);
+ updateLiveStateFromLocal();
+ }
+
+ pendingTransactionBuilder = new PendingTransaction(localMachineId);
+
+ try {
+ sendToServer(NULL);
+ } catch (ServerException e) {
+
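+ // Could not reach the server, so try to push the pending transactions directly to their arbitrators over the local connection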
+ Set<Long> arbitratorTriedAndFailed = new HashSet<Long>();
+ for (Iterator<Transaction> iter = pendingTransactionQueue.iterator(); iter.hasNext(); ) {
+ Transaction transaction = iter.next();
+
+ if (arbitratorTriedAndFailed.contains(transaction.getArbitrator())) {
+ // Already tried and failed to contact this arbitrator, so skip its remaining transactions
+ // to preserve their ordering at the arbitrator
+ continue;
+ }
+
+ Pair<Boolean, Boolean> sendReturn = sendTransactionToLocal(transaction);
+
+ if (sendReturn.getFirst()) {
+ // Failed to contact over local
+ arbitratorTriedAndFailed.add(transaction.getArbitrator());
+ } else {
+ // Successful contact or should not contact
+
+ if (sendReturn.getSecond()) {
+ // did arbitrate
+ iter.remove();
+ }
+ }
+ }
+ }
+
+ updateLiveStateFromLocal();
+
+ return transactionStatus;
+ }
+
+ /**
+ * Get the machine ID for this client
+ */
+ public int64_t getMachineId() {
+ return localMachineId;
+ }
+
+ /**
+ * Decrement the number of live slots that we currently have
+ */
+ public void decrementLiveCount() {
+ liveSlotCount--;
+ }
+
+ /**
+ * Recalculate the new resize threshold
+ */
+ private void setResizeThreshold() {
+ int resizeLower = (int) (RESIZE_THRESHOLD * numberOfSlots);
+ bufferResizeThreshold = resizeLower - 1 + random.nextInt(numberOfSlots - resizeLower);
+ }
+
+ public int64_t getLocalSequenceNumber() {
+ return localSequenceNumber;
+ }
+
+
+ bool lastInsertedNewKey = false;
+
+ private bool sendToServer(NewKey newKey) throws ServerException {
+
+ bool fromRetry = false;
+
+ try {
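+ // If an earlier send only partially reached the server, first work out whether our last attempted slot actually made it in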
+ if (hadPartialSendToServer) {
+ Slot[] newSlots = cloud.getSlots(sequenceNumber + 1);
+ if (newSlots.length == 0) {
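+ // The server had nothing new, so retry the last slot we attempted to send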
+ fromRetry = true;
+ ThreeTuple<Boolean, Boolean, Slot[]> sendSlotsReturn = sendSlotsToServer(lastSlotAttemptedToSend, lastNewSize, lastIsNewKey);
+
+ if (sendSlotsReturn.getFirst()) {
+ if (newKey != NULL) {
+ if (lastInsertedNewKey && (lastNewKey.getKey() == newKey.getKey()) && (lastNewKey.getMachineID() == newKey.getMachineID())) {
+ newKey = NULL;
+ }
+ }
+
+ for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ transaction.resetServerFailure();
+
+ // Update which transactions parts still need to be sent
+ transaction.removeSentParts(lastTransactionPartsSent.get(transaction));
+
+ // Add the transaction status to the outstanding list
+ outstandingTransactionStatus.put(transaction.getSequenceNumber(), transaction.getTransactionStatus());
+
+ // Update the transaction status
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentPartial);
+
+ // Check if all the transaction parts were successfully sent and if so then remove it from pending
+ if (transaction.didSendAllParts()) {
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentFully);
+ pendingTransactionQueue.remove(transaction);
+ }
+ }
+ } else {
+
+ newSlots = sendSlotsReturn.getThird();
+
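+ // Scan the returned slots to see whether our last attempted slot (or a last-message entry for it) made it into the block chain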
+ bool isInserted = false;
+ for (Slot s : newSlots) {
+ if ((s.getSequenceNumber() == lastSlotAttemptedToSend.getSequenceNumber()) && (s.getMachineID() == localMachineId)) {
+ isInserted = true;
+ break;
+ }
+ }
+
+ for (Slot s : newSlots) {
+ if (isInserted) {
+ break;
+ }
+
+ // Process each entry in the slot
+ for (Entry entry : s.getEntries()) {
+
+ if (entry.getType() == Entry.TypeLastMessage) {
+ LastMessage lastMessage = (LastMessage)entry;
+ if ((lastMessage.getMachineID() == localMachineId) && (lastMessage.getSequenceNumber() == lastSlotAttemptedToSend.getSequenceNumber())) {
+ isInserted = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (isInserted) {
+ if (newKey != NULL) {
+ if (lastInsertedNewKey && (lastNewKey.getKey() == newKey.getKey()) && (lastNewKey.getMachineID() == newKey.getMachineID())) {
+ newKey = NULL;
+ }
+ }
+
+ for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ transaction.resetServerFailure();
+
+ // Update which transactions parts still need to be sent
+ transaction.removeSentParts(lastTransactionPartsSent.get(transaction));
+
+ // Add the transaction status to the outstanding list
+ outstandingTransactionStatus.put(transaction.getSequenceNumber(), transaction.getTransactionStatus());
+
+ // Update the transaction status
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentPartial);
+
+ // Check if all the transaction parts were successfully sent and if so then remove it from pending
+ if (transaction.didSendAllParts()) {
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentFully);
+ pendingTransactionQueue.remove(transaction);
+ } else {
+ transaction.resetServerFailure();
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+ }
+ }
+ }
+
+ for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ transaction.resetServerFailure();
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+
+ if (sendSlotsReturn.getThird().length != 0) {
+ // insert into the local block chain
+ validateAndUpdate(sendSlotsReturn.getThird(), true);
+ }
+ // continue;
+ } else {
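+ // The server already had newer slots; scan them to see whether our last attempted slot made it in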
+ bool isInserted = false;
+ for (Slot s : newSlots) {
+ if ((s.getSequenceNumber() == lastSlotAttemptedToSend.getSequenceNumber()) && (s.getMachineID() == localMachineId)) {
+ isInserted = true;
+ break;
+ }
+ }
+
+ for (Slot s : newSlots) {
+ if (isInserted) {
+ break;
+ }
+
+ // Process each entry in the slot
+ for (Entry entry : s.getEntries()) {
+
+ if (entry.getType() == Entry.TypeLastMessage) {
+ LastMessage lastMessage = (LastMessage)entry;
+ if ((lastMessage.getMachineID() == localMachineId) && (lastMessage.getSequenceNumber() == lastSlotAttemptedToSend.getSequenceNumber())) {
+ isInserted = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (isInserted) {
+ if (newKey != NULL) {
+ if (lastInsertedNewKey && (lastNewKey.getKey() == newKey.getKey()) && (lastNewKey.getMachineID() == newKey.getMachineID())) {
+ newKey = NULL;
+ }
+ }
+
+ for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ transaction.resetServerFailure();
+
+ // Update which transactions parts still need to be sent
+ transaction.removeSentParts(lastTransactionPartsSent.get(transaction));
+
+ // Add the transaction status to the outstanding list
+ outstandingTransactionStatus.put(transaction.getSequenceNumber(), transaction.getTransactionStatus());
+
+ // Update the transaction status
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentPartial);
+
+ // Check if all the transaction parts were successfully sent and if so then remove it from pending
+ if (transaction.didSendAllParts()) {
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentFully);
+ pendingTransactionQueue.remove(transaction);
+ } else {
+ transaction.resetServerFailure();
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+ }
+ } else {
+ for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ transaction.resetServerFailure();
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+ }
+
+ // insert into the local block chain
+ validateAndUpdate(newSlots, true);
+ }
+ }
+ } catch (ServerException e) {
+ throw e;
+ }
+
+
+
+ try {
+ // While we have stuff that needs inserting into the block chain
+ while ((pendingTransactionQueue.size() > 0) || (pendingSendArbitrationRounds.size() > 0) || (newKey != NULL)) {
+
+ fromRetry = false;
+
+ if (hadPartialSendToServer) {
+ throw new Error("Should Be error free");
+ }
+
+
+
+ // If an arbitrator already exists for this key name then end
+ if ((newKey != NULL) && (arbitratorTable.get(newKey.getKey()) != NULL)) {
+ return false;
+ }
+
+ // Create the slot
+ Slot slot = new Slot(this, sequenceNumber + 1, localMachineId, buffer.getSlot(sequenceNumber).getHMAC(), localSequenceNumber);
+ localSequenceNumber++;
+
+ // Try to fill the slot with data
+ ThreeTuple<Boolean, Integer, Boolean> fillSlotsReturn = fillSlot(slot, false, newKey);
+ bool needsResize = fillSlotsReturn.getFirst();
+ int newSize = fillSlotsReturn.getSecond();
+ Boolean insertedNewKey = fillSlotsReturn.getThird();
+
+ if (needsResize) {
+ // Reset which transaction to send
+ for (Transaction transaction : transactionPartsSent.keySet()) {
+ transaction.resetNextPartToSend();
+
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer() && !transaction.getServerFailure()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+
+ // Clear the sent data since we are trying again
+ pendingSendArbitrationEntriesToDelete.clear();
+ transactionPartsSent.clear();
+
+ // We needed a resize so try again
+ fillSlot(slot, true, newKey);
+ }
+
+ lastSlotAttemptedToSend = slot;
+ lastIsNewKey = (newKey != NULL);
+ lastInsertedNewKey = insertedNewKey;
+ lastNewSize = newSize;
+ lastNewKey = newKey;
+ lastTransactionPartsSent = new HashMap<Transaction, List<Integer>>(transactionPartsSent);
+ lastPendingSendArbitrationEntriesToDelete = new ArrayList<Entry>(pendingSendArbitrationEntriesToDelete);
+
+
+ ThreeTuple<Boolean, Boolean, Slot[]> sendSlotsReturn = sendSlotsToServer(slot, newSize, newKey != NULL);
+
+ if (sendSlotsReturn.getFirst()) {
+
+ // Did insert into the block chain
+
+ if (insertedNewKey) {
+ // This slot was what was inserted not a previous slot
+
+ // New Key was successfully inserted into the block chain so don't want to insert it again
+ newKey = NULL;
+ }
+
+ // Remove the aborts and commit parts that were sent from the pending to send queue
+ for (Iterator<ArbitrationRound> iter = pendingSendArbitrationRounds.iterator(); iter.hasNext(); ) {
+ ArbitrationRound round = iter.next();
+ round.removeParts(pendingSendArbitrationEntriesToDelete);
+
+ if (round.isDoneSending()) {
+ // Sent all the parts
+ iter.remove();
+ }
+ }
+
+ for (Transaction transaction : transactionPartsSent.keySet()) {
+ transaction.resetServerFailure();
+
+ // Update which transactions parts still need to be sent
+ transaction.removeSentParts(transactionPartsSent.get(transaction));
+
+ // Add the transaction status to the outstanding list
+ outstandingTransactionStatus.put(transaction.getSequenceNumber(), transaction.getTransactionStatus());
+
+ // Update the transaction status
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentPartial);
+
+ // Check if all the transaction parts were successfully sent and if so then remove it from pending
+ if (transaction.didSendAllParts()) {
+ transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentFully);
+ pendingTransactionQueue.remove(transaction);
+ }
+ }
+ } else {
+
+ // if (!sendSlotsReturn.getSecond()) {
+ // for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ // transaction.resetServerFailure();
+ // }
+ // } else {
+ // for (Transaction transaction : lastTransactionPartsSent.keySet()) {
+ // transaction.resetServerFailure();
+
+ // // Update which transactions parts still need to be sent
+ // transaction.removeSentParts(transactionPartsSent.get(transaction));
+
+ // // Add the transaction status to the outstanding list
+ // outstandingTransactionStatus.put(transaction.getSequenceNumber(), transaction.getTransactionStatus());
+
+ // // Update the transaction status
+ // transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentPartial);
+
+ // // Check if all the transaction parts were successfully sent and if so then remove it from pending
+ // if (transaction.didSendAllParts()) {
+ // transaction.getTransactionStatus().setStatus(TransactionStatus.StatusSentFully);
+ // pendingTransactionQueue.remove(transaction);
+
+ // for (KeyValue kv : transaction.getKeyValueUpdateSet()) {
+ // System.out.println("Sent: " + kv + " from: " + localMachineId + " Slot:" + lastSlotAttemptedToSend.getSequenceNumber() + " Claimed:" + transaction.getSequenceNumber());
+ // }
+ // }
+ // }
+ // }
+
+ // Reset which transaction to send
+ for (Transaction transaction : transactionPartsSent.keySet()) {
+ transaction.resetNextPartToSend();
+
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer() && !transaction.getServerFailure()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+ }
+
+ // Clear the sent data in preparation for next send
+ pendingSendArbitrationEntriesToDelete.clear();
+ transactionPartsSent.clear();
+
+ if (sendSlotsReturn.getThird().length != 0) {
+ // insert into the local block chain
+ validateAndUpdate(sendSlotsReturn.getThird(), true);
+ }
+ }
+
+ } catch (ServerException e) {
+
+ if (e.getType() != ServerException.TypeInputTimeout) {
+ // e.printStackTrace();
+
+ // Nothing was able to be sent to the server so just clear these data structures
+ for (Transaction transaction : transactionPartsSent.keySet()) {
+ transaction.resetNextPartToSend();
+
+ // Set the transaction sequence number back to nothing
+ if (!transaction.didSendAPartToServer() && !transaction.getServerFailure()) {
+ transaction.setSequenceNumber(-1);
+ }
+ }
+ } else {
+ // There was a partial send to the server
+ hadPartialSendToServer = true;
+
+
+ // if (!fromRetry) {
+ // lastTransactionPartsSent = new HashMap<Transaction, List<Integer>>(transactionPartsSent);
+ // lastPendingSendArbitrationEntriesToDelete = new ArrayList<Entry>(pendingSendArbitrationEntriesToDelete);
+ // }
+
+ // Nothing was able to be sent to the server so just clear these data structures
+ for (Transaction transaction : transactionPartsSent.keySet()) {
+ transaction.resetNextPartToSend();
+ transaction.setServerFailure();
+ }
+ }
+
+ pendingSendArbitrationEntriesToDelete.clear();
+ transactionPartsSent.clear();
+
+ throw e;
+ }
+
+ return newKey == NULL;
+ }
+
+ private synchronized bool updateFromLocal(int64_t machineId) {
+ Pair<String, Integer> localCommunicationInformation = localCommunicationTable.get(machineId);
+ if (localCommunicationInformation == NULL) {
+ // Can't talk to that device locally so do nothing
+ return false;
+ }
+
+ // Get the size of the send data
+ int sendDataSize = sizeof(int32_t) + sizeof(int64_t);
+
+ Long lastArbitrationDataLocalSequenceNumber = (int64_t) - 1;
+ if (lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(machineId) != NULL) {
+ lastArbitrationDataLocalSequenceNumber = lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(machineId);
+ }
+
+ char[] sendData = new char[sendDataSize];
+ ByteBuffer bbEncode = ByteBuffer.wrap(sendData);
+
+ // Encode the data
+ bbEncode.putLong(lastArbitrationDataLocalSequenceNumber);
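+ // Zero transaction parts: this request only asks for arbitration data we have not seen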
+ bbEncode.putInt(0);
+
+ // Send by local
+ char[] returnData = cloud.sendLocalData(sendData, localSequenceNumber, localCommunicationInformation.getFirst(), localCommunicationInformation.getSecond());
+ localSequenceNumber++;
+
+ if (returnData == NULL) {
+ // Could not contact server
+ return false;
+ }
+
+ // Decode the data
+ ByteBuffer bbDecode = ByteBuffer.wrap(returnData);
+ int numberOfEntries = bbDecode.getInt();
+
+ for (int i = 0; i < numberOfEntries; i++) {
+ char type = bbDecode.get();
+ if (type == Entry.TypeAbort) {
+ Abort abort = (Abort)Abort.decode(NULL, bbDecode);
+ processEntry(abort);
+ } else if (type == Entry.TypeCommitPart) {
+ CommitPart commitPart = (CommitPart)CommitPart.decode(NULL, bbDecode);
+ processEntry(commitPart);
+ }
+ }
+
+ updateLiveStateFromLocal();
+
+ return true;
+ }
+
+ private Pair<Boolean, Boolean> sendTransactionToLocal(Transaction transaction) {
+
+ // Get the devices local communications
+ Pair<String, Integer> localCommunicationInformation = localCommunicationTable.get(transaction.getArbitrator());
+
+ if (localCommunicationInformation == NULL) {
+ // Can't talk to that device locally so do nothing
+ return new Pair<Boolean, Boolean>(true, false);
+ }
+
+ // Get the size of the send data
+ int sendDataSize = sizeof(int32_t) + sizeof(int64_t);
+ for (TransactionPart part : transaction.getParts().values()) {
+ sendDataSize += part.getSize();
+ }
+
+ Long lastArbitrationDataLocalSequenceNumber = (int64_t) - 1;
+ if (lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(transaction.getArbitrator()) != NULL) {
+ lastArbitrationDataLocalSequenceNumber = lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(transaction.getArbitrator());
+ }
+
+ // Make the send data size
+ char[] sendData = new char[sendDataSize];
+ ByteBuffer bbEncode = ByteBuffer.wrap(sendData);
+
+ // Encode the data
+ bbEncode.putLong(lastArbitrationDataLocalSequenceNumber);
+ bbEncode.putInt(transaction.getParts().size());
+ for (TransactionPart part : transaction.getParts().values()) {
+ part.encode(bbEncode);
+ }
+
+
+ // Send by local
+ char[] returnData = cloud.sendLocalData(sendData, localSequenceNumber, localCommunicationInformation.getFirst(), localCommunicationInformation.getSecond());
+ localSequenceNumber++;
+
+ if (returnData == NULL) {
+ // Could not contact server
+ return new Pair<Boolean, Boolean>(true, false);
+ }
+
+ // Decode the data
+ ByteBuffer bbDecode = ByteBuffer.wrap(returnData);
+ bool didCommit = bbDecode.get() == 1;
+ bool couldArbitrate = bbDecode.get() == 1;
+ int numberOfEntries = bbDecode.getInt();
+ bool foundAbort = false;
+
+ for (int i = 0; i < numberOfEntries; i++) {
+ char type = bbDecode.get();
+ if (type == Entry.TypeAbort) {
+ Abort abort = (Abort)Abort.decode(NULL, bbDecode);
+
+ if ((abort.getTransactionMachineId() == localMachineId) && (abort.getTransactionClientLocalSequenceNumber() == transaction.getClientLocalSequenceNumber())) {
+ foundAbort = true;
+ }
+
+ processEntry(abort);
+ } else if (type == Entry.TypeCommitPart) {
+ CommitPart commitPart = (CommitPart)CommitPart.decode(NULL, bbDecode);
+ processEntry(commitPart);
+ }
+ }
+
+ updateLiveStateFromLocal();
+
+ if (couldArbitrate) {
+ TransactionStatus status = transaction.getTransactionStatus();
+ if (didCommit) {
+ status.setStatus(TransactionStatus.StatusCommitted);
+ } else {
+ status.setStatus(TransactionStatus.StatusAborted);
+ }
+ } else {
+ TransactionStatus status = transaction.getTransactionStatus();
+ if (foundAbort) {
+ status.setStatus(TransactionStatus.StatusAborted);
+ } else {
+ status.setStatus(TransactionStatus.StatusCommitted);
+ }
+ }
+
+ return new Pair<Boolean, Boolean>(false, true);
+ }
+
+ public synchronized char[] acceptDataFromLocal(char[] data) {
+
+ // Decode the data
+ ByteBuffer bbDecode = ByteBuffer.wrap(data);
+ int64_t lastArbitratedSequenceNumberSeen = bbDecode.getLong();
+ int numberOfParts = bbDecode.getInt();
+
+ // If we did commit a transaction or not
+ bool didCommit = false;
+ bool couldArbitrate = false;
+
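+ // If the local client sent a transaction then arbitrate on it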
+ if (numberOfParts != 0) {
+
+ // decode the transaction
+ Transaction transaction = new Transaction();
+ for (int i = 0; i < numberOfParts; i++) {
+ bbDecode.get();
+ TransactionPart newPart = (TransactionPart)TransactionPart.decode(NULL, bbDecode);
+ transaction.addPartDecode(newPart);
+ }
+
+ // Arbitrate on transaction and pull relevant return data
+ Pair<Boolean, Boolean> localArbitrateReturn = arbitrateOnLocalTransaction(transaction);
+ couldArbitrate = localArbitrateReturn.getFirst();
+ didCommit = localArbitrateReturn.getSecond();
+
+ updateLiveStateFromLocal();
+
+ // Transaction was sent to the server so keep track of it to prevent double commit
+ if (transaction.getSequenceNumber() != -1) {
+ offlineTransactionsCommittedAndAtServer.add(transaction.getId());
+ }
+ }
+
+ // The data to send back
+ int returnDataSize = 0;
+ List<Entry> unseenArbitrations = new ArrayList<Entry>();
+
+ // Get the aborts to send back
+ List<Long> abortLocalSequenceNumbers = new ArrayList<Long >(liveAbortsGeneratedByLocal.keySet());
+ Collections.sort(abortLocalSequenceNumbers);
+ for (Long localSequenceNumber : abortLocalSequenceNumbers) {
+ if (localSequenceNumber <= lastArbitratedSequenceNumberSeen) {
+ continue;
+ }
+
+ Abort abort = liveAbortsGeneratedByLocal.get(localSequenceNumber);
+ unseenArbitrations.add(abort);
+ returnDataSize += abort.getSize();
+ }
+
+ // Get the commits to send back
+ Map<Long, Commit> commitForClientTable = liveCommitsTable.get(localMachineId);
+ if (commitForClientTable != NULL) {
+ List<Long> commitLocalSequenceNumbers = new ArrayList<Long>(commitForClientTable.keySet());
+ Collections.sort(commitLocalSequenceNumbers);
+
+ for (Long localSequenceNumber : commitLocalSequenceNumbers) {
+ Commit commit = commitForClientTable.get(localSequenceNumber);
+
+ if (localSequenceNumber <= lastArbitratedSequenceNumberSeen) {
+ continue;
+ }
+
+ unseenArbitrations.addAll(commit.getParts().values());
+
+ for (CommitPart commitPart : commit.getParts().values()) {
+ returnDataSize += commitPart.getSize();
+ }
+ }
+ }
+
+ // Number of arbitration entries to decode
+ returnDataSize += 2 * sizeof(int32_t);
+
+ // Boolean of did commit or not
+ if (numberOfParts != 0) {
+ returnDataSize += sizeof(char);
+ }
+
+ // Data to send Back
+ char[] returnData = new char[returnDataSize];
+ ByteBuffer bbEncode = ByteBuffer.wrap(returnData);
+
+ if (numberOfParts != 0) {
+ if (didCommit) {
+ bbEncode.put((char)1);
+ } else {
+ bbEncode.put((char)0);
+ }
+ if (couldArbitrate) {
+ bbEncode.put((char)1);
+ } else {
+ bbEncode.put((char)0);
+ }
+ }
+
+ bbEncode.putInt(unseenArbitrations.size());
+ for (Entry entry : unseenArbitrations) {
+ entry.encode(bbEncode);
+ }
+
+
+ localSequenceNumber++;
+ return returnData;
+ }
+
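+ /**
+ * Attempt to put a slot on the server. Returns (inserted, insertedOnLastAttempt, slots returned by the server).
+ * If the server rejects the slot, its sequence number is recorded in rejectedSlotList so that a
+ * RejectedMessage entry can be generated later; after a partial send the returned slots are
+ * scanned to determine whether the slot actually made it in.
+ */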
+ private ThreeTuple<Boolean, Boolean, Slot[]> sendSlotsToServer(Slot slot, int newSize, bool isNewKey) throws ServerException {
+
+ bool attemptedToSendToServerTmp = attemptedToSendToServer;
+ attemptedToSendToServer = true;
+
+ bool inserted = false;
+ bool lastTryInserted = false;
+
+ Slot[] array = cloud.putSlot(slot, newSize);
+ if (array == NULL) {
+ array = new Slot[] {slot};
+ rejectedSlotList.clear();
+ inserted = true;
+ } else {
+ if (array.length == 0) {
+ throw new Error("Server Error: Did not send any slots");
+ }
+
+ // if (attemptedToSendToServerTmp) {
+ if (hadPartialSendToServer) {
+
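+ // A partial send means the server may have stored this slot even though the put
+ // appeared to fail, so check the returned slots for either the slot itself or a
+ // last-message entry from this machine carrying the same sequence number.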
+ bool isInserted = false;
+ for (Slot s : array) {
+ if ((s.getSequenceNumber() == slot.getSequenceNumber()) && (s.getMachineID() == localMachineId)) {
+ isInserted = true;
+ break;
+ }
+ }
+
+ for (Slot s : array) {
+ if (isInserted) {
+ break;
+ }
+
+ // Process each entry in the slot
+ for (Entry entry : s.getEntries()) {
+
+ if (entry.getType() == Entry.TypeLastMessage) {
+ LastMessage lastMessage = (LastMessage)entry;
+
+ if ((lastMessage.getMachineID() == localMachineId) && (lastMessage.getSequenceNumber() == slot.getSequenceNumber())) {
+ isInserted = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!isInserted) {
+ rejectedSlotList.add(slot.getSequenceNumber());
+ lastTryInserted = false;
+ } else {
+ lastTryInserted = true;
+ }
+ } else {
+ rejectedSlotList.add(slot.getSequenceNumber());
+ lastTryInserted = false;
+ }
+ }
+
+ return new ThreeTuple<Boolean, Boolean, Slot[]>(inserted, lastTryInserted, array);
+ }
+
+ /**
+ * Fill a slot with rescued entries, pending arbitration data and transaction parts.
+ * The first element of the returned tuple is true when a resize was needed but not performed.
+ */
+ private ThreeTuple<Boolean, Integer, Boolean> fillSlot(Slot slot, bool resize, NewKey newKeyEntry) {
+
+
+ int newSize = 0;
+ if (liveSlotCount > bufferResizeThreshold) {
+ resize = true; //Resize is forced
+
+ }
+
+ if (resize) {
+ newSize = (int) (numberOfSlots * RESIZE_MULTIPLE);
+ TableStatus status = new TableStatus(slot, newSize);
+ slot.addEntry(status);
+ }
+
+ // Fill with rejected slots first before doing anything else
+ doRejectedMessages(slot);
+
+ // Do mandatory rescue of entries
+ ThreeTuple<Boolean, Boolean, Long> mandatoryRescueReturn = doMandatoryRescue(slot, resize);
+
+ // Extract working variables
+ bool needsResize = mandatoryRescueReturn.getFirst();
+ bool seenLiveSlot = mandatoryRescueReturn.getSecond();
+ int64_t currentRescueSequenceNumber = mandatoryRescueReturn.getThird();
+
+ if (needsResize && !resize) {
+ // We need to resize but are not resizing, so report that a resize is required
+ return new ThreeTuple<Boolean, Integer, Boolean>(true, NULL, NULL);
+ }
+
+ bool inserted = false;
+ if (newKeyEntry != NULL) {
+ newKeyEntry.setSlot(slot);
+ if (slot.hasSpace(newKeyEntry)) {
+
+ slot.addEntry(newKeyEntry);
+ inserted = true;
+ }
+ }
+
+ // Clear the transactions, aborts and commits that were sent previously
+ transactionPartsSent.clear();
+ pendingSendArbitrationEntriesToDelete.clear();
+
+ for (ArbitrationRound round : pendingSendArbitrationRounds) {
+ bool isFull = false;
+ round.generateParts();
+ List<Entry> parts = round.getParts();
+
+ // Insert pending arbitration data
+ for (Entry arbitrationData : parts) {
+
+ // If it is an abort then we need to set some information
+ if (arbitrationData instanceof Abort) {
+ ((Abort)arbitrationData).setSequenceNumber(slot.getSequenceNumber());
+ }
+
+ if (!slot.hasSpace(arbitrationData)) {
+ // No space, so we can't do anything else with these data entries
+ isFull = true;
+ break;
+ }
+
+ // Add to this current slot and add it to entries to delete
+ slot.addEntry(arbitrationData);
+ pendingSendArbitrationEntriesToDelete.add(arbitrationData);
+ }
+
+ if (isFull) {
+ break;
+ }
+ }
+
+ if (pendingTransactionQueue.size() > 0) {
+
+ Transaction transaction = pendingTransactionQueue.get(0);
+
+ // Set the transaction sequence number if it has yet to be inserted into the block chain
+ // if ((!transaction.didSendAPartToServer() && !transaction.getServerFailure()) || (transaction.getSequenceNumber() == -1)) {
+ // transaction.setSequenceNumber(slot.getSequenceNumber());
+ // }
+
+ if ((!transaction.didSendAPartToServer()) || (transaction.getSequenceNumber() == -1)) {
+ transaction.setSequenceNumber(slot.getSequenceNumber());
+ }
+
+
+ while (true) {
+ TransactionPart part = transaction.getNextPartToSend();
+
+ if (part == NULL) {
+ // Ran out of parts to send for this transaction so move on
+ break;
+ }
+
+ if (slot.hasSpace(part)) {
+ slot.addEntry(part);
+ List<Integer> partsSent = transactionPartsSent.get(transaction);
+ if (partsSent == NULL) {
+ partsSent = new ArrayList<Integer>();
+ transactionPartsSent.put(transaction, partsSent);
+ }
+ partsSent.add(part.getPartNumber());
+ transactionPartsSent.put(transaction, partsSent);
+ } else {
+ break;
+ }
+ }
+ }
+
+ // Fill the remainder of the slot with rescue data
+ doOptionalRescue(slot, seenLiveSlot, currentRescueSequenceNumber, resize);
+
+ return new ThreeTuple<Boolean, Integer, Boolean>(false, newSize, inserted);
+ }
+
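+ /**
+ * Add RejectedMessage entries for sequence numbers at which this client's own slot inserts
+ * were rejected by the server. If the backlog exceeds REJECTED_THRESHOLD the entire range is
+ * summarized in one entry; otherwise rejected sequence numbers whose slots we have not yet
+ * seen are reported as one range, and each one whose slot we do have is reported individually
+ * with that slot's machine id.
+ */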
+ private void doRejectedMessages(Slot s) {
+ if (! rejectedSlotList.isEmpty()) {
+ /* TODO: We should avoid generating a rejected message entry if
+ * there is already a sufficient entry in the queue (e.g.,
+ * equalsto value of true and same sequence number). */
+
+ int64_t old_seqn = rejectedSlotList.firstElement();
+ if (rejectedSlotList.size() > REJECTED_THRESHOLD) {
+ int64_t new_seqn = rejectedSlotList.lastElement();
+ RejectedMessage rm = new RejectedMessage(s, s.getSequenceNumber(), localMachineId, old_seqn, new_seqn, false);
+ s.addEntry(rm);
+ } else {
+ int64_t prev_seqn = -1;
+ int i = 0;
+ /* Go through list of missing messages */
+ for (; i < rejectedSlotList.size(); i++) {
+ int64_t curr_seqn = rejectedSlotList.get(i);
+ Slot s_msg = buffer.getSlot(curr_seqn);
+ if (s_msg != NULL)
+ break;
+ prev_seqn = curr_seqn;
+ }
+ /* Generate rejected message entry for missing messages */
+ if (prev_seqn != -1) {
+ RejectedMessage rm = new RejectedMessage(s, s.getSequenceNumber(), localMachineId, old_seqn, prev_seqn, false);
+ s.addEntry(rm);
+ }
+ /* Generate rejected message entries for present messages */
+ for (; i < rejectedSlotList.size(); i++) {
+ int64_t curr_seqn = rejectedSlotList.get(i);
+ Slot s_msg = buffer.getSlot(curr_seqn);
+ int64_t machineid = s_msg.getMachineID();
+ RejectedMessage rm = new RejectedMessage(s, s.getSequenceNumber(), machineid, curr_seqn, curr_seqn, true);
+ s.addEntry(rm);
+ }
+ }
+ }
+ }
+
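+ /**
+ * Rescue live entries out of slots that are about to be pushed off the end of the buffer
+ * (everything below firstIfFull + FREE_SLOTS). Returns (resize needed, seen a live slot,
+ * sequence number the scan stopped at).
+ */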
+ private ThreeTuple<Boolean, Boolean, Long> doMandatoryRescue(Slot slot, bool resize) {
+ int64_t newestSequenceNumber = buffer.getNewestSeqNum();
+ int64_t oldestSequenceNumber = buffer.getOldestSeqNum();
+ if (oldestLiveSlotSequenceNumver < oldestSequenceNumber) {
+ oldestLiveSlotSequenceNumver = oldestSequenceNumber;
+ }
+
+ int64_t currentSequenceNumber = oldestLiveSlotSequenceNumver;
+ bool seenLiveSlot = false;
+ int64_t firstIfFull = newestSequenceNumber + 1 - numberOfSlots; // smallest seq number in the buffer if it is full
+ int64_t threshold = firstIfFull + FREE_SLOTS; // we want the buffer to be clear of live entries up to this point
+
+
+ // Mandatory Rescue
+ for (; currentSequenceNumber < threshold; currentSequenceNumber++) {
+ Slot previousSlot = buffer.getSlot(currentSequenceNumber);
+ // Push slot number forward
+ if (! seenLiveSlot) {
+ oldestLiveSlotSequenceNumver = currentSequenceNumber;
+ }
+
+ if (!previousSlot.isLive()) {
+ continue;
+ }
+
+ // We have seen a live slot
+ seenLiveSlot = true;
+
+ // Get all the live entries for a slot
+ Vector<Entry> liveEntries = previousSlot.getLiveEntries(resize);
+
+ // Iterate over all the live entries and try to rescue them
+ for (Entry liveEntry : liveEntries) {
+ if (slot.hasSpace(liveEntry)) {
+
+ // Enough space to rescue the entry
+ slot.addEntry(liveEntry);
+ } else if (currentSequenceNumber == firstIfFull) {
+ // No space, but this entry is about to fall off the end of the buffer, so a resize is required
+ return new ThreeTuple<Boolean, Boolean, Long>(true, seenLiveSlot, currentSequenceNumber);
+
+ }
+ }
+ }
+
+ // Did not resize
+ return new ThreeTuple<Boolean, Boolean, Long>(false, seenLiveSlot, currentSequenceNumber);
+ }
+
+ private void doOptionalRescue(Slot s, bool seenliveslot, int64_t seqn, bool resize) {
+ /* now go through live entries from least to greatest sequence number until
+ * either all live slots added, or the slot doesn't have enough room
+ * for SKIP_THRESHOLD consecutive entries*/
+ int skipcount = 0;
+ int64_t newestseqnum = buffer.getNewestSeqNum();
+ search:
+ for (; seqn <= newestseqnum; seqn++) {
+ Slot prevslot = buffer.getSlot(seqn);
+ //Push slot number forward
+ if (!seenliveslot)
+ oldestLiveSlotSequenceNumver = seqn;
+
+ if (!prevslot.isLive())
+ continue;
+ seenliveslot = true;
+ Vector<Entry> liveentries = prevslot.getLiveEntries(resize);
+ for (Entry liveentry : liveentries) {
+ if (s.hasSpace(liveentry))
+ s.addEntry(liveentry);
+ else {
+ skipcount++;
+ if (skipcount > SKIP_THRESHOLD)
+ break search;
+ }
+ }
+ }
+ }
+
+ /**
+ * Checks for malicious activity and updates the local copy of the block chain.
+ */
+ private void validateAndUpdate(Slot[] newSlots, bool acceptUpdatesToLocal) {
+
+ // The cloud communication layer has checked slot HMACs already before decoding
+ if (newSlots.length == 0) {
+ return;
+ }
+
+ // Make sure all slots are newer than the last largest slot this client has seen
+ int64_t firstSeqNum = newSlots[0].getSequenceNumber();
+ if (firstSeqNum <= sequenceNumber) {
+ throw new Error("Server Error: Sent older slots!");
+ }
+
+ // Create an object that can access both new slots and slots in our local chain
+ // without committing slots to our local chain
+ SlotIndexer indexer = new SlotIndexer(newSlots, buffer);
+
+ // Check that the HMAC chain is not broken
+ checkHMACChain(indexer, newSlots);
+
+ // Set to keep track of messages from clients
+ HashSet<Long> machineSet = new HashSet<Long>(lastMessageTable.keySet());
+
+ // Process each slots data
+ for (Slot slot : newSlots) {
+ processSlot(indexer, slot, acceptUpdatesToLocal, machineSet);
+
+ updateExpectedSize();
+ }
+
+ // If there is a gap, check to see if the server sent us everything.
+ if (firstSeqNum != (sequenceNumber + 1)) {
+
+ // Check the size of the slots that were sent down by the server.
+ // Can only check the size if there was a gap
+ checkNumSlots(newSlots.length);
+
+ // Since there was a gap, every machine must have pushed a slot or must have
+ // a last message entry. If not, then the server is hiding slots
+ if (!machineSet.isEmpty()) {
+ throw new Error("Missing record for machines: " + machineSet);
+ }
+ }
+
+ // Update the size of our local block chain.
+ commitNewMaxSize();
+
+ // Commit the new slots to the local block chain.
+ for (Slot slot : newSlots) {
+
+ // Insert this slot into our local block chain copy.
+ buffer.putSlot(slot);
+
+ // Keep track of how many slots are currently live (have live data in them).
+ liveSlotCount++;
+ }
+
+ // Get the sequence number of the latest slot in the system
+ sequenceNumber = newSlots[newSlots.length - 1].getSequenceNumber();
+
+ updateLiveStateFromServer();
+
+ // No Need to remember after we pulled from the server
+ offlineTransactionsCommittedAndAtServer.clear();
+
+ // This is invalidated now
+ hadPartialSendToServer = false;
+ }
+
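+ /**
+ * Recompute derived state after new slots have been pulled from the server:
+ * assemble transaction parts, arbitrate locally owned transactions, update the
+ * committed table and then redo the speculative tables.
+ */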
+ private void updateLiveStateFromServer() {
+ // Process the new transaction parts
+ processNewTransactionParts();
+
+ // Do arbitration on new transactions that were received
+ arbitrateFromServer();
+
+ // Update all the committed keys
+ bool didCommitOrSpeculate = updateCommittedTable();
+
+ // Delete the transactions that are now dead
+ updateLiveTransactionsAndStatus();
+
+ // Do speculations
+ didCommitOrSpeculate |= updateSpeculativeTable(didCommitOrSpeculate);
+ updatePendingTransactionSpeculativeTable(didCommitOrSpeculate);
+ }
+
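+ /**
+ * Recompute derived state after a local-only update. Unlike updateLiveStateFromServer,
+ * this skips transaction part assembly and server-side arbitration since no new slots
+ * were received.
+ */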
+ private void updateLiveStateFromLocal() {
+ // Update all the committed keys
+ bool didCommitOrSpeculate = updateCommittedTable();
+
+ // Delete the transactions that are now dead
+ updateLiveTransactionsAndStatus();
+
+ // Do speculations
+ didCommitOrSpeculate |= updateSpeculativeTable(didCommitOrSpeculate);
+ updatePendingTransactionSpeculativeTable(didCommitOrSpeculate);
+ }
+
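+ /**
+ * expectedsize bookkeeping: when there is a gap between our local chain and the slots the
+ * server returns, the first table status entry seen in a round seeds expectedsize (from the
+ * sequence number at which it appears, capped at the declared table size) and every processed
+ * slot increments it, capped at the current maximum table size. checkNumSlots then verifies
+ * that the server returned exactly that many slots.
+ */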
+ private void initExpectedSize(int64_t firstSequenceNumber, int64_t numberOfSlots) {
+ // if (didFindTableStatus) {
+ // return;
+ // }
+ int64_t prevslots = firstSequenceNumber;
+
+
+ if (didFindTableStatus) {
+ // expectedsize = (prevslots < ((int64_t) numberOfSlots)) ? (int) prevslots : expectedsize;
+ // System.out.println("Here2: " + expectedsize + " " + numberOfSlots + " " + prevslots);
+
+ } else {
+ expectedsize = (prevslots < ((int64_t) numberOfSlots)) ? (int) prevslots : numberOfSlots;
+ // System.out.println("Here: " + expectedsize);
+ }
+
+ // System.out.println(numberOfSlots);
+
+ didFindTableStatus = true;
+ currMaxSize = numberOfSlots;
+ }
+
+ private void updateExpectedSize() {
+ expectedsize++;
+
+ if (expectedsize > currMaxSize) {
+ expectedsize = currMaxSize;
+ }
+ }
+
+
+ /**
+ * Check the size of the block chain to make sure there are enough slots sent back by the server.
+ * This is only called when there is a gap between the slots we have locally and the slots
+ * sent by the server; in that case the slots sent by the server must contain at least one
+ * table status entry.
+ */
+ private void checkNumSlots(int numberOfSlots) {
+ if (numberOfSlots != expectedsize) {
+ throw new Error("Server Error: Server did not send all slots. Expected: " + expectedsize + " Received:" + numberOfSlots);
+ }
+ }
+
+ private void updateCurrMaxSize(int newmaxsize) {
+ currMaxSize = newmaxsize;
+ }
+
+
+ /**
+ * Update the size of the local buffer if it is needed.
+ */
+ private void commitNewMaxSize() {
+ didFindTableStatus = false;
+
+ // Resize the local slot buffer
+ if (numberOfSlots != currMaxSize) {
+ buffer.resize((int)currMaxSize);
+ }
+
+ // Change the number of local slots to the new size
+ numberOfSlots = (int)currMaxSize;
+
+
+ // Recalculate the resize threshold since the size of the local buffer has changed
+ setResizeThreshold();
+ }
+
+ /**
+ * Process the new transaction parts from this latest round of slots received from the server
+ */
+ private void processNewTransactionParts() {
+
+ if (newTransactionParts.size() == 0) {
+ // Nothing new to process
+ return;
+ }
+
+ // Iterate through all the machine Ids that we received new parts for
+ for (Long machineId : newTransactionParts.keySet()) {
+ Map<Pair<Long, Integer>, TransactionPart> parts = newTransactionParts.get(machineId);
+
+ // Iterate through all the parts for that machine Id
+ for (Pair<Long, Integer> partId : parts.keySet()) {
+ TransactionPart part = parts.get(partId);
+
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(part.getArbitratorId());
+ if ((lastTransactionNumber != NULL) && (lastTransactionNumber >= part.getSequenceNumber())) {
+ // Set dead the transaction part
+ part.setDead();
+ continue;
+ }
+
+ // Get the transaction object for that sequence number
+ Transaction transaction = liveTransactionBySequenceNumberTable.get(part.getSequenceNumber());
+
+ if (transaction == NULL) {
+ // This is a new transaction that we don't have, so make a new one
+ transaction = new Transaction();
+
+ // Insert this new transaction into the live tables
+ liveTransactionBySequenceNumberTable.put(part.getSequenceNumber(), transaction);
+ liveTransactionByTransactionIdTable.put(part.getTransactionId(), transaction);
+ }
+
+ // Add that part to the transaction
+ transaction.addPartDecode(part);
+ }
+ }
+
+ // Clear all the new transaction parts in preparation for the next time the server sends slots
+ newTransactionParts.clear();
+ }
+
+
+ private int64_t lastSeqNumArbOn = 0;
+
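+ /**
+ * Arbitrate, in sequence-number order, on the complete transactions received from the server
+ * for which this machine is the arbitrator. Guards that fail produce Abort entries; guards
+ * that pass are folded into a single batched Commit. The resulting arbitration round is then
+ * queued (and possibly compacted) for sending to the server.
+ */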
+ private void arbitrateFromServer() {
+
+ if (liveTransactionBySequenceNumberTable.size() == 0) {
+ // Nothing to arbitrate on so move on
+ return;
+ }
+
+ // Get the transaction sequence numbers and sort from oldest to newest
+ List<Long> transactionSequenceNumbers = new ArrayList<Long>(liveTransactionBySequenceNumberTable.keySet());
+ Collections.sort(transactionSequenceNumbers);
+
+ // Collection of key value pairs updated by transactions whose guards evaluated to true
+ Map<IoTString, KeyValue> speculativeTableTmp = new HashMap<IoTString, KeyValue>();
+
+ // The last transaction that was committed
+ int64_t lastTransactionCommitted = -1;
+ Set<Abort> generatedAborts = new HashSet<Abort>();
+
+ for (Long transactionSequenceNumber : transactionSequenceNumbers) {
+ Transaction transaction = liveTransactionBySequenceNumberTable.get(transactionSequenceNumber);
+
+
+
+ // Check if this machine arbitrates for this transaction; if not, we can't arbitrate it
+ if (transaction.getArbitrator() != localMachineId) {
+ continue;
+ }
+
+ if (transactionSequenceNumber < lastSeqNumArbOn) {
+ continue;
+ }
+
+ if (offlineTransactionsCommittedAndAtServer.contains(transaction.getId())) {
+ // We have seen this already locally, so don't commit again
+ continue;
+ }
+
+
+ if (!transaction.isComplete()) {
+ // We would arbitrate out of order if we continued, so just break;
+ // this transaction is most likely still missing parts
+ break;
+ }
+
+
+ // update the largest transaction seen by arbitrator from server
+ if (lastTransactionSeenFromMachineFromServer.get(transaction.getMachineId()) == NULL) {
+ lastTransactionSeenFromMachineFromServer.put(transaction.getMachineId(), transaction.getClientLocalSequenceNumber());
+ } else {
+ Long lastTransactionSeenFromMachine = lastTransactionSeenFromMachineFromServer.get(transaction.getMachineId());
+ if (transaction.getClientLocalSequenceNumber() > lastTransactionSeenFromMachine) {
+ lastTransactionSeenFromMachineFromServer.put(transaction.getMachineId(), transaction.getClientLocalSequenceNumber());
+ }
+ }
+
+ if (transaction.evaluateGuard(committedKeyValueTable, speculativeTableTmp, NULL)) {
+ // Guard evaluated as true
+
+ // Update the local changes so we can make the commit
+ for (KeyValue kv : transaction.getKeyValueUpdateSet()) {
+ speculativeTableTmp.put(kv.getKey(), kv);
+ }
+
+ // Update what the last transaction committed was for use in batch commit
+ lastTransactionCommitted = transactionSequenceNumber;
+ } else {
+ // Guard evaluated to false, so create an abort
+
+ // Create the abort
+ Abort newAbort = new Abort(NULL,
+ transaction.getClientLocalSequenceNumber(),
+ transaction.getSequenceNumber(),
+ transaction.getMachineId(),
+ transaction.getArbitrator(),
+ localArbitrationSequenceNumber);
+ localArbitrationSequenceNumber++;
+
+ generatedAborts.add(newAbort);
+
+ // Insert the abort so we can process
+ processEntry(newAbort);
+ }
+
+ lastSeqNumArbOn = transactionSequenceNumber;
+
+ // liveTransactionBySequenceNumberTable.remove(transactionSequenceNumber);
+ }
+
+ Commit newCommit = NULL;
+
+ // If there is something to commit
+ if (speculativeTableTmp.size() != 0) {
+
+ // Create the commit and increment the commit sequence number
+ newCommit = new Commit(localArbitrationSequenceNumber, localMachineId, lastTransactionCommitted);
+ localArbitrationSequenceNumber++;
+
+ // Add all the new keys to the commit
+ for (KeyValue kv : speculativeTableTmp.values()) {
+ newCommit.addKV(kv);
+ }
+
+ // create the commit parts
+ newCommit.createCommitParts();
+
+ // Append all the commit parts to the end of the pending queue waiting for sending to the server
+
+ // Insert the commit so we can process it
+ for (CommitPart commitPart : newCommit.getParts().values()) {
+ processEntry(commitPart);
+ }
+ }
+
+ if ((newCommit != NULL) || (generatedAborts.size() > 0)) {
+ ArbitrationRound arbitrationRound = new ArbitrationRound(newCommit, generatedAborts);
+ pendingSendArbitrationRounds.add(arbitrationRound);
+
+ if (compactArbitrationData()) {
+ ArbitrationRound newArbitrationRound = pendingSendArbitrationRounds.get(pendingSendArbitrationRounds.size() - 1);
+ if (newArbitrationRound.getCommit() != NULL) {
+ for (CommitPart commitPart : newArbitrationRound.getCommit().getParts().values()) {
+ processEntry(commitPart);
+ }
+ }
+ }
+ }
+ }
+
+ private Pair<Boolean, Boolean> arbitrateOnLocalTransaction(Transaction transaction) {
+
+ // Check if this machine arbitrates for this transaction; if not, we can't arbitrate it
+ if (transaction.getArbitrator() != localMachineId) {
+ return new Pair<Boolean, Boolean>(false, false);
+ }
+
+ if (!transaction.isComplete()) {
+ // Cannot arbitrate on an incomplete transaction, so just return;
+ // it is most likely still missing parts
+ return new Pair<Boolean, Boolean>(false, false);
+ }
+
+ if (transaction.getMachineId() != localMachineId) {
+ // Don't do this check for local transactions
+ if (lastTransactionSeenFromMachineFromServer.get(transaction.getMachineId()) != NULL) {
+ if (lastTransactionSeenFromMachineFromServer.get(transaction.getMachineId()) > transaction.getClientLocalSequenceNumber()) {
+ // We have already seen this transaction from the server
+ return new Pair<Boolean, Boolean>(false, false);
+ }
+ }
+ }
+
+ if (transaction.evaluateGuard(committedKeyValueTable, NULL, NULL)) {
+ // Guard evaluated as true
+
+ // Create the commit and increment the commit sequence number
+ Commit newCommit = new Commit(localArbitrationSequenceNumber, localMachineId, -1);
+ localArbitrationSequenceNumber++;
+
+ // Update the local changes so we can make the commit
+ for (KeyValue kv : transaction.getKeyValueUpdateSet()) {
+ newCommit.addKV(kv);
+ }
+
+ // create the commit parts
+ newCommit.createCommitParts();
+
+ // Append all the commit parts to the end of the pending queue waiting for sending to the server
+ ArbitrationRound arbitrationRound = new ArbitrationRound(newCommit, new HashSet<Abort>());
+ pendingSendArbitrationRounds.add(arbitrationRound);
+
+ if (compactArbitrationData()) {
+ ArbitrationRound newArbitrationRound = pendingSendArbitrationRounds.get(pendingSendArbitrationRounds.size() - 1);
+ for (CommitPart commitPart : newArbitrationRound.getCommit().getParts().values()) {
+ processEntry(commitPart);
+ }
+ } else {
+ // Insert the commit so we can process it
+ for (CommitPart commitPart : newCommit.getParts().values()) {
+ processEntry(commitPart);
+ }
+ }
+
+ if (transaction.getMachineId() == localMachineId) {
+ TransactionStatus status = transaction.getTransactionStatus();
+ if (status != NULL) {
+ status.setStatus(TransactionStatus.StatusCommitted);
+ }
+ }
+
+ updateLiveStateFromLocal();
+ return new Pair<Boolean, Boolean>(true, true);
+ } else {
+
+ if (transaction.getMachineId() == localMachineId) {
+ // For locally created messages update the status
+
+ // Guard evaluated to false, so mark the transaction aborted
+ TransactionStatus status = transaction.getTransactionStatus();
+ if (status != NULL) {
+ status.setStatus(TransactionStatus.StatusAborted);
+ }
+ } else {
+ Set<Abort> addAbortSet = new HashSet<Abort>();
+
+
+ // Create the abort
+ Abort newAbort = new Abort(NULL,
+ transaction.getClientLocalSequenceNumber(),
+ -1,
+ transaction.getMachineId(),
+ transaction.getArbitrator(),
+ localArbitrationSequenceNumber);
+ localArbitrationSequenceNumber++;
+
+ addAbortSet.add(newAbort);
+
+
+ // Append all the commit parts to the end of the pending queue waiting for sending to the server
+ ArbitrationRound arbitrationRound = new ArbitrationRound(NULL, addAbortSet);
+ pendingSendArbitrationRounds.add(arbitrationRound);
+
+ if (compactArbitrationData()) {
+ ArbitrationRound newArbitrationRound = pendingSendArbitrationRounds.get(pendingSendArbitrationRounds.size() - 1);
+ for (CommitPart commitPart : newArbitrationRound.getCommit().getParts().values()) {
+ processEntry(commitPart);
+ }
+ }
+ }
+
+ updateLiveStateFromLocal();
+ return new Pair<Boolean, Boolean>(true, false);
+ }
+ }
+
+ /**
+ * Compacts the arbitration data by merging commits and aggregating aborts so that a single large push of commits can be done instead of many small updates
+ */
+ private bool compactArbitrationData() {
+
+ if (pendingSendArbitrationRounds.size() < 2) {
+ // Nothing to compact so do nothing
+ return false;
+ }
+
+ ArbitrationRound lastRound = pendingSendArbitrationRounds.get(pendingSendArbitrationRounds.size() - 1);
+ if (lastRound.didSendPart()) {
+ return false;
+ }
+
+ bool hadCommit = (lastRound.getCommit() == NULL);
+ bool gotNewCommit = false;
+
+ int numberToDelete = 1;
+ while (numberToDelete < pendingSendArbitrationRounds.size()) {
+ ArbitrationRound round = pendingSendArbitrationRounds.get(pendingSendArbitrationRounds.size() - numberToDelete - 1);
+
+ if (round.isFull() || round.didSendPart()) {
+ // Stop since there is a part that cannot be compacted and we need to compact in order
+ break;
+ }
+
+ if (round.getCommit() == NULL) {
+
+ // Try compacting aborts only
+ int newSize = round.getCurrentSize() + lastRound.getAbortsCount();
+ if (newSize > ArbitrationRound.MAX_PARTS) {
+ // Can't compact since it would be too large
+ break;
+ }
+ lastRound.addAborts(round.getAborts());
+ } else {
+
+ // Create a new larger commit
+ Commit newCommit = Commit.merge(lastRound.getCommit(), round.getCommit(), localArbitrationSequenceNumber);
+ localArbitrationSequenceNumber++;
+
+ // Create the commit parts so that we can count them
+ newCommit.createCommitParts();
+
+ // Calculate the new size of the parts
+ int newSize = newCommit.getNumberOfParts();
+ newSize += lastRound.getAbortsCount();
+ newSize += round.getAbortsCount();
+
+ if (newSize > ArbitrationRound.MAX_PARTS) {
+ // Can't compact since it would be too large
+ break;
+ }
+
+ // Set the new compacted part
+ lastRound.setCommit(newCommit);
+ lastRound.addAborts(round.getAborts());
+ gotNewCommit = true;
+ }
+
+ numberToDelete++;
+ }
+
+ if (numberToDelete != 1) {
+ // If there is a compaction
+
+ // Delete the previous pieces that are now in the new compacted piece
+ if (numberToDelete == pendingSendArbitrationRounds.size()) {
+ pendingSendArbitrationRounds.clear();
+ } else {
+ for (int i = 0; i < numberToDelete; i++) {
+ pendingSendArbitrationRounds.remove(pendingSendArbitrationRounds.size() - 1);
+ }
+ }
+
+ // Add the new compacted into the pending to send list
+ pendingSendArbitrationRounds.add(lastRound);
+
+ // Should reinsert into the commit processor
+ if (hadCommit && gotNewCommit) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+ // private bool compactArbitrationData() {
+ // return false;
+ // }
+
+ /**
+ * Update all the commits and the committed tables; set dead the commits that are no longer live
+ */
+ private bool updateCommittedTable() {
+
+ if (newCommitParts.size() == 0) {
+ // Nothing new to process
+ return false;
+ }
+
+ // Iterate through all the machine Ids that we received new parts for
+ for (Long machineId : newCommitParts.keySet()) {
+ Map<Pair<Long, Integer>, CommitPart> parts = newCommitParts.get(machineId);
+
+ // Iterate through all the parts for that machine Id
+ for (Pair<Long, Integer> partId : parts.keySet()) {
+ CommitPart part = parts.get(partId);
+
+ // Get the transaction object for that sequence number
+ Map<Long, Commit> commitForClientTable = liveCommitsTable.get(part.getMachineId());
+
+ if (commitForClientTable == NULL) {
+ // This is the first commit from this device
+ commitForClientTable = new HashMap<Long, Commit>();
+ liveCommitsTable.put(part.getMachineId(), commitForClientTable);
+ }
+
+ Commit commit = commitForClientTable.get(part.getSequenceNumber());
+
+ if (commit == NULL) {
+ // This is a new commit that we don't have, so make a new one
+ commit = new Commit();
+
+ // Insert this new commit into the live tables
+ commitForClientTable.put(part.getSequenceNumber(), commit);
+ }
+
+ // Add that part to the commit
+ commit.addPartDecode(part);
+ }
+ }
+
+ // Clear all the new commits parts in preparation for the next time the server sends slots
+ newCommitParts.clear();
+
+ // If we process a new commit keep track of it for future use
+ bool didProcessANewCommit = false;
+
+ // Process the commits one by one
+ for (Long arbitratorId : liveCommitsTable.keySet()) {
+
+ // Get all the commits for a specific arbitrator
+ Map<Long, Commit> commitForClientTable = liveCommitsTable.get(arbitratorId);
+
+ // Sort the commits in order
+ List<Long> commitSequenceNumbers = new ArrayList<Long>(commitForClientTable.keySet());
+ Collections.sort(commitSequenceNumbers);
+
+ // Get the last commit seen from this arbitrator
+ int64_t lastCommitSeenSequenceNumber = -1;
+ if (lastCommitSeenSequenceNumberByArbitratorTable.get(arbitratorId) != NULL) {
+ lastCommitSeenSequenceNumber = lastCommitSeenSequenceNumberByArbitratorTable.get(arbitratorId);
+ }
+
+ // Go through each new commit one by one
+ for (int i = 0; i < commitSequenceNumbers.size(); i++) {
+ Long commitSequenceNumber = commitSequenceNumbers.get(i);
+ Commit commit = commitForClientTable.get(commitSequenceNumber);
+
+ // Special processing if a commit is not complete
+ if (!commit.isComplete()) {
+ if (i == (commitSequenceNumbers.size() - 1)) {
+ // If there is an incomplete commit and this commit is the latest one seen then this commit cannot be processed and there are no other commits
+ break;
+ } else {
+ // This is a commit that was already dead but parts of it are still in the block chain (not flushed out yet).
+ // Delete it and move on
+ commit.setDead();
+ commitForClientTable.remove(commit.getSequenceNumber());
+ continue;
+ }
+ }
+
+ // Update the last transaction that was updated if we can
+ if (commit.getTransactionSequenceNumber() != -1) {
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(commit.getMachineId());
+
+ // Update the last transaction sequence number that the arbitrator arbitrated on
+ if ((lastTransactionNumber == NULL) || (lastTransactionNumber < commit.getTransactionSequenceNumber())) {
+ lastArbitratedTransactionNumberByArbitratorTable.put(commit.getMachineId(), commit.getTransactionSequenceNumber());
+ }
+ }
+
+ // Update the last arbitration data that we have seen so far
+ if (lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(commit.getMachineId()) != NULL) {
+
+ int64_t lastArbitrationSequenceNumber = lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(commit.getMachineId());
+ if (commit.getSequenceNumber() > lastArbitrationSequenceNumber) {
+ // Is larger
+ lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.put(commit.getMachineId(), commit.getSequenceNumber());
+ }
+ } else {
+ // Never seen any data from this arbitrator so record the first one
+ lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.put(commit.getMachineId(), commit.getSequenceNumber());
+ }
+
+ // We have already seen this commit before, so we do not need to do the full processing on it
+ if (commit.getSequenceNumber() <= lastCommitSeenSequenceNumber) {
+
+ // Update the last transaction that was updated if we can
+ if (commit.getTransactionSequenceNumber() != -1) {
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(commit.getMachineId());
+
+ // Update the last transaction sequence number that the arbitrator arbitrated on
+ if ((lastTransactionNumber == NULL) || (lastTransactionNumber < commit.getTransactionSequenceNumber())) {
+ lastArbitratedTransactionNumberByArbitratorTable.put(commit.getMachineId(), commit.getTransactionSequenceNumber());
+ }
+ }
+
+ continue;
+ }
+
+ // If we got here then this is a brand new commit and needs full processing
+
+ // Get what commits should be edited, these are the commits that have live values for their keys
+ Set<Commit> commitsToEdit = new HashSet<Commit>();
+ for (KeyValue kv : commit.getKeyValueUpdateSet()) {
+ commitsToEdit.add(liveCommitsByKeyTable.get(kv.getKey()));
+ }
+ commitsToEdit.remove(NULL); // remove NULL since it could be in this set
+
+ // Update each previous commit that needs to be updated
+ for (Commit previousCommit : commitsToEdit) {
+
+ // Only bother with live commits (TODO: Maybe remove this check)
+ if (previousCommit.isLive()) {
+
+ // Update which keys in the old commits are still live
+ for (KeyValue kv : commit.getKeyValueUpdateSet()) {
+ previousCommit.invalidateKey(kv.getKey());
+ }
+
+ // if the commit is now dead then remove it
+ if (!previousCommit.isLive()) {
+ commitForClientTable.remove(previousCommit.getSequenceNumber());
+ }
+ }
+ }
+
+ // Update the last seen sequence number from this arbitrator
+ if (lastCommitSeenSequenceNumberByArbitratorTable.get(commit.getMachineId()) != NULL) {
+ if (commit.getSequenceNumber() > lastCommitSeenSequenceNumberByArbitratorTable.get(commit.getMachineId())) {
+ lastCommitSeenSequenceNumberByArbitratorTable.put(commit.getMachineId(), commit.getSequenceNumber());
+ }
+ } else {
+ lastCommitSeenSequenceNumberByArbitratorTable.put(commit.getMachineId(), commit.getSequenceNumber());
+ }
+
+ // We processed a new commit that we haven't seen before
+ didProcessANewCommit = true;
+
+ // Update the committed table of keys and which commit is using which key
+ for (KeyValue kv : commit.getKeyValueUpdateSet()) {
+ committedKeyValueTable.put(kv.getKey(), kv);
+ liveCommitsByKeyTable.put(kv.getKey(), commit);
+ }
+ }
+ }
+
+ return didProcessANewCommit;
+ }
+
+ /**
+ * Create the speculative table from transactions that are still live and have come from the cloud
+ */
+ private bool updateSpeculativeTable(bool didProcessNewCommits) {
+ if (liveTransactionBySequenceNumberTable.keySet().size() == 0) {
+ // There is nothing to speculate on
+ return false;
+ }
+
+ // Create a list of the transaction sequence numbers and sort them from oldest to newest
+ List<Long> transactionSequenceNumbersSorted = new ArrayList<Long>(liveTransactionBySequenceNumberTable.keySet());
+ Collections.sort(transactionSequenceNumbersSorted);
+
+ bool hasGapInTransactionSequenceNumbers = transactionSequenceNumbersSorted.get(0) != oldestTransactionSequenceNumberSpeculatedOn;
+
+
+ if (hasGapInTransactionSequenceNumbers || didProcessNewCommits) {
+ // If there is a gap in the transaction sequence numbers then there was a commit or an abort of a transaction
+ // OR there was a new commit (could be from an offline commit), so redo the speculation from scratch
+
+ // Start from scratch
+ speculatedKeyValueTable.clear();
+ lastTransactionSequenceNumberSpeculatedOn = -1;
+ oldestTransactionSequenceNumberSpeculatedOn = -1;
+
+ }
+
+ // Remember the front of the transaction list
+ oldestTransactionSequenceNumberSpeculatedOn = transactionSequenceNumbersSorted.get(0);
+
+ // Find where to start speculating from
+ int startIndex = transactionSequenceNumbersSorted.indexOf(lastTransactionSequenceNumberSpeculatedOn) + 1;
+
+ if (startIndex >= transactionSequenceNumbersSorted.size()) {
+ // Make sure we are not out of bounds
+ return false; // did not speculate
+ }
+
+ Set<Long> incompleteTransactionArbitrator = new HashSet<Long>();
+ bool didSkip = true;
+
+ for (int i = startIndex; i < transactionSequenceNumbersSorted.size(); i++) {
+ int64_t transactionSequenceNumber = transactionSequenceNumbersSorted.get(i);
+ Transaction transaction = liveTransactionBySequenceNumberTable.get(transactionSequenceNumber);
+
+ if (!transaction.isComplete()) {
+ // If there is an incomplete transaction then there is nothing we can do
+ // add this transaction's arbitrator to the list of arbitrators we should ignore
+ incompleteTransactionArbitrator.add(transaction.getArbitrator());
+ didSkip = true;
+ continue;
+ }
+
+ if (incompleteTransactionArbitrator.contains(transaction.getArbitrator())) {
+ continue;
+ }
+
+ lastTransactionSequenceNumberSpeculatedOn = transactionSequenceNumber;
+
+ if (transaction.evaluateGuard(committedKeyValueTable, speculatedKeyValueTable, NULL)) {
+ // Guard evaluated to true so update the speculative table
+ for (KeyValue kv : transaction.getKeyValueUpdateSet()) {
+ speculatedKeyValueTable.put(kv.getKey(), kv);
+ }
+ }
+ }
+
+ if (didSkip) {
+ // Since there was a skip we need to redo the speculation next time around
+ lastTransactionSequenceNumberSpeculatedOn = -1;
+ oldestTransactionSequenceNumberSpeculatedOn = -1;
+ }
+
+ // We did some speculation
+ return true;
+ }
+
+ /**
+ * Create the pending transaction speculative table from transactions that are still in the pending transaction buffer
+ */
+ private void updatePendingTransactionSpeculativeTable(bool didProcessNewCommitsOrSpeculate) {
+ if (pendingTransactionQueue.size() == 0) {
+ // There is nothing to speculate on
+ return;
+ }
+
+
+ if (didProcessNewCommitsOrSpeculate || (firstPendingTransaction != pendingTransactionQueue.get(0))) {
+ // need to reset on the pending speculation
+ lastPendingTransactionSpeculatedOn = NULL;
+ firstPendingTransaction = pendingTransactionQueue.get(0);
+ pendingTransactionSpeculatedKeyValueTable.clear();
+ }
+
+ // Find where to start speculating from
+ int startIndex = pendingTransactionQueue.indexOf(firstPendingTransaction) + 1;
+
+ if (startIndex >= pendingTransactionQueue.size()) {
+ // Make sure we are not out of bounds
+ return;
+ }
+
+ for (int i = startIndex; i < pendingTransactionQueue.size(); i++) {
+ Transaction transaction = pendingTransactionQueue.get(i);
+
+ lastPendingTransactionSpeculatedOn = transaction;
+
+ if (transaction.evaluateGuard(committedKeyValueTable, speculatedKeyValueTable, pendingTransactionSpeculatedKeyValueTable)) {
+ // Guard evaluated to true so update the speculative table
+ for (KeyValue kv : transaction.getKeyValueUpdateSet()) {
+ pendingTransactionSpeculatedKeyValueTable.put(kv.getKey(), kv);
+ }
+ }
+ }
+ }
+
+ /**
+ * Set dead the transactions that have been arbitrated and remove them from the live transaction tables
+ */
+ private void updateLiveTransactionsAndStatus() {
+
+ // Go through each of the transactions
+ for (Iterator<Map.Entry<Long, Transaction>> iter = liveTransactionBySequenceNumberTable.entrySet().iterator(); iter.hasNext();) {
+ Transaction transaction = iter.next().getValue();
+
+ // Check if the transaction is dead
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(transaction.getArbitrator());
+ if ((lastTransactionNumber != NULL) && (lastTransactionNumber >= transaction.getSequenceNumber())) {
+
+ // Set dead the transaction
+ transaction.setDead();
+
+ // Remove the transaction from the live table
+ iter.remove();
+ liveTransactionByTransactionIdTable.remove(transaction.getId());
+ }
+ }
+
+ // Go through each of the transactions
+ for (Iterator<Map.Entry<Long, TransactionStatus>> iter = outstandingTransactionStatus.entrySet().iterator(); iter.hasNext();) {
+ TransactionStatus status = iter.next().getValue();
+
+ // Check if the transaction is dead
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(status.getTransactionArbitrator());
+ if ((lastTransactionNumber != NULL) && (lastTransactionNumber >= status.getTransactionSequenceNumber())) {
+
+ // Set committed
+ status.setStatus(TransactionStatus.StatusCommitted);
+
+ // Remove
+ iter.remove();
+ }
+ }
+ }
+
+ /**
+ * Process this slot, entry by entry. Also update the last message seen for the machine that sent the slot
+ */
+ private void processSlot(SlotIndexer indexer, Slot slot, bool acceptUpdatesToLocal, HashSet<Long> machineSet) {
+
+ // Update the last message seen
+ updateLastMessage(slot.getMachineID(), slot.getSequenceNumber(), slot, acceptUpdatesToLocal, machineSet);
+
+ // Process each entry in the slot
+ for (Entry entry : slot.getEntries()) {
+ switch (entry.getType()) {
+
+ case Entry.TypeCommitPart:
+ processEntry((CommitPart)entry);
+ break;
+
+ case Entry.TypeAbort:
+ processEntry((Abort)entry);
+ break;
+
+ case Entry.TypeTransactionPart:
+ processEntry((TransactionPart)entry);
+ break;
+
+ case Entry.TypeNewKey:
+ processEntry((NewKey)entry);
+ break;
+
+ case Entry.TypeLastMessage:
+ processEntry((LastMessage)entry, machineSet);
+ break;
+
+ case Entry.TypeRejectedMessage:
+ processEntry((RejectedMessage)entry, indexer);
+ break;
+
+ case Entry.TypeTableStatus:
+ processEntry((TableStatus)entry, slot.getSequenceNumber());
+ break;
+
+ default:
+ throw new Error("Unrecognized type: " + entry.getType());
+ }
+ }
+ }
+
+ /**
+ * Update the last message that was sent for a machine Id
+ */
+ private void processEntry(LastMessage entry, HashSet<Long> machineSet) {
+ // Update what the last message received by a machine was
+ updateLastMessage(entry.getMachineID(), entry.getSequenceNumber(), entry, false, machineSet);
+ }
+
+ /**
+ * Add the new key to the arbitrators table and update the set of live new keys (in case of a rescued new key message)
+ */
+ private void processEntry(NewKey entry) {
+
+ // Update the arbitrator table with the new key information
+ arbitratorTable.put(entry.getKey(), entry.getMachineID());
+
+ // Update what the latest live new key is
+ NewKey oldNewKey = liveNewKeyTable.put(entry.getKey(), entry);
+ if (oldNewKey != NULL) {
+ // Delete the old new key messages
+ oldNewKey.setDead();
+ }
+ }
+
+ /**
+ * Process new table status entries and set dead the old ones as new ones come in.
+ * Also keeps track of the latest live table status seen in this current round of
+ * updating the local copy of the block chain
+ */
+ private void processEntry(TableStatus entry, int64_t seq) {
+ int newNumSlots = entry.getMaxSlots();
+ updateCurrMaxSize(newNumSlots);
+
+ initExpectedSize(seq, newNumSlots);
+
+ if (liveTableStatus != NULL) {
+ // We have a newer table status so the old table status is no longer alive
+ liveTableStatus.setDead();
+ }
+
+ // Make this new table status the latest alive table status
+ liveTableStatus = entry;
+ }
+
+ /**
+ * Check old messages to see if there is a block chain violation. Also build the set of
+ * clients that have not yet seen this rejected message so it can be watched.
+ */
+ private void processEntry(RejectedMessage entry, SlotIndexer indexer) {
+ int64_t oldSeqNum = entry.getOldSeqNum();
+ int64_t newSeqNum = entry.getNewSeqNum();
+ bool isequal = entry.getEqual();
+ int64_t machineId = entry.getMachineID();
+ int64_t seq = entry.getSequenceNumber();
+
+
+ // Check if we have messages that were supposed to be rejected in our local block chain
+ for (int64_t seqNum = oldSeqNum; seqNum <= newSeqNum; seqNum++) {
+
+ // Get the slot
+ Slot slot = indexer.getSlot(seqNum);
+
+ if (slot != NULL) {
+ // If we have this slot make sure that it was not supposed to be a rejected slot
+
+ int64_t slotMachineId = slot.getMachineID();
+ if (isequal != (slotMachineId == machineId)) {
+ throw new Error("Server Error: Trying to insert rejected message for slot " + seqNum);
+ }
+ }
+ }
+
+
+ // Create a list of clients to watch until they see this rejected message entry.
+ HashSet<Long> deviceWatchSet = new HashSet<Long>();
+ for (Map.Entry<Long, Pair<Long, Liveness>> lastMessageEntry : lastMessageTable.entrySet()) {
+
+ // Machine ID for the last message entry
+ int64_t lastMessageEntryMachineId = lastMessageEntry.getKey();
+
+ // We've seen it, don't need to continue to watch. Our next
+ // message will implicitly acknowledge it.
+ if (lastMessageEntryMachineId == localMachineId) {
+ continue;
+ }
+
+ Pair<Long, Liveness> lastMessageValue = lastMessageEntry.getValue();
+ int64_t entrySequenceNumber = lastMessageValue.getFirst();
+
+ if (entrySequenceNumber < seq) {
+
+ // Add this rejected message to the set of messages that this machine ID did not see yet
+ addWatchList(lastMessageEntryMachineId, entry);
+
+ // This client did not see this rejected message yet so add it to the watch set to monitor
+ deviceWatchSet.add(lastMessageEntryMachineId);
+ }
+ }
+
+ if (deviceWatchSet.isEmpty()) {
+ // This rejected message has been seen by all the clients, so it is dead
+ entry.setDead();
+ } else {
+ // We need to watch this rejected message
+ entry.setWatchSet(deviceWatchSet);
+ }
+ }
+
+ /**
+ * Check if this abort is still live; if so, save it so it can be set dead once the targeted
+ * client has seen it. Also update the last transaction number that was arbitrated on.
+ */
+ private void processEntry(Abort entry) {
+
+
+ if (entry.getTransactionSequenceNumber() != -1) {
+ // update the transaction status if it was sent to the server
+ TransactionStatus status = outstandingTransactionStatus.remove(entry.getTransactionSequenceNumber());
+ if (status != NULL) {
+ status.setStatus(TransactionStatus.StatusAborted);
+ }
+ }
+
+ // Abort has not been seen by the client it is for yet so we need to keep track of it
+ Abort previouslySeenAbort = liveAbortTable.put(entry.getAbortId(), entry);
+ if (previouslySeenAbort != NULL) {
+ previouslySeenAbort.setDead(); // Delete old version of the abort since we got a rescued newer version
+ }
+
+ if (entry.getTransactionArbitrator() == localMachineId) {
+ liveAbortsGeneratedByLocal.put(entry.getArbitratorLocalSequenceNumber(), entry);
+ }
+
+ if ((entry.getSequenceNumber() != -1) && (lastMessageTable.get(entry.getTransactionMachineId()).getFirst() >= entry.getSequenceNumber())) {
+
+ // The machine already saw this so it is dead
+ entry.setDead();
+ liveAbortTable.remove(entry.getAbortId());
+
+ if (entry.getTransactionArbitrator() == localMachineId) {
+ liveAbortsGeneratedByLocal.remove(entry.getArbitratorLocalSequenceNumber());
+ }
+
+ return;
+ }
+
+
+
+
+ // Update the last arbitration data that we have seen so far
+ if (lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(entry.getTransactionArbitrator()) != NULL) {
+
+ int64_t lastArbitrationSequenceNumber = lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.get(entry.getTransactionArbitrator());
+ if (entry.getSequenceNumber() > lastArbitrationSequenceNumber) {
+ // Is larger
+ lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.put(entry.getTransactionArbitrator(), entry.getSequenceNumber());
+ }
+ } else {
+ // Never seen any data from this arbitrator so record the first one
+ lastArbitrationDataLocalSequenceNumberSeenFromArbitrator.put(entry.getTransactionArbitrator(), entry.getSequenceNumber());
+ }
+
+
+ // Set dead a transaction if we can
+ Transaction transactionToSetDead = liveTransactionByTransactionIdTable.remove(new Pair<Long, Long>(entry.getTransactionMachineId(), entry.getTransactionClientLocalSequenceNumber()));
+ if (transactionToSetDead != NULL) {
+ liveTransactionBySequenceNumberTable.remove(transactionToSetDead.getSequenceNumber());
+ }
+
+ // Update the last transaction sequence number that the arbitrator arbitrated on
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(entry.getTransactionArbitrator());
+ if ((lastTransactionNumber == NULL) || (lastTransactionNumber < entry.getTransactionSequenceNumber())) {
+
+ // Is a valid one
+ if (entry.getTransactionSequenceNumber() != -1) {
+ lastArbitratedTransactionNumberByArbitratorTable.put(entry.getTransactionArbitrator(), entry.getTransactionSequenceNumber());
+ }
+ }
+ }
+
+ /**
+ * Set dead the transaction part if that transaction is dead and keep track of all new parts
+ */
+ private void processEntry(TransactionPart entry) {
+ // Check if this transaction has already been arbitrated on; if so, this part is dead
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(entry.getArbitratorId());
+ if ((lastTransactionNumber != NULL) && (lastTransactionNumber >= entry.getSequenceNumber())) {
+ // This transaction is dead, it was already committed or aborted
+ entry.setDead();
+ return;
+ }
+
+ // This part is still alive
+ Map<Pair<Long, Integer>, TransactionPart> transactionPart = newTransactionParts.get(entry.getMachineId());
+
+ if (transactionPart == NULL) {
+ // Don't have a table for this machine Id yet so make one
+ transactionPart = new HashMap<Pair<Long, Integer>, TransactionPart>();
+ newTransactionParts.put(entry.getMachineId(), transactionPart);
+ }
+
+ // Update the part and set dead ones we have already seen (got a rescued version)
+ TransactionPart previouslySeenPart = transactionPart.put(entry.getPartId(), entry);
+ if (previouslySeenPart != NULL) {
+ previouslySeenPart.setDead();
+ }
+ }
+
+ /**
+ * Process new commit entries and save them for future use. Delete duplicates
+ */
+ private void processEntry(CommitPart entry) {
+
+
+ // Update the last transaction that was updated if we can
+ if (entry.getTransactionSequenceNumber() != -1) {
+ Long lastTransactionNumber = lastArbitratedTransactionNumberByArbitratorTable.get(entry.getMachineId());
+
+ // Update the last transaction sequence number that the arbitrator arbitrated on
+ if ((lastTransactionNumber == NULL) || (lastTransactionNumber < entry.getTransactionSequenceNumber())) {
+ lastArbitratedTransactionNumberByArbitratorTable.put(entry.getMachineId(), entry.getTransactionSequenceNumber());
+ }
+ }
+
+
+
+
+ Map<Pair<Long, Integer>, CommitPart> commitPart = newCommitParts.get(entry.getMachineId());
+
+ if (commitPart == NULL) {
+ // Don't have a table for this machine Id yet so make one
+ commitPart = new HashMap<Pair<Long, Integer>, CommitPart>();
+ newCommitParts.put(entry.getMachineId(), commitPart);
+ }
+
+ // Update the part and set dead ones we have already seen (got a rescued version)
+ CommitPart previouslySeenPart = commitPart.put(entry.getPartId(), entry);
+ if (previouslySeenPart != NULL) {
+ previouslySeenPart.setDead();
+ }
+ }
+
+ /**
+ * Update the last-message-seen table. Set dead the RejectedMessage entries this client has now
+ * seen, and set dead (and remove) the live aborts whose target machine has seen them.
+ * Also check that the last message seen is consistent: our own last message must not mismatch,
+ * and other clients must not have rolled back their last message.
+ */
+ private void updateLastMessage(int64_t machineId, int64_t seqNum, Liveness liveness, bool acceptUpdatesToLocal, HashSet<Long> machineSet) {
+
+ // We have seen this machine ID
+ machineSet.remove(machineId);
+
+ // Get the set of rejected messages that this machine Id has not seen yet
+ HashSet<RejectedMessage> watchset = rejectedMessageWatchListTable.get(machineId);
+
+ // If there is a rejected message that this machine Id has not seen yet
+ if (watchset != NULL) {
+
+ // Go through each rejected message that this machine Id has not seen yet
+ for (Iterator<RejectedMessage> rmit = watchset.iterator(); rmit.hasNext(); ) {
+
+ RejectedMessage rm = rmit.next();
+
+ // If this machine Id has seen this rejected message...
+ if (rm.getSequenceNumber() <= seqNum) {
+
+ // Remove it from our watchlist
+ rmit.remove();
+
+ // Decrement machines that need to see this notification
+ rm.removeWatcher(machineId);
+ }
+ }
+ }
+
+ // Set dead the aborts that this machine has now seen
+ for (Iterator<Map.Entry<Pair<Long, Long>, Abort>> i = liveAbortTable.entrySet().iterator(); i.hasNext();) {
+ Abort abort = i.next().getValue();
+
+ if ((abort.getTransactionMachineId() == machineId) && (abort.getSequenceNumber() <= seqNum)) {
+ abort.setDead();
+ i.remove();
+
+ if (abort.getTransactionArbitrator() == localMachineId) {
+ liveAbortsGeneratedByLocal.remove(abort.getArbitratorLocalSequenceNumber());
+ }
+ }
+ }
+
+
+
+ if (machineId == localMachineId) {
+ // Our own messages are immediately dead.
+ if (liveness instanceof LastMessage) {
+ ((LastMessage)liveness).setDead();
+ } else if (liveness instanceof Slot) {
+ ((Slot)liveness).setDead();
+ } else {
+ throw new Error("Unrecognized type");
+ }
+ }
+
+ // Get the old last message for this device
+ Pair<Long, Liveness> lastMessageEntry = lastMessageTable.put(machineId, new Pair<Long, Liveness>(seqNum, liveness));
+ if (lastMessageEntry == NULL) {
+ // If no last message then there is nothing else to process
+ return;
+ }
+
+ int64_t lastMessageSeqNum = lastMessageEntry.getFirst();
+ Liveness lastEntry = lastMessageEntry.getSecond();
+
+ // If it is not our machine Id since we already set ours to dead
+ if (machineId != localMachineId) {
+ if (lastEntry instanceof LastMessage) {
+ ((LastMessage)lastEntry).setDead();
+ } else if (lastEntry instanceof Slot) {
+ ((Slot)lastEntry).setDead();
+ } else {
+ throw new Error("Unrecognized type");
+ }
+ }
+
+ // Make sure the server is not playing any games
+ if (machineId == localMachineId) {
+
+ if (hadPartialSendToServer) {
+ // After a partial send the server may have newer slots from us, so only a rollback is an error
+ if (lastMessageSeqNum > seqNum && !acceptUpdatesToLocal) {
+ throw new Error("Server Error: Mismatch on local machine sequence number, needed at least: " + lastMessageSeqNum + " got: " + seqNum);
+ }
+
+ } else {
+ // We were not making any updates, so the sequence numbers must match exactly
+ if (lastMessageSeqNum != seqNum && !acceptUpdatesToLocal) {
+ throw new Error("Server Error: Mismatch on local machine sequence number, needed: " + lastMessageSeqNum + " got: " + seqNum);
+ }
+ }
+ } else {
+ if (lastMessageSeqNum > seqNum) {
+ throw new Error("Server Error: Rollback on remote machine sequence number");
+ }
+ }
+ }
+
+ /**
+ * Add a rejected message entry to the watch set to keep track of which clients have seen that
+ * rejected message entry and which have not.
+ */
+ private void addWatchList(int64_t machineId, RejectedMessage entry) {
+ HashSet<RejectedMessage> entries = rejectedMessageWatchListTable.get(machineId);
+ if (entries == NULL) {
+ // There is no set for this machine ID yet so create one
+ entries = new HashSet<RejectedMessage>();
+ rejectedMessageWatchListTable.put(machineId, entries);
+ }
+ entries.add(entry);
+ }
+
+ /**
+ * Check that the HMAC chain is not violated
+ */
+ private void checkHMACChain(SlotIndexer indexer, Slot[] newSlots) {
+ for (int i = 0; i < newSlots.length; i++) {
+ Slot currSlot = newSlots[i];
+ Slot prevSlot = indexer.getSlot(currSlot.getSequenceNumber() - 1);
+ if (prevSlot != NULL &&
+ !Arrays.equals(prevSlot.getHMAC(), currSlot.getPrevHMAC()))
+ throw new Error("Server Error: Invalid HMAC Chain" + currSlot + " " + prevSlot);
+ }
+ }
+}
--- /dev/null
+
+/**
+ * TableStatus entries record the current size of the data structure
+ * in slots. Used to remember the size and to perform resizes.
+ * @author Brian Demsky
+ * @version 1.0
+ */
+
+
+class TableStatus extends Entry {
+ int maxslots;
+
+ TableStatus(Slot slot, int _maxslots) {
+ super(slot);
+ maxslots=_maxslots;
+ }
+
+ int getMaxSlots() {
+ return maxslots;
+ }
+
+ static Entry decode(Slot slot, ByteBuffer bb) {
+ int maxslots=bb.getInt();
+ return new TableStatus(slot, maxslots);
+ }
+
+ void encode(ByteBuffer bb) {
+ bb.put(Entry.TypeTableStatus);
+ bb.putInt(maxslots);
+ }
+
+ int getSize() {
+ return sizeof(int32_t)+sizeof(char);
+ }
+
+ char getType() {
+ return Entry.TypeTableStatus;
+ }
+
+ Entry getCopy(Slot s) {
+ return new TableStatus(s, maxslots);
+ }
+}
--- /dev/null
+
+/**
+ * TableStatus entries record the current size of the data structure
+ * in slots. Used to remember the size and to perform resizes.
+ * @author Brian Demsky
+ * @version 1.0
+ */
+
+
+class TableStatus extends Entry {
+ private int maxslots;
+
+ TableStatus(Slot slot, int _maxslots) {
+ super(slot);
+ maxslots=_maxslots;
+ }
+
+ int getMaxSlots() {
+ return maxslots;
+ }
+
+ static Entry decode(Slot slot, ByteBuffer bb) {
+ int maxslots=bb.getInt();
+ return new TableStatus(slot, maxslots);
+ }
+
+ void encode(ByteBuffer bb) {
+ bb.put(Entry.TypeTableStatus);
+ bb.putInt(maxslots);
+ }
+
+ int getSize() {
+ return sizeof(int32_t)+sizeof(char);
+ }
+
+ char getType() {
+ return Entry.TypeTableStatus;
+ }
+
+ Entry getCopy(Slot s) {
+ return new TableStatus(s, maxslots);
+ }
+}
--- /dev/null
+
+class ThreeTuple<A, B, C> {
+ A a;
+ B b;
+ C c;
+
+ ThreeTuple(A a, B b, C c) {
+ this.a = a;
+ this.b = b;
+ this.c = c;
+ }
+
+ A getFirst() {
+ return a;
+ }
+
+ B getSecond() {
+ return b;
+ }
+
+ C getThird() {
+ return c;
+ }
+
+ String toString() {
+ return "<" + a + "," + b + "," + c + ">";
+ }
+}
--- /dev/null
+
+class ThreeTuple<A, B, C> {
+ private A a;
+ private B b;
+ private C c;
+
+ ThreeTuple(A a, B b, C c) {
+ this.a = a;
+ this.b = b;
+ this.c = c;
+ }
+
+ A getFirst() {
+ return a;
+ }
+
+ B getSecond() {
+ return b;
+ }
+
+ C getThird() {
+ return c;
+ }
+
+ public String toString() {
+ return "<" + a + "," + b + "," + c + ">";
+ }
+}
--- /dev/null
+
+
+class TimingSingleton {
+ static TimingSingleton singleton = new TimingSingleton( );
+ static int64_t startTime = 0;
+
+ static int64_t totalTime = 0;
+
+ TimingSingleton() {
+
+ }
+
+ static TimingSingleton getInstance( ) {
+ return singleton;
+ }
+
+
+ static void startTime( ) {
+ startTime = System.nanoTime();
+ }
+
+ static void endTime( ) {
+ totalTime += System.nanoTime() - startTime;
+
+ }
+
+ static int64_t getTime( ) {
+ return totalTime;
+ }
+
+
+}
--- /dev/null
+
+
+class TimingSingleton {
+ private static TimingSingleton singleton = new TimingSingleton( );
+ private static int64_t startTime = 0;
+
+ private static int64_t totalTime = 0;
+
+ private TimingSingleton() {
+
+ }
+
+ public static TimingSingleton getInstance( ) {
+ return singleton;
+ }
+
+
+ public static void startTime( ) {
+ startTime = System.nanoTime();
+ }
+
+ public static void endTime( ) {
+ totalTime += System.nanoTime() - startTime;
+
+ }
+
+ public static int64_t getTime( ) {
+ return totalTime;
+ }
+
+
+}
--- /dev/null
+
+
+class Transaction {
+
+ Map<Integer, TransactionPart> parts = NULL;
+ Set<Integer> missingParts = NULL;
+ List<Integer> partsPendingSend = NULL;
+ bool isComplete = false;
+ bool hasLastPart = false;
+ Set<KeyValue> keyValueGuardSet = NULL;
+ Set<KeyValue> keyValueUpdateSet = NULL;
+ bool isDead = false;
+ int64_t sequenceNumber = -1;
+ int64_t clientLocalSequenceNumber = -1;
+ int64_t arbitratorId = -1;
+ int64_t machineId = -1;
+ Pair<Long, Long> transactionId = NULL;
+
+ int nextPartToSend = 0;
+ bool didSendAPartToServer = false;
+
+ TransactionStatus transactionStatus = NULL;
+
+ bool hadServerFailure = false;
+
+ Transaction() {
+ parts = new HashMap<Integer, TransactionPart>();
+ keyValueGuardSet = new HashSet<KeyValue>();
+ keyValueUpdateSet = new HashSet<KeyValue>();
+ partsPendingSend = new ArrayList<Integer>();
+ }
+
+ void addPartEncode(TransactionPart newPart) {
+ parts.put(newPart.getPartNumber(), newPart);
+ partsPendingSend.add(newPart.getPartNumber());
+
+ sequenceNumber = newPart.getSequenceNumber();
+ arbitratorId = newPart.getArbitratorId();
+ transactionId = newPart.getTransactionId();
+ clientLocalSequenceNumber = newPart.getClientLocalSequenceNumber();
+ machineId = newPart.getMachineId();
+
+ isComplete = true;
+ }
+
+ void addPartDecode(TransactionPart newPart) {
+
+ if (isDead) {
+ // If dead then just kill this part and move on
+ newPart.setDead();
+ return;
+ }
+
+ sequenceNumber = newPart.getSequenceNumber();
+ arbitratorId = newPart.getArbitratorId();
+ transactionId = newPart.getTransactionId();
+ clientLocalSequenceNumber = newPart.getClientLocalSequenceNumber();
+ machineId = newPart.getMachineId();
+
+		TransactionPart previouslySeenPart = parts.put(newPart.getPartNumber(), newPart);
+
+		if (previouslySeenPart != NULL) {
+			// Mark the old one dead, since the new one is a rescued version of this part
+			previouslySeenPart.setDead();
+ } else if (newPart.isLastPart()) {
+ missingParts = new HashSet<Integer>();
+ hasLastPart = true;
+
+ for (int i = 0; i < newPart.getPartNumber(); i++) {
+ if (parts.get(i) == NULL) {
+ missingParts.add(i);
+ }
+ }
+ }
+
+ if (!isComplete && hasLastPart) {
+
+ // We have seen this part so remove it from the set of missing parts
+ missingParts.remove(newPart.getPartNumber());
+
+ // Check if all the parts have been seen
+ if (missingParts.size() == 0) {
+
+ // We have all the parts
+ isComplete = true;
+
+ // Decode all the parts and create the key value guard and update sets
+ decodeTransactionData();
+ }
+ }
+ }
+
+ void addUpdateKV(KeyValue kv) {
+ keyValueUpdateSet.add(kv);
+ }
+
+ void addGuardKV(KeyValue kv) {
+ keyValueGuardSet.add(kv);
+ }
+
+
+ int64_t getSequenceNumber() {
+ return sequenceNumber;
+ }
+
+ void setSequenceNumber(int64_t _sequenceNumber) {
+ sequenceNumber = _sequenceNumber;
+
+ for (Integer i : parts.keySet()) {
+ parts.get(i).setSequenceNumber(sequenceNumber);
+ }
+ }
+
+ int64_t getClientLocalSequenceNumber() {
+ return clientLocalSequenceNumber;
+ }
+
+ Map<Integer, TransactionPart> getParts() {
+ return parts;
+ }
+
+ bool didSendAPartToServer() {
+ return didSendAPartToServer;
+ }
+
+ void resetNextPartToSend() {
+ nextPartToSend = 0;
+ }
+
+ TransactionPart getNextPartToSend() {
+ if ((partsPendingSend.size() == 0) || (partsPendingSend.size() == nextPartToSend)) {
+ return NULL;
+ }
+ TransactionPart part = parts.get(partsPendingSend.get(nextPartToSend));
+ nextPartToSend++;
+ return part;
+ }
+
+
+ void setServerFailure() {
+ hadServerFailure = true;
+ }
+
+ bool getServerFailure() {
+ return hadServerFailure;
+ }
+
+
+ void resetServerFailure() {
+ hadServerFailure = false;
+ }
+
+
+ void setTransactionStatus(TransactionStatus _transactionStatus) {
+ transactionStatus = _transactionStatus;
+ }
+
+ TransactionStatus getTransactionStatus() {
+ return transactionStatus;
+ }
+
+ void removeSentParts(List<Integer> sentParts) {
+ nextPartToSend = 0;
+		if (partsPendingSend.removeAll(sentParts)) {
+ didSendAPartToServer = true;
+ transactionStatus.setTransactionSequenceNumber(sequenceNumber);
+ }
+ }
+
+ bool didSendAllParts() {
+ return partsPendingSend.isEmpty();
+ }
+
+ Set<KeyValue> getKeyValueUpdateSet() {
+ return keyValueUpdateSet;
+ }
+
+ int getNumberOfParts() {
+ return parts.size();
+ }
+
+ int64_t getMachineId() {
+ return machineId;
+ }
+
+ int64_t getArbitrator() {
+ return arbitratorId;
+ }
+
+ bool isComplete() {
+ return isComplete;
+ }
+
+ Pair<Long, Long> getId() {
+ return transactionId;
+ }
+
+ void setDead() {
+ if (isDead) {
+ // Already dead
+ return;
+ }
+
+ // Set dead
+ isDead = true;
+
+ // Make all the parts of this transaction dead
+ for (Integer partNumber : parts.keySet()) {
+ TransactionPart part = parts.get(partNumber);
+ part.setDead();
+ }
+ }
+
+ TransactionPart getPart(int index) {
+ return parts.get(index);
+ }
+
+ void decodeTransactionData() {
+
+ // Calculate the size of the data section
+ int dataSize = 0;
+ for (int i = 0; i < parts.keySet().size(); i++) {
+ TransactionPart tp = parts.get(i);
+ dataSize += tp.getDataSize();
+ }
+
+ char[] combinedData = new char[dataSize];
+ int currentPosition = 0;
+
+ // Stitch all the data sections together
+ for (int i = 0; i < parts.keySet().size(); i++) {
+ TransactionPart tp = parts.get(i);
+ System.arraycopy(tp.getData(), 0, combinedData, currentPosition, tp.getDataSize());
+ currentPosition += tp.getDataSize();
+ }
+
+ // Decoder Object
+ ByteBuffer bbDecode = ByteBuffer.wrap(combinedData);
+
+ // Decode how many key value pairs need to be decoded
+ int numberOfKVGuards = bbDecode.getInt();
+ int numberOfKVUpdates = bbDecode.getInt();
+
+ // Decode all the guard key values
+ for (int i = 0; i < numberOfKVGuards; i++) {
+ KeyValue kv = (KeyValue)KeyValue.decode(bbDecode);
+ keyValueGuardSet.add(kv);
+ }
+
+ // Decode all the updates key values
+ for (int i = 0; i < numberOfKVUpdates; i++) {
+ KeyValue kv = (KeyValue)KeyValue.decode(bbDecode);
+ keyValueUpdateSet.add(kv);
+ }
+ }
+
+ bool evaluateGuard(Map<IoTString, KeyValue> committedKeyValueTable, Map<IoTString, KeyValue> speculatedKeyValueTable, Map<IoTString, KeyValue> pendingTransactionSpeculatedKeyValueTable) {
+ for (KeyValue kvGuard : keyValueGuardSet) {
+
+ // First check if the key is in the speculative table, this is the value of the latest assumption
+ KeyValue kv = NULL;
+
+ // If we have a speculation table then use it first
+ if (pendingTransactionSpeculatedKeyValueTable != NULL) {
+ kv = pendingTransactionSpeculatedKeyValueTable.get(kvGuard.getKey());
+ }
+
+			// Otherwise fall back to the speculated key value table (the lookup precedence is summarized after this method)
+ if ((kv == NULL) && (speculatedKeyValueTable != NULL)) {
+ kv = speculatedKeyValueTable.get(kvGuard.getKey());
+ }
+
+ if (kv == NULL) {
+ // if it is not in the speculative table then check the committed table and use that
+ // value as our latest assumption
+ kv = committedKeyValueTable.get(kvGuard.getKey());
+ }
+
+ if (kvGuard.getValue() != NULL) {
+ if ((kv == NULL) || (!kvGuard.getValue().equals(kv.getValue()))) {
+
+
+ if (kv != NULL) {
+ System.out.println(kvGuard.getValue() + " " + kv.getValue());
+ } else {
+ System.out.println(kvGuard.getValue() + " " + kv);
+ }
+
+ return false;
+ }
+ } else {
+ if (kv != NULL) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
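+
+	// Lookup precedence used above when evaluating a guard (a summary of the code, not an
+	// addition to it): for each guard key, the latest assumed value is taken from the first
+	// of these tables that is non-NULL and contains the key:
+	//   1. pendingTransactionSpeculatedKeyValueTable
+	//   2. speculatedKeyValueTable
+	//   3. committedKeyValueTable
+	// The guard passes only if every guard entry matches the value found this way, or
+	// finds no value at all when the guard expects NULL.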
+}
--- /dev/null
+
+
+class Transaction {
+
+ private Map<Integer, TransactionPart> parts = NULL;
+ private Set<Integer> missingParts = NULL;
+ private List<Integer> partsPendingSend = NULL;
+ private bool isComplete = false;
+ private bool hasLastPart = false;
+ private Set<KeyValue> keyValueGuardSet = NULL;
+ private Set<KeyValue> keyValueUpdateSet = NULL;
+ private bool isDead = false;
+ private int64_t sequenceNumber = -1;
+ private int64_t clientLocalSequenceNumber = -1;
+ private int64_t arbitratorId = -1;
+ private int64_t machineId = -1;
+ private Pair<Long, Long> transactionId = NULL;
+
+ private int nextPartToSend = 0;
+ private bool didSendAPartToServer = false;
+
+ private TransactionStatus transactionStatus = NULL;
+
+ private bool hadServerFailure = false;
+
+ public Transaction() {
+ parts = new HashMap<Integer, TransactionPart>();
+ keyValueGuardSet = new HashSet<KeyValue>();
+ keyValueUpdateSet = new HashSet<KeyValue>();
+ partsPendingSend = new ArrayList<Integer>();
+ }
+
+ public void addPartEncode(TransactionPart newPart) {
+ parts.put(newPart.getPartNumber(), newPart);
+ partsPendingSend.add(newPart.getPartNumber());
+
+ sequenceNumber = newPart.getSequenceNumber();
+ arbitratorId = newPart.getArbitratorId();
+ transactionId = newPart.getTransactionId();
+ clientLocalSequenceNumber = newPart.getClientLocalSequenceNumber();
+ machineId = newPart.getMachineId();
+
+ isComplete = true;
+ }
+
+ public void addPartDecode(TransactionPart newPart) {
+
+ if (isDead) {
+ // If dead then just kill this part and move on
+ newPart.setDead();
+ return;
+ }
+
+ sequenceNumber = newPart.getSequenceNumber();
+ arbitratorId = newPart.getArbitratorId();
+ transactionId = newPart.getTransactionId();
+ clientLocalSequenceNumber = newPart.getClientLocalSequenceNumber();
+ machineId = newPart.getMachineId();
+
+		TransactionPart previouslySeenPart = parts.put(newPart.getPartNumber(), newPart);
+
+		if (previouslySeenPart != NULL) {
+			// Mark the old one dead, since the new one is a rescued version of this part
+			previouslySeenPart.setDead();
+ } else if (newPart.isLastPart()) {
+ missingParts = new HashSet<Integer>();
+ hasLastPart = true;
+
+ for (int i = 0; i < newPart.getPartNumber(); i++) {
+ if (parts.get(i) == NULL) {
+ missingParts.add(i);
+ }
+ }
+ }
+
+ if (!isComplete && hasLastPart) {
+
+ // We have seen this part so remove it from the set of missing parts
+ missingParts.remove(newPart.getPartNumber());
+
+ // Check if all the parts have been seen
+ if (missingParts.size() == 0) {
+
+ // We have all the parts
+ isComplete = true;
+
+ // Decode all the parts and create the key value guard and update sets
+ decodeTransactionData();
+ }
+ }
+ }
+
+ public void addUpdateKV(KeyValue kv) {
+ keyValueUpdateSet.add(kv);
+ }
+
+ public void addGuardKV(KeyValue kv) {
+ keyValueGuardSet.add(kv);
+ }
+
+
+ public int64_t getSequenceNumber() {
+ return sequenceNumber;
+ }
+
+ public void setSequenceNumber(int64_t _sequenceNumber) {
+ sequenceNumber = _sequenceNumber;
+
+ for (Integer i : parts.keySet()) {
+ parts.get(i).setSequenceNumber(sequenceNumber);
+ }
+ }
+
+ public int64_t getClientLocalSequenceNumber() {
+ return clientLocalSequenceNumber;
+ }
+
+ public Map<Integer, TransactionPart> getParts() {
+ return parts;
+ }
+
+ public bool didSendAPartToServer() {
+ return didSendAPartToServer;
+ }
+
+ public void resetNextPartToSend() {
+ nextPartToSend = 0;
+ }
+
+ public TransactionPart getNextPartToSend() {
+ if ((partsPendingSend.size() == 0) || (partsPendingSend.size() == nextPartToSend)) {
+ return NULL;
+ }
+ TransactionPart part = parts.get(partsPendingSend.get(nextPartToSend));
+ nextPartToSend++;
+ return part;
+ }
+
+
+ public void setServerFailure() {
+ hadServerFailure = true;
+ }
+
+ public bool getServerFailure() {
+ return hadServerFailure;
+ }
+
+
+ public void resetServerFailure() {
+ hadServerFailure = false;
+ }
+
+
+ public void setTransactionStatus(TransactionStatus _transactionStatus) {
+ transactionStatus = _transactionStatus;
+ }
+
+ public TransactionStatus getTransactionStatus() {
+ return transactionStatus;
+ }
+
+ public void removeSentParts(List<Integer> sentParts) {
+ nextPartToSend = 0;
+		if (partsPendingSend.removeAll(sentParts)) {
+ didSendAPartToServer = true;
+ transactionStatus.setTransactionSequenceNumber(sequenceNumber);
+ }
+ }
+
+ public bool didSendAllParts() {
+ return partsPendingSend.isEmpty();
+ }
+
+ public Set<KeyValue> getKeyValueUpdateSet() {
+ return keyValueUpdateSet;
+ }
+
+ public int getNumberOfParts() {
+ return parts.size();
+ }
+
+ public int64_t getMachineId() {
+ return machineId;
+ }
+
+ public int64_t getArbitrator() {
+ return arbitratorId;
+ }
+
+ public bool isComplete() {
+ return isComplete;
+ }
+
+ public Pair<Long, Long> getId() {
+ return transactionId;
+ }
+
+ public void setDead() {
+ if (isDead) {
+ // Already dead
+ return;
+ }
+
+ // Set dead
+ isDead = true;
+
+ // Make all the parts of this transaction dead
+ for (Integer partNumber : parts.keySet()) {
+ TransactionPart part = parts.get(partNumber);
+ part.setDead();
+ }
+ }
+
+ public TransactionPart getPart(int index) {
+ return parts.get(index);
+ }
+
+ private void decodeTransactionData() {
+
+ // Calculate the size of the data section
+ int dataSize = 0;
+ for (int i = 0; i < parts.keySet().size(); i++) {
+ TransactionPart tp = parts.get(i);
+ dataSize += tp.getDataSize();
+ }
+
+ char[] combinedData = new char[dataSize];
+ int currentPosition = 0;
+
+ // Stitch all the data sections together
+ for (int i = 0; i < parts.keySet().size(); i++) {
+ TransactionPart tp = parts.get(i);
+ System.arraycopy(tp.getData(), 0, combinedData, currentPosition, tp.getDataSize());
+ currentPosition += tp.getDataSize();
+ }
+
+ // Decoder Object
+ ByteBuffer bbDecode = ByteBuffer.wrap(combinedData);
+
+ // Decode how many key value pairs need to be decoded
+ int numberOfKVGuards = bbDecode.getInt();
+ int numberOfKVUpdates = bbDecode.getInt();
+
+ // Decode all the guard key values
+ for (int i = 0; i < numberOfKVGuards; i++) {
+ KeyValue kv = (KeyValue)KeyValue.decode(bbDecode);
+ keyValueGuardSet.add(kv);
+ }
+
+ // Decode all the updates key values
+ for (int i = 0; i < numberOfKVUpdates; i++) {
+ KeyValue kv = (KeyValue)KeyValue.decode(bbDecode);
+ keyValueUpdateSet.add(kv);
+ }
+ }
+
+ public bool evaluateGuard(Map<IoTString, KeyValue> committedKeyValueTable, Map<IoTString, KeyValue> speculatedKeyValueTable, Map<IoTString, KeyValue> pendingTransactionSpeculatedKeyValueTable) {
+ for (KeyValue kvGuard : keyValueGuardSet) {
+
+ // First check if the key is in the speculative table, this is the value of the latest assumption
+ KeyValue kv = NULL;
+
+ // If we have a speculation table then use it first
+ if (pendingTransactionSpeculatedKeyValueTable != NULL) {
+ kv = pendingTransactionSpeculatedKeyValueTable.get(kvGuard.getKey());
+ }
+
+			// Otherwise fall back to the speculated key value table
+ if ((kv == NULL) && (speculatedKeyValueTable != NULL)) {
+ kv = speculatedKeyValueTable.get(kvGuard.getKey());
+ }
+
+ if (kv == NULL) {
+ // if it is not in the speculative table then check the committed table and use that
+ // value as our latest assumption
+ kv = committedKeyValueTable.get(kvGuard.getKey());
+ }
+
+ if (kvGuard.getValue() != NULL) {
+ if ((kv == NULL) || (!kvGuard.getValue().equals(kv.getValue()))) {
+
+
+ if (kv != NULL) {
+ System.out.println(kvGuard.getValue() + " " + kv.getValue());
+ } else {
+ System.out.println(kvGuard.getValue() + " " + kv);
+ }
+
+ return false;
+ }
+ } else {
+ if (kv != NULL) {
+ return false;
+ }
+ }
+ }
+ return true;
+ }
+}
--- /dev/null
+
+
+class TransactionPart extends Entry {
+
+ // Max size of the part excluding the fixed size header
+ static final int MAX_NON_HEADER_SIZE = 512;
+
+ int64_t sequenceNumber = -1;
+ int64_t machineId = -1;
+ int64_t arbitratorId = -1;
+ int64_t clientLocalSequenceNumber = -1; // Sequence number of the transaction that this is a part of
+	int partNumber = -1; // Part's position within the transaction
+ Boolean isLastPart = false;
+
+ Pair<Long, Long> transactionId = NULL;
+ Pair<Long, Integer> partId = NULL;
+
+ char[] data = NULL;
+
+ TransactionPart(Slot s, int64_t _machineId, int64_t _arbitratorId, int64_t _clientLocalSequenceNumber, int _partNumber, char[] _data, Boolean _isLastPart) {
+ super(s);
+ machineId = _machineId;
+ arbitratorId = _arbitratorId;
+ clientLocalSequenceNumber = _clientLocalSequenceNumber;
+ partNumber = _partNumber;
+ data = _data;
+ isLastPart = _isLastPart;
+
+ transactionId = new Pair<Long, Long>(machineId, clientLocalSequenceNumber);
+ partId = new Pair<Long, Integer>(clientLocalSequenceNumber, partNumber);
+
+ }
+
+ int getSize() {
+ if (data == NULL) {
+ return (4 * sizeof(int64_t)) + (2 * sizeof(int32_t)) + (2 * sizeof(char));
+ }
+ return (4 * sizeof(int64_t)) + (2 * sizeof(int32_t)) + (2 * sizeof(char)) + data.length;
+ }
+
+ void setSlot(Slot s) {
+ parentslot = s;
+ }
+
+ Pair<Long, Long> getTransactionId() {
+ return transactionId;
+ }
+
+ int64_t getArbitratorId() {
+ return arbitratorId;
+ }
+
+ Pair<Long, Integer> getPartId() {
+ return partId;
+ }
+
+ int getPartNumber() {
+ return partNumber;
+ }
+
+ int getDataSize() {
+ return data.length;
+ }
+
+ char[] getData() {
+ return data;
+ }
+
+ Boolean isLastPart() {
+ return isLastPart;
+ }
+
+ int64_t getMachineId() {
+ return machineId;
+ }
+
+ int64_t getClientLocalSequenceNumber() {
+ return clientLocalSequenceNumber;
+ }
+
+
+ int64_t getSequenceNumber() {
+ return sequenceNumber;
+ }
+
+ void setSequenceNumber(int64_t _sequenceNumber) {
+ sequenceNumber = _sequenceNumber;
+ }
+
+ static Entry decode(Slot s, ByteBuffer bb) {
+ int64_t sequenceNumber = bb.getLong();
+ int64_t machineId = bb.getLong();
+ int64_t arbitratorId = bb.getLong();
+ int64_t clientLocalSequenceNumber = bb.getLong();
+ int partNumber = bb.getInt();
+ int dataSize = bb.getInt();
+ Boolean isLastPart = (bb.get() == 1);
+ // Get the data
+ char[] data = new char[dataSize];
+ bb.get(data);
+
+ TransactionPart returnTransactionPart = new TransactionPart(s, machineId, arbitratorId, clientLocalSequenceNumber, partNumber, data, isLastPart);
+ returnTransactionPart.setSequenceNumber(sequenceNumber);
+
+ return returnTransactionPart;
+ }
+
+ void encode(ByteBuffer bb) {
+ bb.put(Entry.TypeTransactionPart);
+ bb.putLong(sequenceNumber);
+ bb.putLong(machineId);
+ bb.putLong(arbitratorId);
+ bb.putLong(clientLocalSequenceNumber);
+ bb.putInt(partNumber);
+ bb.putInt(data.length);
+
+ if (isLastPart) {
+ bb.put((char)1);
+ } else {
+ bb.put((char)0);
+ }
+
+ bb.put(data);
+ }
+
+ char getType() {
+ return Entry.TypeTransactionPart;
+ }
+
+ Entry getCopy(Slot s) {
+
+ TransactionPart copyTransaction = new TransactionPart(s, machineId, arbitratorId, clientLocalSequenceNumber, partNumber, data, isLastPart);
+ copyTransaction.setSequenceNumber(sequenceNumber);
+
+ return copyTransaction;
+ }
+}
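+
+// Wire layout produced by encode() above (sizes follow the sizeof() convention in getSize()):
+//
+//   1 byte    Entry.TypeTransactionPart tag
+//   8 bytes   sequenceNumber
+//   8 bytes   machineId
+//   8 bytes   arbitratorId
+//   8 bytes   clientLocalSequenceNumber
+//   4 bytes   partNumber
+//   4 bytes   data length
+//   1 byte    isLastPart flag (1 or 0)
+//   n bytes   data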
--- /dev/null
+
+
+class TransactionPart extends Entry {
+
+ // Max size of the part excluding the fixed size header
+ public static final int MAX_NON_HEADER_SIZE = 512;
+
+ private int64_t sequenceNumber = -1;
+ private int64_t machineId = -1;
+ private int64_t arbitratorId = -1;
+ private int64_t clientLocalSequenceNumber = -1; // Sequence number of the transaction that this is a part of
+	private int partNumber = -1; // Part's position within the transaction
+ private Boolean isLastPart = false;
+
+ private Pair<Long, Long> transactionId = NULL;
+ private Pair<Long, Integer> partId = NULL;
+
+ private char[] data = NULL;
+
+ public TransactionPart(Slot s, int64_t _machineId, int64_t _arbitratorId, int64_t _clientLocalSequenceNumber, int _partNumber, char[] _data, Boolean _isLastPart) {
+ super(s);
+ machineId = _machineId;
+ arbitratorId = _arbitratorId;
+ clientLocalSequenceNumber = _clientLocalSequenceNumber;
+ partNumber = _partNumber;
+ data = _data;
+ isLastPart = _isLastPart;
+
+ transactionId = new Pair<Long, Long>(machineId, clientLocalSequenceNumber);
+ partId = new Pair<Long, Integer>(clientLocalSequenceNumber, partNumber);
+
+ }
+
+ public int getSize() {
+ if (data == NULL) {
+ return (4 * sizeof(int64_t)) + (2 * sizeof(int32_t)) + (2 * sizeof(char));
+ }
+ return (4 * sizeof(int64_t)) + (2 * sizeof(int32_t)) + (2 * sizeof(char)) + data.length;
+ }
+
+ public void setSlot(Slot s) {
+ parentslot = s;
+ }
+
+ public Pair<Long, Long> getTransactionId() {
+ return transactionId;
+ }
+
+ public int64_t getArbitratorId() {
+ return arbitratorId;
+ }
+
+ public Pair<Long, Integer> getPartId() {
+ return partId;
+ }
+
+ public int getPartNumber() {
+ return partNumber;
+ }
+
+ public int getDataSize() {
+ return data.length;
+ }
+
+ public char[] getData() {
+ return data;
+ }
+
+ public Boolean isLastPart() {
+ return isLastPart;
+ }
+
+ public int64_t getMachineId() {
+ return machineId;
+ }
+
+ public int64_t getClientLocalSequenceNumber() {
+ return clientLocalSequenceNumber;
+ }
+
+
+ public int64_t getSequenceNumber() {
+ return sequenceNumber;
+ }
+
+ public void setSequenceNumber(int64_t _sequenceNumber) {
+ sequenceNumber = _sequenceNumber;
+ }
+
+ static Entry decode(Slot s, ByteBuffer bb) {
+ int64_t sequenceNumber = bb.getLong();
+ int64_t machineId = bb.getLong();
+ int64_t arbitratorId = bb.getLong();
+ int64_t clientLocalSequenceNumber = bb.getLong();
+ int partNumber = bb.getInt();
+ int dataSize = bb.getInt();
+ Boolean isLastPart = (bb.get() == 1);
+ // Get the data
+ char[] data = new char[dataSize];
+ bb.get(data);
+
+ TransactionPart returnTransactionPart = new TransactionPart(s, machineId, arbitratorId, clientLocalSequenceNumber, partNumber, data, isLastPart);
+ returnTransactionPart.setSequenceNumber(sequenceNumber);
+
+ return returnTransactionPart;
+ }
+
+ public void encode(ByteBuffer bb) {
+ bb.put(Entry.TypeTransactionPart);
+ bb.putLong(sequenceNumber);
+ bb.putLong(machineId);
+ bb.putLong(arbitratorId);
+ bb.putLong(clientLocalSequenceNumber);
+ bb.putInt(partNumber);
+ bb.putInt(data.length);
+
+ if (isLastPart) {
+ bb.put((char)1);
+ } else {
+ bb.put((char)0);
+ }
+
+ bb.put(data);
+ }
+
+ public char getType() {
+ return Entry.TypeTransactionPart;
+ }
+
+ public Entry getCopy(Slot s) {
+
+ TransactionPart copyTransaction = new TransactionPart(s, machineId, arbitratorId, clientLocalSequenceNumber, partNumber, data, isLastPart);
+ copyTransaction.setSequenceNumber(sequenceNumber);
+
+ return copyTransaction;
+ }
+}
--- /dev/null
+
+class TransactionStatus {
+ static final char StatusAborted = 1;
+ static final char StatusPending = 2;
+ static final char StatusCommitted = 3;
+ // static final char StatusRetrying = 4;
+ static final char StatusSentPartial = 5;
+ static final char StatusSentFully = 6;
+ static final char StatusNoEffect = 10;
+
+ char status = 0;
+ bool applicationReleased = false;
+ bool wasSentInChain = false;
+ int64_t transactionSequenceNumber = 0;
+ int64_t arbitrator = -1;
+
+
+ TransactionStatus(char _status, int64_t _arbitrator) {
+ status = _status;
+ arbitrator = _arbitrator;
+ }
+
+ char getStatus() {
+ return status;
+ }
+
+ void setStatus(char _status) {
+ status = _status;
+ }
+
+ int64_t getTransactionSequenceNumber() {
+ return transactionSequenceNumber;
+ }
+
+ void setTransactionSequenceNumber(int64_t _transactionSequenceNumber) {
+ transactionSequenceNumber = _transactionSequenceNumber;
+ }
+
+ int64_t getTransactionArbitrator() {
+ return arbitrator;
+ }
+
+ void release() {
+ applicationReleased = true;
+ }
+
+ bool getReleased() {
+ return applicationReleased;
+ }
+}
--- /dev/null
+
+class TransactionStatus {
+ static final char StatusAborted = 1;
+ static final char StatusPending = 2;
+ static final char StatusCommitted = 3;
+ // static final char StatusRetrying = 4;
+ static final char StatusSentPartial = 5;
+ static final char StatusSentFully = 6;
+ static final char StatusNoEffect = 10;
+
+ private char status = 0;
+ private bool applicationReleased = false;
+ private bool wasSentInChain = false;
+ private int64_t transactionSequenceNumber = 0;
+ private int64_t arbitrator = -1;
+
+
+ public TransactionStatus(char _status, int64_t _arbitrator) {
+ status = _status;
+ arbitrator = _arbitrator;
+ }
+
+ public char getStatus() {
+ return status;
+ }
+
+ public void setStatus(char _status) {
+ status = _status;
+ }
+
+ public int64_t getTransactionSequenceNumber() {
+ return transactionSequenceNumber;
+ }
+
+ public void setTransactionSequenceNumber(int64_t _transactionSequenceNumber) {
+ transactionSequenceNumber = _transactionSequenceNumber;
+ }
+
+ public int64_t getTransactionArbitrator() {
+ return arbitrator;
+ }
+
+ public void release() {
+ applicationReleased = true;
+ }
+
+ public bool getReleased() {
+ return applicationReleased;
+ }
+}
--- /dev/null
+#ifndef ARRAY_H
+#define ARRAY_H
+#include "mymemory.h"
+
+template<typename type>
+class Array {
+ public:
+ Array() :
+ array(NULL),
+ size(0) {
+ }
+
+ Array(uint _size) :
+ array((type *) ourcalloc(1, sizeof(type) * _size)),
+ size(_size)
+ {
+ }
+
+ Array(type *_array, uint _size) :
+ array((type *) ourmalloc(sizeof(type) * _size)),
+ size(_size) {
+ memcpy(array, _array, _size * sizeof(type));
+ }
+
+ Array(Array<type> *_array) :
+ array((type *) ourmalloc(sizeof(type) * _array->size)),
+ size(_array->size) {
+ memcpy(array, _array->array, size * sizeof(type));
+ }
+
+ void init(uint _size) {
+ array = (type *) ourcalloc(1, sizeof(type) * _size);
+ size = _size;
+ }
+
+ void init(type *_array, uint _size) {
+ array = (type *) ourmalloc(sizeof(type) * _size);
+ size = _size;
+ memcpy(array, _array, _size * sizeof(type));
+ }
+
+ void init(Array<type> *_array) {
+ array = (type *) ourmalloc(sizeof(type) * _array->size);
+ size = _array->size;
+ memcpy(array, _array->array, size * sizeof(type));
+ }
+
+ ~Array() {
+ if (array)
+ ourfree(array);
+ }
+
+ type get(uint index) const {
+ return array[index];
+ }
+
+ void set(uint index, type item) {
+ array[index] = item;
+ }
+
+ uint length() const {
+ return size;
+ }
+
+ type *internalArray() {
+ return array;
+ }
+
+ private:
+ type *array;
+ uint size;
+};
+#endif
--- /dev/null
+#ifndef COMMON_H
+#define COMMON_H
+#include <inttypes.h>
+
+class Abort;
+class Entry;
+class Slot;
+class ByteBuffer;
+class Liveness;
+
+
+#endif
--- /dev/null
+# A few common Makefile items
+
+CC := gcc
+CXX := g++
+
+UNAME := $(shell uname)
+
+LIB_NAME := cons_comp
+LIB_SO := lib_$(LIB_NAME).so
+
+CPPFLAGS += -Wall -g -O0
+
+# Mac OSX options
+ifeq ($(UNAME), Darwin)
+CPPFLAGS += -D_XOPEN_SOURCE -DMAC
+endif
--- /dev/null
+/* Copyright (c) 2015 Regents of the University of California
+ *
+ * Author: Brian Demsky <bdemsky@uci.edu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef HASHSET_H
+#define HASHSET_H
+#include "hashtable.h"
+
+template<typename _Key>
+struct Linknode {
+ _Key key;
+ Linknode<_Key> *prev;
+ Linknode<_Key> *next;
+};
+
+template<typename _Key, typename _KeyInt, int _Shift, unsigned int (*hash_function) (_Key), bool (*equals) (_Key, _Key)>
+class Hashset;
+
+template<typename _Key, typename _KeyInt, int _Shift, unsigned int (*hash_function) (_Key) = defaultHashFunction<_Key, _Shift, _KeyInt>, bool (*equals) (_Key, _Key) = defaultEquals<_Key> >
+class SetIterator {
+public:
+ SetIterator(Linknode<_Key> *_curr, Hashset <_Key, _KeyInt, _Shift, hash_function, equals> *_set) :
+ curr(_curr),
+ set(_set)
+ {
+ }
+
+ /** Override: new operator */
+ void *operator new(size_t size) {
+ return ourmalloc(size);
+ }
+
+ /** Override: delete operator */
+ void operator delete(void *p, size_t size) {
+ ourfree(p);
+ }
+
+ /** Override: new[] operator */
+ void *operator new[](size_t size) {
+ return ourmalloc(size);
+ }
+
+ /** Override: delete[] operator */
+ void operator delete[](void *p, size_t size) {
+ ourfree(p);
+ }
+
+ bool hasNext() {
+ return curr != NULL;
+ }
+
+ _Key next() {
+ _Key k = curr->key;
+ last = curr;
+ curr = curr->next;
+ return k;
+ }
+
+ _Key currKey() {
+ return last->key;
+ }
+
+ void remove() {
+ _Key k = last->key;
+ set->remove(k);
+ }
+
+private:
+ Linknode<_Key> *curr;
+ Linknode<_Key> *last;
+ Hashset <_Key, _KeyInt, _Shift, hash_function, equals> *set;
+};
+
+template<typename _Key, typename _KeyInt, int _Shift = 0, unsigned int (*hash_function) (_Key) = defaultHashFunction<_Key, _Shift, _KeyInt>, bool (*equals) (_Key, _Key) = defaultEquals<_Key> >
+class Hashset {
+public:
+ Hashset(unsigned int initialcapacity = 16, double factor = 0.5) :
+ table(new Hashtable<_Key, Linknode<_Key> *, _KeyInt, _Shift, hash_function, equals>(initialcapacity, factor)),
+ list(NULL),
+ tail(NULL)
+ {
+ }
+
+ /** @brief Hashset destructor */
+ ~Hashset() {
+ Linknode<_Key> *tmp = list;
+ while (tmp != NULL) {
+ Linknode<_Key> *tmpnext = tmp->next;
+ ourfree(tmp);
+ tmp = tmpnext;
+ }
+ delete table;
+ }
+
+ Hashset<_Key, _KeyInt, _Shift, hash_function, equals> *copy() {
+ Hashset<_Key, _KeyInt, _Shift, hash_function, equals> *copy = new Hashset<_Key, _KeyInt, _Shift, hash_function, equals>(table->getCapacity(), table->getLoadFactor());
+ SetIterator<_Key, _KeyInt, _Shift, hash_function, equals> *it = iterator();
+ while (it->hasNext())
+ copy->add(it->next());
+ delete it;
+ return copy;
+ }
+
+ void reset() {
+ Linknode<_Key> *tmp = list;
+ while (tmp != NULL) {
+ Linknode<_Key> *tmpnext = tmp->next;
+ ourfree(tmp);
+ tmp = tmpnext;
+ }
+ list = tail = NULL;
+ table->reset();
+ }
+
+ void resetAndDelete() {
+ Linknode<_Key> *tmp = list;
+ while (tmp != NULL) {
+ Linknode<_Key> *tmpnext = tmp->next;
+ ourfree(tmp);
+ tmp = tmpnext;
+ }
+ list = tail = NULL;
+ table->resetAndDeleteKeys();
+ }
+
+	/** @brief Adds every key from the given hashset to this hashset. */
+
+	void addAll(Hashset<_Key, _KeyInt, _Shift, hash_function, equals> *table) {
+		SetIterator<_Key, _KeyInt, _Shift, hash_function, equals> *it = table->iterator();
+		while (it->hasNext())
+			add(it->next());
+		delete it;
+	}
+
+ /** @brief Adds a new key to the hashset. Returns false if the key
+ * is already present. */
+
+ bool add(_Key key) {
+ Linknode<_Key> *val = table->get(key);
+ if (val == NULL) {
+ Linknode<_Key> *newnode = (Linknode<_Key> *)ourmalloc(sizeof(struct Linknode<_Key>));
+ newnode->prev = tail;
+ newnode->next = NULL;
+ newnode->key = key;
+ if (tail != NULL)
+ tail->next = newnode;
+ else
+ list = newnode;
+ tail = newnode;
+ table->put(key, newnode);
+ return true;
+ } else
+ return false;
+ }
+
+ /** @brief Return random key from set. */
+
+ _Key getRandomElement() {
+ if (getSize() == 0)
+ return NULL;
+ else if (getSize() < 6) {
+ uint count = random() % getSize();
+ Linknode<_Key> *ptr = list;
+ while (count > 0) {
+ ptr = ptr->next;
+ count--;
+ }
+ return ptr->key;
+ } else
+ return table->getRandomValue()->key;
+ }
+
+ /** @brief Gets the original key corresponding to this one from the
+ * hashset. Returns NULL if not present. */
+
+ _Key get(_Key key) {
+ Linknode<_Key> *val = table->get(key);
+ if (val != NULL)
+ return val->key;
+ else
+ return NULL;
+ }
+
+ _Key getFirstKey() {
+ return list->key;
+ }
+
+ bool contains(_Key key) {
+ return table->get(key) != NULL;
+ }
+
+ bool remove(_Key key) {
+ Linknode<_Key> *oldlinknode;
+ oldlinknode = table->get(key);
+ if (oldlinknode == NULL) {
+ return false;
+ }
+ table->remove(key);
+
+ //remove link node from the list
+ if (oldlinknode->prev == NULL)
+ list = oldlinknode->next;
+ else
+ oldlinknode->prev->next = oldlinknode->next;
+ if (oldlinknode->next != NULL)
+ oldlinknode->next->prev = oldlinknode->prev;
+ else
+ tail = oldlinknode->prev;
+ ourfree(oldlinknode);
+ return true;
+ }
+
+ unsigned int getSize() const {
+ return table->getSize();
+ }
+
+ bool isEmpty() const {
+ return getSize() == 0;
+ }
+
+ SetIterator<_Key, _KeyInt, _Shift, hash_function, equals> *iterator() {
+ return new SetIterator<_Key, _KeyInt, _Shift, hash_function, equals>(list, this);
+ }
+
+ /** Override: new operator */
+ void *operator new(size_t size) {
+ return ourmalloc(size);
+ }
+
+ /** Override: delete operator */
+ void operator delete(void *p, size_t size) {
+ ourfree(p);
+ }
+
+ /** Override: new[] operator */
+ void *operator new[](size_t size) {
+ return ourmalloc(size);
+ }
+
+ /** Override: delete[] operator */
+ void operator delete[](void *p, size_t size) {
+ ourfree(p);
+ }
+private:
+ Hashtable<_Key, Linknode<_Key> *, _KeyInt, _Shift, hash_function, equals> *table;
+ Linknode<_Key> *list;
+ Linknode<_Key> *tail;
+};
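+
+/* Usage sketch (illustrative only; the concrete key type and shift are assumptions about
+ * how this header is instantiated elsewhere):
+ *
+ *   Hashset<Slot *, uintptr_t, 4> *live = new Hashset<Slot *, uintptr_t, 4>();
+ *   live->add(slot);                      // returns false if slot was already present
+ *   bool present = live->contains(slot);
+ *   SetIterator<Slot *, uintptr_t, 4> *it = live->iterator();
+ *   while (it->hasNext())
+ *       doSomethingWith(it->next());      // doSomethingWith is a placeholder
+ *   delete it;                            // iterators are heap-allocated
+ *   delete live;
+ */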
+#endif
--- /dev/null
+/* Copyright (c) 2015 Regents of the University of California
+ *
+ * Author: Brian Demsky <bdemsky@uci.edu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+/** @file hashtable.h
+ * @brief Hashtable. Open-addressing variety with linear probing.
+ */
+
+#ifndef HASHTABLE_H__
+#define HASHTABLE_H__
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "mymemory.h"
+#include "common.h"
+
+/**
+ * @brief Hashtable node
+ *
+ * @tparam _Key Type name for the key
+ * @tparam _Val Type name for the values to be stored
+ */
+template<typename _Key, typename _Val>
+struct Hashlistnode {
+ _Key key;
+ _Val val;
+ uint hashcode;
+};
+
+template<typename _Key, int _Shift, typename _KeyInt>
+inline unsigned int defaultHashFunction(_Key hash) {
+ return (unsigned int)(((_KeyInt)hash) >> _Shift);
+}
+
+template<typename _Key>
+inline bool defaultEquals(_Key key1, _Key key2) {
+ return key1 == key2;
+}
+
+/**
+ * @brief A simple, custom hash table
+ *
+ * Open-addressing table with linear probing; memory is allocated through
+ * ourmalloc/ourcalloc, and custom hash and equality functions may be supplied
+ * as template parameters. A key of 0 (NULL) cannot live in the probing array
+ * (0 marks an empty bin) and is therefore kept in a dedicated zero slot. The
+ * table is designed primarily with pointer-based keys in mind.
+ *
+ * @tparam _Key Type name for the key
+ * @tparam _Val Type name for the values to be stored
+ * @tparam _KeyInt Integer type that is at least as large as _Key. Used for key
+ * manipulation and storage.
+ * @tparam _Shift Logical shift to apply to all keys. Default 0.
+ */
+template<typename _Key, typename _Val, typename _KeyInt, int _Shift = 0, unsigned int (*hash_function) (_Key) = defaultHashFunction<_Key, _Shift, _KeyInt>, bool (*equals) (_Key, _Key) = defaultEquals<_Key> >
+class Hashtable {
+public:
+ /**
+ * @brief Hash table constructor
+ * @param initialcapacity Sets the initial capacity of the hash table.
+ * Default size 1024.
+ * @param factor Sets the percentage full before the hashtable is
+ * resized. Default ratio 0.5.
+ */
+ Hashtable(unsigned int initialcapacity = 1024, double factor = 0.5) {
+ // Allocate space for the hash table
+ table = (struct Hashlistnode<_Key, _Val> *)ourcalloc(initialcapacity, sizeof(struct Hashlistnode<_Key, _Val>));
+ zero = NULL;
+ loadfactor = factor;
+ capacity = initialcapacity;
+ capacitymask = initialcapacity - 1;
+
+ threshold = (unsigned int)(initialcapacity * loadfactor);
+ size = 0; // Initial number of elements in the hash
+ }
+
+ /** @brief Hash table destructor */
+ ~Hashtable() {
+ ourfree(table);
+ if (zero)
+ ourfree(zero);
+ }
+
+ /** Override: new operator */
+ void *operator new(size_t size) {
+ return ourmalloc(size);
+ }
+
+ /** Override: delete operator */
+ void operator delete(void *p, size_t size) {
+ ourfree(p);
+ }
+
+ /** Override: new[] operator */
+ void *operator new[](size_t size) {
+ return ourmalloc(size);
+ }
+
+ /** Override: delete[] operator */
+ void operator delete[](void *p, size_t size) {
+ ourfree(p);
+ }
+
+ /** @brief Reset the table to its initial state. */
+ void reset() {
+ memset(table, 0, capacity * sizeof(struct Hashlistnode<_Key, _Val>));
+ if (zero) {
+ ourfree(zero);
+ zero = NULL;
+ }
+ size = 0;
+ }
+
+ /** Doesn't work with zero value */
+ _Val getRandomValue() {
+ while (true) {
+ unsigned int index = random() & capacitymask;
+ struct Hashlistnode<_Key, _Val> *bin = &table[index];
+ if (bin->key != NULL && bin->val != NULL) {
+ return bin->val;
+ }
+ }
+ }
+
+ void resetAndDeleteKeys() {
+ for (unsigned int i = 0; i < capacity; i++) {
+ struct Hashlistnode<_Key, _Val> *bin = &table[i];
+ if (bin->key != NULL) {
+ delete bin->key;
+ bin->key = NULL;
+ if (bin->val != NULL) {
+ bin->val = NULL;
+ }
+ }
+ }
+ if (zero) {
+ ourfree(zero);
+ zero = NULL;
+ }
+ size = 0;
+ }
+
+ void resetAndDeleteVals() {
+ for (unsigned int i = 0; i < capacity; i++) {
+ struct Hashlistnode<_Key, _Val> *bin = &table[i];
+ if (bin->key != NULL) {
+ bin->key = NULL;
+ if (bin->val != NULL) {
+ delete bin->val;
+ bin->val = NULL;
+ }
+ }
+ }
+ if (zero) {
+ if (zero->val != NULL)
+ delete zero->val;
+ ourfree(zero);
+ zero = NULL;
+ }
+ size = 0;
+ }
+
+ void resetAndFreeVals() {
+ for (unsigned int i = 0; i < capacity; i++) {
+ struct Hashlistnode<_Key, _Val> *bin = &table[i];
+ if (bin->key != NULL) {
+ bin->key = NULL;
+ if (bin->val != NULL) {
+ ourfree(bin->val);
+ bin->val = NULL;
+ }
+ }
+ }
+ if (zero) {
+ if (zero->val != NULL)
+ ourfree(zero->val);
+ ourfree(zero);
+ zero = NULL;
+ }
+ size = 0;
+ }
+
+ /**
+ * @brief Put a key/value pair into the table
+ * @param key The key for the new value; must not be 0 or NULL
+ * @param val The value to store in the table
+ */
+ void put(_Key key, _Val val) {
+		/* 0 marks an empty bin in the probing array, so a 0 (NULL) key is kept in the dedicated zero slot */
+ if (!key) {
+ if (!zero) {
+ zero = (struct Hashlistnode<_Key, _Val> *)ourmalloc(sizeof(struct Hashlistnode<_Key, _Val>));
+ size++;
+ }
+ zero->key = key;
+ zero->val = val;
+ return;
+ }
+
+ if (size > threshold)
+ resize(capacity << 1);
+
+ struct Hashlistnode<_Key, _Val> *search;
+
+ unsigned int hashcode = hash_function(key);
+ unsigned int index = hashcode;
+ do {
+ index &= capacitymask;
+ search = &table[index];
+ if (!search->key) {
+				//empty (possibly tombstoned) bin: insert the new entry here
+ break;
+ }
+ if (search->hashcode == hashcode)
+ if (equals(search->key, key)) {
+ search->val = val;
+ return;
+ }
+ index++;
+ } while (true);
+
+ search->key = key;
+ search->val = val;
+ search->hashcode = hashcode;
+ size++;
+ }
+
+ /**
+ * @brief Lookup the corresponding value for the given key
+ * @param key The key for finding the value; must not be 0 or NULL
+ * @return The value in the table, if the key is found; otherwise 0
+ */
+ _Val get(_Key key) const {
+ struct Hashlistnode<_Key, _Val> *search;
+
+		/* A 0 (NULL) key lives in the dedicated zero slot */
+ if (!key) {
+ if (zero)
+ return zero->val;
+ else
+ return (_Val) 0;
+ }
+
+ unsigned int hashcode = hash_function(key);
+ unsigned int oindex = hashcode & capacitymask;
+ unsigned int index = oindex;
+ do {
+ search = &table[index];
+ if (!search->key) {
+ if (!search->val)
+ break;
+ } else
+ if (hashcode == search->hashcode)
+ if (equals(search->key, key))
+ return search->val;
+ index++;
+ index &= capacitymask;
+ if (index == oindex)
+ break;
+ } while (true);
+ return (_Val)0;
+ }
+
+ /**
+ * @brief Remove the given key and return the corresponding value
+ * @param key The key for finding the value; must not be 0 or NULL
+ * @return The value in the table, if the key is found; otherwise 0
+ */
+ _Val remove(_Key key) {
+ struct Hashlistnode<_Key, _Val> *search;
+
+		/* A 0 (NULL) key lives in the dedicated zero slot */
+ if (!key) {
+ if (!zero) {
+ return (_Val)0;
+ } else {
+ _Val v = zero->val;
+ ourfree(zero);
+ zero = NULL;
+ size--;
+ return v;
+ }
+ }
+
+
+ unsigned int hashcode = hash_function(key);
+ unsigned int index = hashcode;
+ do {
+ index &= capacitymask;
+ search = &table[index];
+ if (!search->key) {
+ if (!search->val)
+ break;
+ } else
+ if (hashcode == search->hashcode)
+ if (equals(search->key, key)) {
+ _Val v = search->val;
+ //empty out this bin
+ search->val = (_Val) 1;
+ search->key = 0;
+ size--;
+ return v;
+ }
+ index++;
+ } while (true);
+ return (_Val)0;
+ }
+
+ unsigned int getSize() const {
+ return size;
+ }
+
+
+ /**
+ * @brief Check whether the table contains a value for the given key
+ * @param key The key for finding the value; must not be 0 or NULL
+ * @return True, if the key is found; false otherwise
+ */
+ bool contains(_Key key) const {
+ struct Hashlistnode<_Key, _Val> *search;
+
+		/* A 0 (NULL) key lives in the dedicated zero slot */
+ if (!key) {
+ return zero != NULL;
+ }
+
+ unsigned int index = hash_function(key);
+ unsigned int hashcode = index;
+ do {
+ index &= capacitymask;
+ search = &table[index];
+ if (!search->key) {
+ if (!search->val)
+ break;
+ } else
+ if (hashcode == search->hashcode)
+ if (equals(search->key, key))
+ return true;
+ index++;
+ } while (true);
+ return false;
+ }
+
+ /**
+ * @brief Resize the table
+ * @param newsize The new size of the table
+ */
+ void resize(unsigned int newsize) {
+ struct Hashlistnode<_Key, _Val> *oldtable = table;
+ struct Hashlistnode<_Key, _Val> *newtable;
+ unsigned int oldcapacity = capacity;
+
+ if ((newtable = (struct Hashlistnode<_Key, _Val> *)ourcalloc(newsize, sizeof(struct Hashlistnode<_Key, _Val>))) == NULL) {
+ model_print("calloc error %s %d\n", __FILE__, __LINE__);
+ exit(EXIT_FAILURE);
+ }
+
+ table = newtable; // Update the global hashtable upon resize()
+ capacity = newsize;
+ capacitymask = newsize - 1;
+
+ threshold = (unsigned int)(newsize * loadfactor);
+
+ struct Hashlistnode<_Key, _Val> *bin = &oldtable[0];
+ struct Hashlistnode<_Key, _Val> *lastbin = &oldtable[oldcapacity];
+ for (; bin < lastbin; bin++) {
+ _Key key = bin->key;
+
+ struct Hashlistnode<_Key, _Val> *search;
+ if (!key)
+ continue;
+
+ unsigned int hashcode = bin->hashcode;
+ unsigned int index = hashcode;
+ do {
+ index &= capacitymask;
+ search = &table[index];
+ index++;
+ } while (search->key);
+
+ search->hashcode = hashcode;
+ search->key = key;
+ search->val = bin->val;
+ }
+
+ ourfree(oldtable); // Free the memory of the old hash table
+ }
+ double getLoadFactor() {return loadfactor;}
+ unsigned int getCapacity() {return capacity;}
+ struct Hashlistnode<_Key, _Val> *table;
+ struct Hashlistnode<_Key, _Val> *zero;
+ unsigned int capacity;
+ unsigned int size;
+private:
+ unsigned int capacitymask;
+ unsigned int threshold;
+ double loadfactor;
+};
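+
+/* Usage sketch (illustrative only; the key/value types and shift are assumptions about
+ * how this header is instantiated elsewhere):
+ *
+ *   Hashtable<Slot *, Entry *, uintptr_t, 4> *t =
+ *       new Hashtable<Slot *, Entry *, uintptr_t, 4>();
+ *   t->put(slot, entry);                  // replaces any previous value for slot
+ *   Entry *e = t->get(slot);              // (Entry *)0 when the key is absent
+ *   t->remove(slot);                      // returns the removed value, or 0
+ *   delete t;
+ */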
+
+#endif	/* HASHTABLE_H__ */
--- /dev/null
+#ifndef CPPVECTOR_H
+#define CPPVECTOR_H
+#include <string.h>
+#include "mymemory.h"
+
+#define VECTOR_DEFCAP 8
+
+template<typename type>
+class Vector {
+public:
+ Vector(uint _capacity = VECTOR_DEFCAP) :
+ size(0),
+ capacity(_capacity),
+ array((type *) ourmalloc(sizeof(type) * _capacity)) {
+ }
+
+ Vector(uint _capacity, type *_array) :
+ size(_capacity),
+ capacity(_capacity),
+ array((type *) ourmalloc(sizeof(type) * _capacity)) {
+ memcpy(array, _array, capacity * sizeof(type));
+ }
+
+ void pop() {
+ size--;
+ }
+
+ type last() const {
+ return array[size - 1];
+ }
+
+ void setSize(uint _size) {
+ if (_size <= size) {
+ size = _size;
+ return;
+ } else if (_size > capacity) {
+ array = (type *)ourrealloc(array, _size * sizeof(type));
+ capacity = _size;
+ }
+ bzero(&array[size], (_size - size) * sizeof(type));
+ size = _size;
+ }
+
+ void push(type item) {
+ if (size >= capacity) {
+ uint newcap = capacity << 1;
+ array = (type *)ourrealloc(array, newcap * sizeof(type));
+ capacity = newcap;
+ }
+ array[size++] = item;
+ }
+
+ type get(uint index) const {
+ return array[index];
+ }
+
+ void setExpand(uint index, type item) {
+ if (index >= size)
+ setSize(index + 1);
+ set(index, item);
+ }
+
+ void set(uint index, type item) {
+ array[index] = item;
+ }
+
+ uint getSize() const {
+ return size;
+ }
+
+ ~Vector() {
+ ourfree(array);
+ }
+
+ void clear() {
+ size = 0;
+ }
+
+ type *expose() {
+ return array;
+ }
+ CMEMALLOC;
+private:
+ uint size;
+ uint capacity;
+ type *array;
+};
+#endif