From c76be793b17d234cc803f89bdda90c89f57ce17e Mon Sep 17 00:00:00 2001
From: root <root@dw-6.eecs.uci.edu>
Date: Thu, 18 Jul 2019 19:37:38 -0700
Subject: [PATCH] More code towards supporting non-atomic stores

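Guard the shadow-memory helpers hasNonAtomicStore() and
setAtomicStoreFlag() against a zero shadow word: zero marks an address
that has never been written, so there is no RaceRecord to dereference.
Guard insertIntoActionList() against an empty list, where dereferencing
rbegin() is undefined.  Add insertIntoActionListAndSetCV(), which
inserts a normal write into the trace in sequence-number order and also
builds the write's clock vector from the matching action already in the
list; add_normal_write_to_lists() now uses it for action_trace.  The
remaining hunks are whitespace-only formatting changes.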
---
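Review notes (below the --- line, so git am drops them):

A minimal sketch of the empty-list hazard that the new
"rit == list->rend()" check in insertIntoActionList() closes, with the
real ModelAction* list simplified to std::list<int>; all names below
are illustrative, not from the codebase:

#include <cassert>
#include <list>

// Mirrors the fixed check: rule out an empty list before touching *rit.
static void insertAtTail(std::list<int> *list, int seq)
{
	std::list<int>::reverse_iterator rit = list->rbegin();
	// On an empty list rbegin() == rend(), so dereferencing rit
	// without the first half of this condition is undefined behavior.
	if (rit == list->rend() || *rit == seq)
		list->push_back(seq);
}

int main()
{
	std::list<int> trace;		// empty, like action_trace at startup
	insertAtTail(&trace, 1);	// guard short-circuits: no dereference
	assert(trace.size() == 1);
	return 0;
}

Similarly, the zero guards added in datarace.cc keep a never-written
shadow word from being reinterpreted as a RaceRecord pointer.  A sketch
of just that case, with the struct layout simplified (only the two
fields the patch touches are kept):

#include <cstdint>

// Simplified stand-in for the real RaceRecord in datarace.cc.
struct RaceRecord {
	bool isAtomic;
	uint32_t writeClock;
};

static bool hasNonAtomicFullRecord(uintptr_t shadowval)
{
	if (shadowval == 0)
		return false;	// never written: no RaceRecord behind this word
	RaceRecord *record = (RaceRecord *)shadowval;	// safe only once zero is ruled out
	return !record->isAtomic && record->writeClock != 0;
}

int main()
{
	return hasNonAtomicFullRecord(0) ? 1 : 0;	// guard makes this return 0
}
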
 cmodelint.cc        |  2 +-
 datarace.cc         |  4 ++++
 execution.cc        | 29 ++++++++++++++++++++++++++---
 hashset.h           |  2 +-
 hashtable.h         |  2 +-
 include/impatomic.h | 26 +++++++++++++-------------
 include/wildcard.h  |  4 ++--
 model.cc            | 18 +++++++++---------
 8 files changed, 57 insertions(+), 30 deletions(-)

diff --git a/cmodelint.cc b/cmodelint.cc
index 5b37e867..9faae534 100644
--- a/cmodelint.cc
+++ b/cmodelint.cc
@@ -114,7 +114,7 @@ CDSATOMICINT(64)
 #define CDSATOMICLOAD(size)                                             \
 	uint ## size ## _t cds_atomic_load ## size(void * obj, int atomic_index, const char * position) { \
 		ensureModel();                                                      \
-		return (uint ## size ## _t) model->switch_to_master( \
+		return (uint ## size ## _t)model->switch_to_master( \
 			new ModelAction(ATOMIC_READ, position, orders[atomic_index], obj)); \
 	}
 
diff --git a/datarace.cc b/datarace.cc
index af97a20c..58c52dd6 100644
--- a/datarace.cc
+++ b/datarace.cc
@@ -68,6 +68,8 @@ bool hasNonAtomicStore(const void *address) {
 		//Do we have a non-atomic write with a non-zero clock?
 		return ((WRITEVECTOR(shadowval) != 0) && !(ATOMICMASK & shadowval));
 	} else {
+		if (shadowval == 0)
+			return false;
 		struct RaceRecord *record = (struct RaceRecord *)shadowval;
 		return !record->isAtomic && record->writeClock != 0;
 	}
@@ -79,6 +81,8 @@ void setAtomicStoreFlag(const void *address) {
 	if (ISSHORTRECORD(shadowval)) {
 		*shadow = shadowval | ATOMICMASK;
 	} else {
+		if (shadowval == 0)
+			return;
 		struct RaceRecord *record = (struct RaceRecord *)shadowval;
 		record->isAtomic = 1;
 	}
diff --git a/execution.cc b/execution.cc
index b5f86ff0..3f8a78cf 100644
--- a/execution.cc
+++ b/execution.cc
@@ -63,7 +63,7 @@ ModelExecution::ModelExecution(ModelChecker *m, Scheduler *scheduler) :
 	thrd_last_action(1),
 	thrd_last_fence_release(),
 	priv(new struct model_snapshot_members ()),
-			 mo_graph(new CycleGraph()),
+	mo_graph(new CycleGraph()),
 	fuzzer(new Fuzzer()),
 	thrd_func_list(),
 	thrd_func_inst_lists()
@@ -1132,7 +1132,7 @@ void ModelExecution::add_action_to_lists(ModelAction *act)
 void insertIntoActionList(action_list_t *list, ModelAction *act) {
 	action_list_t::reverse_iterator rit = list->rbegin();
 	modelclock_t next_seq = act->get_seq_number();
-	if ((*rit)->get_seq_number() == next_seq)
+	if (rit == list->rend() || (*rit)->get_seq_number() == next_seq)
 		list->push_back(act);
 	else {
 		for(;rit != list->rend();rit++) {
@@ -1147,6 +1147,29 @@ void insertIntoActionList(action_list_t *list, ModelAction *act) {
 	}
 }
 
+void insertIntoActionListAndSetCV(action_list_t *list, ModelAction *act) {
+	action_list_t::reverse_iterator rit = list->rbegin();
+	modelclock_t next_seq = act->get_seq_number();
+	if (rit == list->rend()) {
+		act->create_cv(NULL);
+		list->push_back(act);	//still record the action when the list is empty
+	} else if ((*rit)->get_seq_number() == next_seq) {
+		act->create_cv((*rit));
+		list->push_back(act);
+	} else {
+		for(;rit != list->rend();rit++) {
+			if ((*rit)->get_seq_number() == next_seq) {
+				act->create_cv((*rit));
+				action_list_t::iterator it = rit.base();
+				it++;	//get to right sequence number
+				it++;	//get to item after it
+				list->insert(it, act);
+				break;
+			}
+		}
+	}
+}
+
 /**
  * Performs various bookkeeping operations for a normal write.  The
  * complication is that we are typically inserting a normal write
@@ -1160,7 +1182,7 @@ void ModelExecution::add_normal_write_to_lists(ModelAction *act)
 	int tid = id_to_int(act->get_tid());
 	action_list_t *list = get_safe_ptr_action(&obj_map, act->get_location());
 	insertIntoActionList(list, act);
-	insertIntoActionList(&action_trace, act);
+	insertIntoActionListAndSetCV(&action_trace, act);
 
 	// Update obj_thrd_map, a per location, per thread, order of actions
 	SnapVector<action_list_t> *vec = get_safe_ptr_vect_action(&obj_thrd_map, act->get_location());
diff --git a/hashset.h b/hashset.h
index 140b85fb..7f7febc6 100644
--- a/hashset.h
+++ b/hashset.h
@@ -78,7 +78,7 @@ private:
 	HashSet <_Key, _KeyInt, _Shift, _malloc, _calloc, _free, hash_function, equals> * set;
 };
 
-template<typename _Key, typename _KeyInt, int _Shift = 0, void * (* _malloc)(size_t) = snapshot_malloc, void * (* _calloc)(size_t, size_t) = snapshot_calloc, void (*_free)(void *) = snapshot_free, unsigned int (*hash_function)(_Key) = default_hash_function<_Key, _Shift, _KeyInt>, bool (*equals)(_Key, _Key) = default_equals<_Key> >
+template<typename _Key, typename _KeyInt, int _Shift = 0, void * (*_malloc)(size_t) = snapshot_malloc, void * (*_calloc)(size_t, size_t) = snapshot_calloc, void (*_free)(void *) = snapshot_free, unsigned int (*hash_function)(_Key) = default_hash_function<_Key, _Shift, _KeyInt>, bool (*equals)(_Key, _Key) = default_equals<_Key> >
 class HashSet {
 public:
 	HashSet(unsigned int initialcapacity = 16, double factor = 0.5) :
diff --git a/hashtable.h b/hashtable.h
index f7120388..c9da5aad 100644
--- a/hashtable.h
+++ b/hashtable.h
@@ -62,7 +62,7 @@ inline bool default_equals(_Key key1, _Key key2) {
  * @tparam _free   Provide your own 'free' for the table, or default to
  *                 snapshotting.
  */
-template<typename _Key, typename _Val, typename _KeyInt, int _Shift = 0, void * (* _malloc)(size_t) = snapshot_malloc, void * (* _calloc)(size_t, size_t) = snapshot_calloc, void (*_free)(void *) = snapshot_free, unsigned int (*hash_function)(_Key) = default_hash_function<_Key, _Shift, _KeyInt>, bool (*equals)(_Key, _Key) = default_equals<_Key> >
+template<typename _Key, typename _Val, typename _KeyInt, int _Shift = 0, void * (*_malloc)(size_t) = snapshot_malloc, void * (*_calloc)(size_t, size_t) = snapshot_calloc, void (*_free)(void *) = snapshot_free, unsigned int (*hash_function)(_Key) = default_hash_function<_Key, _Shift, _KeyInt>, bool (*equals)(_Key, _Key) = default_equals<_Key> >
 class HashTable {
 public:
 	/**
diff --git a/include/impatomic.h b/include/impatomic.h
index 7f4dcd4c..02239d5f 100644
--- a/include/impatomic.h
+++ b/include/impatomic.h
@@ -82,12 +82,12 @@ inline void atomic_flag::clear( memory_order __x__ ) volatile
 
 #define _ATOMIC_LOAD_( __a__, __x__ )                                         \
 	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
-		 __typeof__((__a__)->__f__) __r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__);  \
+		 __typeof__((__a__)->__f__)__r__ = (__typeof__((__a__)->__f__))model_read_action((void *)__p__, __x__);  \
 		 __r__; })
 
 #define _ATOMIC_STORE_( __a__, __m__, __x__ )                                 \
 	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
-		 __typeof__(__m__) __v__ = (__m__);                            \
+		 __typeof__(__m__)__v__ = (__m__);                            \
 		 model_write_action((void *) __p__,  __x__, (uint64_t) __v__); \
 		 __v__ = __v__;	/* Silence clang (-Wunused-value) */           \
 	 })
@@ -95,16 +95,16 @@ inline void atomic_flag::clear( memory_order __x__ ) volatile
 
 #define _ATOMIC_INIT_( __a__, __m__ )                                         \
 	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
-		 __typeof__(__m__) __v__ = (__m__);                            \
+		 __typeof__(__m__)__v__ = (__m__);                            \
 		 model_init_action((void *) __p__,  (uint64_t) __v__);         \
 		 __v__ = __v__;	/* Silence clang (-Wunused-value) */           \
 	 })
 
 #define _ATOMIC_MODIFY_( __a__, __o__, __m__, __x__ )                         \
 	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
-		 __typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__); \
-		 __typeof__(__m__) __v__ = (__m__);                                    \
-		 __typeof__((__a__)->__f__) __copy__= __old__;                         \
+		 __typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__); \
+		 __typeof__(__m__)__v__ = (__m__);                                    \
+		 __typeof__((__a__)->__f__)__copy__= __old__;                         \
 		 __copy__ __o__ __v__;                                                 \
 		 model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);          \
 		 __old__ = __old__;	/* Silence clang (-Wunused-value) */               \
@@ -115,10 +115,10 @@ inline void atomic_flag::clear( memory_order __x__ ) volatile
 
 #define _ATOMIC_CMPSWP_( __a__, __e__, __m__, __x__ )                         \
 	({ volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);   \
-		 __typeof__(__e__) __q__ = (__e__);                            \
-		 __typeof__(__m__) __v__ = (__m__);                            \
+		 __typeof__(__e__)__q__ = (__e__);                            \
+		 __typeof__(__m__)__v__ = (__m__);                            \
 		 bool __r__;                                                   \
-		 __typeof__((__a__)->__f__) __t__=(__typeof__((__a__)->__f__))model_rmwrcas_action((void *)__p__, __x__, (uint64_t) *__q__, sizeof((__a__)->__f__)); \
+		 __typeof__((__a__)->__f__)__t__=(__typeof__((__a__)->__f__))model_rmwrcas_action((void *)__p__, __x__, (uint64_t) *__q__, sizeof((__a__)->__f__)); \
 		 if (__t__ == *__q__ ) {;                                     \
 														model_rmw_action((void *)__p__, __x__, (uint64_t) __v__); __r__ = true; } \
 		 else {  model_rmwc_action((void *)__p__, __x__); *__q__ = __t__;  __r__ = false;} \
@@ -2418,8 +2418,8 @@ inline void* atomic_fetch_add_explicit
 	( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
 {
 	volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);
-	__typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
-	__typeof__((__a__)->__f__) __copy__= __old__;
+	__typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
+	__typeof__((__a__)->__f__)__copy__= __old__;
 	__copy__ = (void *) (((char *)__copy__) + __m__);
 	model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
 	return __old__;
@@ -2434,8 +2434,8 @@ inline void* atomic_fetch_sub_explicit
 	( volatile atomic_address* __a__, ptrdiff_t __m__, memory_order __x__ )
 {
 	volatile __typeof__((__a__)->__f__)* __p__ = &((__a__)->__f__);
-	__typeof__((__a__)->__f__) __old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
-	__typeof__((__a__)->__f__) __copy__= __old__;
+	__typeof__((__a__)->__f__)__old__=(__typeof__((__a__)->__f__))model_rmwr_action((void *)__p__, __x__);
+	__typeof__((__a__)->__f__)__copy__= __old__;
 	__copy__ = (void *) (((char *)__copy__) - __m__);
 	model_rmw_action((void *)__p__, __x__, (uint64_t) __copy__);
 	return __old__;
diff --git a/include/wildcard.h b/include/wildcard.h
index 6bcd6acd..0eaffd5e 100644
--- a/include/wildcard.h
+++ b/include/wildcard.h
@@ -21,11 +21,11 @@
 #define is_normal_mo(x) ((x >= memory_order_relaxed && x <= memory_order_seq_cst) || x == memory_order_normal)
 
 #define assert_infer(x) for (int i = 0;i <= wildcardNum;i++) \
-	ASSERT(is_normal_mo_infer((x[i])));
+		ASSERT(is_normal_mo_infer((x[i])));
 
 #define assert_infers(x) for (ModelList<memory_order *>::iterator iter = \
 																(x)->begin();iter != (x)->end();iter++) \
-	assert_infer((*iter));
+		assert_infer((*iter));
 
 #define relaxed memory_order_relaxed
 #define release memory_order_release
diff --git a/model.cc b/model.cc
index 6214e9bc..4eafe4fb 100644
--- a/model.cc
+++ b/model.cc
@@ -160,7 +160,7 @@ void ModelChecker::print_bugs() const
 							bugs->size(),
 							bugs->size() > 1 ? "s" : "");
 	for (unsigned int i = 0;i < bugs->size();i++)
-		(*bugs)[i]->print();
+		(*bugs)[i] -> print();
 }
 
 /**
@@ -171,15 +171,15 @@ void ModelChecker::print_bugs() const
  */
 void ModelChecker::record_stats()
 {
-	stats.num_total++;
+	stats.num_total ++;
 	if (!execution->isfeasibleprefix())
-		stats.num_infeasible++;
+		stats.num_infeasible ++;
 	else if (execution->have_bug_reports())
-		stats.num_buggy_executions++;
+		stats.num_buggy_executions ++;
 	else if (execution->is_complete_execution())
-		stats.num_complete++;
+		stats.num_complete ++;
 	else {
-		stats.num_redundant++;
+		stats.num_redundant ++;
 
 		/**
 		 * @todo We can violate this ASSERT() when fairness/sleep sets
@@ -260,15 +260,15 @@ bool ModelChecker::next_execution()
 		return true;
 	}
 // test code
-	execution_number++;
+	execution_number ++;
 	reset_to_initial_state();
 	return false;
 }
 
 /** @brief Run trace analyses on complete trace */
 void ModelChecker::run_trace_analyses() {
-	for (unsigned int i = 0;i < trace_analyses.size();i++)
-		trace_analyses[i]->analyze(execution->get_action_trace());
+	for (unsigned int i = 0;i < trace_analyses.size();i ++)
+		trace_analyses[i] -> analyze(execution->get_action_trace());
 }
 
 /**
-- 
2.34.1