*/
bool CycleGraph::checkReachable(CycleNode *from, CycleNode *to) {
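/* Standard BFS over the cycle graph: seed the work queue with 'from' and
 * record visited nodes in 'discovered'. Both containers are built on the
 * model checker's own non-snapshotting allocators (MyAlloc and
 * model_malloc/model_calloc/MYFREE), presumably so this traversal
 * bookkeeping is not undone when program state is rolled back. */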
std::vector<CycleNode *, MyAlloc<CycleNode *> > queue;
- HashTable<CycleNode *, CycleNode *, uintptr_t, 4, model_malloc, MYCALLOC, MYFREE> discovered;
+ HashTable<CycleNode *, CycleNode *, uintptr_t, 4, model_malloc, model_calloc, MYFREE> discovered;
queue.push_back(from);
discovered.put(from, from);
#endif
/** Non-snapshotting calloc for our use. */
-void *MYCALLOC(size_t count, size_t size) {
+void *model_calloc(size_t count, size_t size) {
#if USE_MPROTECT_SNAPSHOT
static void *(*callocp)(size_t count, size_t size)=NULL;
char *error;
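/* (Body elided in this hunk.) The callocp/error pair is the usual dlsym
 * interposition idiom: lazily resolve the real calloc once and cache it,
 * roughly:
 *   if (!callocp) {
 *     callocp = (void *(*)(size_t, size_t))dlsym(RTLD_NEXT, "calloc");
 *     if ((error = dlerror()) != NULL) {
 *       fputs(error, stderr);
 *       exit(EXIT_FAILURE);
 *     }
 *   }
 *   return callocp(count, size);
 * This is a sketch of the pattern, not the verbatim body. */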
#define SNAPSHOTALLOC
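/* Declarations of the raw, non-snapshotting allocation entry points; the
 * HashTable instantiations elsewhere in this patch pass them as allocator
 * parameters. */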
void *model_malloc(size_t size);
-void *MYCALLOC(size_t count, size_t size);
+void *model_calloc(size_t count, size_t size);
void MYFREE(void *ptr);
static inline void * snapshot_malloc(size_t size) {
*/
void rollBack( snapshot_id theID ){
#if USE_MPROTECT_SNAPSHOT
- HashTable< void *, bool, uintptr_t, 4, model_malloc, MYCALLOC, MYFREE> duplicateMap;
+ HashTable< void *, bool, uintptr_t, 4, model_malloc, model_calloc, MYFREE> duplicateMap;
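/* First pass of the rollback: lift the write protection on every
 * snapshotted region. The regions are presumably kept read-only while a
 * snapshot is live so that stores fault and can be tracked; they must be
 * made PROT_READ | PROT_WRITE again before the saved pages can be copied
 * back. duplicateMap appears to guard against restoring the same page
 * twice. */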
for(unsigned int region=0; region<snapshotrecord->lastRegion;region++) {
if( mprotect(snapshotrecord->regionsToSnapShot[region].basePtr, snapshotrecord->regionsToSnapShot[region].sizeInPages*sizeof(struct SnapShotPage), PROT_READ | PROT_WRITE ) == -1 ){
perror("mprotect");