#include <folly/Portability.h>
#include <folly/detail/DiscriminatedPtrDetail.h>
-#if !FOLLY_X64
-# error "DiscriminatedPtr is x64-specific code."
+#if !FOLLY_X64 && !FOLLY_PPC64
+# error "DiscriminatedPtr is x64- and ppc64-specific code."
#endif
namespace folly {
inline uint32_t fnv32_buf(const void* buf,
size_t n,
uint32_t hash = FNV_32_HASH_START) {
- const char* char_buf = reinterpret_cast<const char*>(buf);
+ // forcing signed char, since other platforms can use unsigned
+ const signed char* char_buf = reinterpret_cast<const signed char*>(buf);
for (size_t i = 0; i < n; ++i) {
hash += (hash << 1) + (hash << 4) + (hash << 7) +
inline uint64_t fnv64_buf(const void* buf,
size_t n,
uint64_t hash = FNV_64_HASH_START) {
- const char* char_buf = reinterpret_cast<const char*>(buf);
+ // forcing signed char, since other platforms can use unsigned
+ const signed char* char_buf = reinterpret_cast<const signed char*>(buf);
for (size_t i = 0; i < n; ++i) {
hash += (hash << 1) + (hash << 4) + (hash << 5) + (hash << 7) +
#define get16bits(d) (*((const uint16_t*) (d)))
inline uint32_t hsieh_hash32_buf(const void* buf, size_t len) {
- const char* s = reinterpret_cast<const char*>(buf);
+ // forcing signed char, since other platforms can use unsigned
+ const signed char* s = reinterpret_cast<const signed char*>(buf);
uint32_t hash = static_cast<uint32_t>(len);
uint32_t tmp;
size_t rem;
#include <folly/Portability.h>
-#if !FOLLY_X64
-# error "PackedSyncPtr is x64-specific code."
+#if !FOLLY_X64 && !FOLLY_PPC64
+# error "PackedSyncPtr is x64- and ppc64-specific code."
#endif
/*
#include <folly/detail/Sleeper.h>
#include <folly/Portability.h>
-#if !FOLLY_X64 && !FOLLY_A64
-# error "PicoSpinLock.h is currently x64 and aarch64 only."
+#if !FOLLY_X64 && !FOLLY_A64 && !FOLLY_PPC64
+# error "PicoSpinLock.h is currently x64, aarch64 and ppc64 only."
#endif
namespace folly {
#undef FB_DOBTS
#elif FOLLY_A64
ret = __atomic_fetch_or(&lock_, 1 << Bit, __ATOMIC_SEQ_CST);
+#elif FOLLY_PPC64
+#define FB_DOBTS(size) \
+ asm volatile("\teieio\n" \
+ "\tl" #size "arx 14,0,%[lockPtr]\n" \
+ "\tli 15,1\n" \
+ "\tsldi 15,15,%[bit]\n" \
+ "\tand. 16,15,14\n" \
+ "\tbne 0f\n" \
+ "\tor 14,14,15\n" \
+ "\tst" #size "cx. 14,0,%[lockPtr]\n" \
+ "\tbne 0f\n" \
+ "\tori %[output],%[output],1\n" \
+ "\tisync\n" \
+ "0:\n" \
+ : [output] "+r" (ret) \
+ : [lockPtr] "r"(&lock_), \
+ [bit] "i" (Bit) \
+ : "cr0", "memory", "r14", "r15", "r16")
+
+ switch (sizeof(IntType)) {
+ case 2: FB_DOBTS(h); break;
+ case 4: FB_DOBTS(w); break;
+ case 8: FB_DOBTS(d); break;
+ }
+
+#undef FB_DOBTS
#else
-#error "x86 aarch64 only"
+#error "x86 aarch64 ppc64 only"
#endif
return ret;
#undef FB_DOBTR
#elif FOLLY_A64
__atomic_fetch_and(&lock_, ~(1 << Bit), __ATOMIC_SEQ_CST);
+#elif FOLLY_PPC64
+#define FB_DOBTR(size) \
+ asm volatile("\teieio\n" \
+ "0: l" #size "arx 14,0,%[lockPtr]\n" \
+ "\tli 15,1\n" \
+ "\tsldi 15,15,%[bit]\n" \
+ "\txor 14,14,15\n" \
+ "\tst" #size "cx. 14,0,%[lockPtr]\n" \
+ "\tbne 0b\n" \
+ "\tisync\n" \
+ : \
+ : [lockPtr] "r"(&lock_), \
+ [bit] "i" (Bit) \
+ : "cr0", "memory", "r14", "r15")
+
+ switch (sizeof(IntType)) {
+ case 2: FB_DOBTR(h); break;
+ case 4: FB_DOBTR(w); break;
+ case 8: FB_DOBTR(d); break;
+ }
+
+#undef FB_DOBTR
#else
-# error "x64 aarch64 only"
+# error "x64 aarch64 ppc64 only"
#endif
}
};
# define FOLLY_A64 0
#endif
+#if defined(__powerpc64__)
+# define FOLLY_PPC64 1
+#else
+# define FOLLY_PPC64 0
+#endif
+
// packing is very ugly in msvc
#ifdef _MSC_VER
# define FOLLY_PACK_ATTR /**/
asm volatile ("pause");
#elif FOLLY_A64
asm volatile ("wfe");
+#elif FOLLY_PPC64
+ asm volatile("or 27,27,27");
#endif
}
inline void asm_pause() {
asm ("pause");
#elif FOLLY_A64
asm ("wfe");
+#elif FOLLY_PPC64
+ asm ("or 31,31,31");
#endif
}
AM_CONDITIONAL([HAVE_STD_THREAD], [test "$ac_cv_header_features" = "yes"])
AM_CONDITIONAL([HAVE_X86_64], [test "$build_cpu" = "x86_64"])
+AM_CONDITIONAL([HAVE_PPC64], [test "$build_cpu" = "powerpc64le"])
+AM_CONDITIONAL([RUN_ARCH_SPECIFIC_TESTS], [test "$build_cpu" = "x86_64" || test "$build_cpu" = "powerpc64le"])
AM_CONDITIONAL([HAVE_LINUX], [test "$build_os" == "linux-gnu"])
AM_CONDITIONAL([HAVE_WEAK_SYMBOLS],
[test "$folly_cv_prog_cc_weak_symbols" = "yes"])
// Stack madvise isn't Linux or glibc specific, but the system calls
// and arithmetic (and bug compatibility) are not portable. The set of
// platforms could be increased if it was useful.
-#if FOLLY_X64 && defined(_GNU_SOURCE) && defined(__linux__)
+#if (FOLLY_X64 || FOLLY_PPC64) && defined(_GNU_SOURCE) && defined(__linux__)
static const size_t s_pageSize = sysconf(_SC_PAGESIZE);
static FOLLY_TLS uintptr_t tls_stackLimit;
#include <folly/Malloc.h>
#include <folly/Portability.h>
-#if defined(__GNUC__) && FOLLY_X64
+#if defined(__GNUC__) && (FOLLY_X64 || FOLLY_PPC64)
# include <folly/SmallLocks.h>
# define FB_PACK_ATTR FOLLY_PACK_ATTR
# define FB_PACK_PUSH FOLLY_PACK_PUSH
}
} FB_PACK_ATTR;
-#if FOLLY_X64
+#if (FOLLY_X64 || FOLLY_PPC64)
typedef unsigned char InlineStorageType[sizeof(value_type) * MaxInline];
#else
typedef typename std::aligned_storage<
spin_lock_test_LDADD = libgtestmain.la $(top_builddir)/libfolly.la
TESTS += spin_lock_test
-if HAVE_X86_64
+if RUN_ARCH_SPECIFIC_TESTS
small_locks_test_SOURCES = SmallLocksTest.cpp
small_locks_test_LDADD = libgtestmain.la $(top_builddir)/libfolly.la
TESTS += small_locks_test
discriminated_ptr_test_LDADD = libgtestmain.la $(top_builddir)/libfolly.la
TESTS += discriminated_ptr_test
+if !HAVE_PPC64
cpuid_test_SOURCES = CpuIdTest.cpp
cpuid_test_LDADD = libgtestmain.la $(top_builddir)/libfolly.la
TESTS += cpuid_test
endif
+endif
sorted_vector_types_test_SOURCES = sorted_vector_test.cpp
sorted_vector_types_test_LDADD = libgtestmain.la $(top_builddir)/libfolly.la
using folly::small_vector;
using namespace folly::small_vector_policy;
-#if FOLLY_X64
+#if FOLLY_X64 || FOLLY_PPC64
static_assert(sizeof(small_vector<int>) == 16,
"Object size is not what we expect for small_vector<int>");