/*
 * Copyright 2014 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// @author Mark Rabkin (mrabkin@fb.com)
// @author Andrei Alexandrescu (andrei.alexandrescu@fb.com)
#include <folly/Range.h>

#include <algorithm>  // std::min
#include <iostream>

#if FOLLY_HAVE_EMMINTRIN_H
#include <emmintrin.h>  // __v16qi
#endif
/**
 * Predicates that can be used with qfind and startsWith
 */
32 const AsciiCaseSensitive asciiCaseSensitive = AsciiCaseSensitive();
33 const AsciiCaseInsensitive asciiCaseInsensitive = AsciiCaseInsensitive();
35 std::ostream& operator<<(std::ostream& os, const StringPiece piece) {
36 os.write(piece.start(), piece.size());
40 std::ostream& operator<<(std::ostream& os, const MutableStringPiece piece) {
41 os.write(piece.start(), piece.size());
// It's okay if pages are bigger than this (as powers of two), but they should
// not be smaller.  The SSE code below only relies on the fact that a 16-byte
// load that starts on a page never crosses into the next page.
constexpr size_t kMinPageSize = 4096;
static_assert(kMinPageSize >= 16,
              "kMinPageSize must be at least SSE register size");
// Page index of `addr`; two addresses with equal PAGE_FOR are guaranteed to
// live on the same (readable-or-not) page.
#define PAGE_FOR(addr) \
  (reinterpret_cast<uintptr_t>(addr) / kMinPageSize)
// Earlier versions of GCC (for example, Clang on Mac OS X, which is based on
// GCC 4.2) do not have a full complement of SSE builtins.
58 #if FOLLY_HAVE_EMMINTRIN_H && __GNUC_PREREQ(4, 6)
// Returns the smallest index i > 0 such that arr + i is 16-byte aligned.
// (The index starts at 1 because index 0 -- `arr` itself -- has already been
// scanned with an unaligned load by the time callers need this.)
inline size_t nextAlignedIndex(const char* arr) {
  auto firstPossible = reinterpret_cast<uintptr_t>(arr) + 1;
  return 1 +                       // add 1 because the index starts at 'arr'
    ((firstPossible + 15) & ~0xF)  // round up to next multiple of 16
    - firstPossible;
}
66 // build sse4.2-optimized version even if -msse4.2 is not passed to GCC
67 size_t qfind_first_byte_of_needles16(const StringPiece haystack,
68 const StringPiece needles)
69 __attribute__ ((__target__("sse4.2"), noinline))
70 FOLLY_DISABLE_ADDRESS_SANITIZER;
72 // helper method for case where needles.size() <= 16
73 size_t qfind_first_byte_of_needles16(const StringPiece haystack,
74 const StringPiece needles) {
75 DCHECK(!haystack.empty());
76 DCHECK(!needles.empty());
77 DCHECK_LE(needles.size(), 16);
78 if ((needles.size() <= 2 && haystack.size() >= 256) ||
79 // must bail if we can't even SSE-load a single segment of haystack
80 (haystack.size() < 16 &&
81 PAGE_FOR(haystack.end() - 1) != PAGE_FOR(haystack.data() + 15)) ||
82 // can't load needles into SSE register if it could cross page boundary
83 PAGE_FOR(needles.end() - 1) != PAGE_FOR(needles.data() + 15)) {
84 return detail::qfind_first_byte_of_nosse(haystack, needles);
87 auto arr2 = __builtin_ia32_loaddqu(needles.data());
88 // do an unaligned load for first block of haystack
89 auto arr1 = __builtin_ia32_loaddqu(haystack.data());
90 auto index = __builtin_ia32_pcmpestri128(arr2, needles.size(),
91 arr1, haystack.size(), 0);
96 // Now, we can do aligned loads hereafter...
97 size_t i = nextAlignedIndex(haystack.data());
98 for (; i < haystack.size(); i+= 16) {
99 void* ptr1 = __builtin_assume_aligned(haystack.data() + i, 16);
100 auto arr1 = *reinterpret_cast<const __v16qi*>(ptr1);
101 auto index = __builtin_ia32_pcmpestri128(arr2, needles.size(),
102 arr1, haystack.size() - i, 0);
107 return StringPiece::npos;
109 #endif // FOLLY_HAVE_EMMINTRIN_H && GCC 4.6+
111 // Aho, Hopcroft, and Ullman refer to this trick in "The Design and Analysis
112 // of Computer Algorithms" (1974), but the best description is here:
113 // http://research.swtch.com/sparse
116 FastByteSet() : size_(0) { } // no init of arrays required!
118 inline void add(uint8_t i) {
125 inline bool contains(uint8_t i) const {
126 DCHECK_LE(size_, 256);
127 return sparse_[i] < size_ && dense_[sparse_[i]] == i;
131 uint16_t size_; // can't use uint8_t because it would overflow if all
132 // possible values were inserted.
133 uint8_t sparse_[256];
141 size_t qfind_first_byte_of_byteset(const StringPiece haystack,
142 const StringPiece needles) {
144 for (auto needle: needles) {
147 for (size_t index = 0; index < haystack.size(); ++index) {
148 if (s.contains(haystack[index])) {
152 return StringPiece::npos;
155 #if FOLLY_HAVE_EMMINTRIN_H && __GNUC_PREREQ(4, 6)
157 template <bool HAYSTACK_ALIGNED>
158 size_t scanHaystackBlock(const StringPiece haystack,
159 const StringPiece needles,
161 // inline is okay because it's only called from other sse4.2 functions
162 __attribute__ ((__target__("sse4.2")))
163 // Turn off ASAN because the "arr2 = ..." assignment in the loop below reads
164 // up to 15 bytes beyond end of the buffer in #needles#. That is ok because
165 // ptr2 is always 16-byte aligned, so the read can never span a page boundary.
166 // Also, the extra data that may be read is never actually used.
167 FOLLY_DISABLE_ADDRESS_SANITIZER;
169 // Scans a 16-byte block of haystack (starting at blockStartIdx) to find first
170 // needle. If HAYSTACK_ALIGNED, then haystack must be 16byte aligned.
171 // If !HAYSTACK_ALIGNED, then caller must ensure that it is safe to load the
173 template <bool HAYSTACK_ALIGNED>
174 size_t scanHaystackBlock(const StringPiece haystack,
175 const StringPiece needles,
176 uint64_t blockStartIdx) {
177 DCHECK_GT(needles.size(), 16); // should handled by *needles16() method
178 DCHECK(blockStartIdx + 16 <= haystack.size() ||
179 (PAGE_FOR(haystack.data() + blockStartIdx) ==
180 PAGE_FOR(haystack.data() + blockStartIdx + 15)));
183 if (HAYSTACK_ALIGNED) {
184 void* ptr1 = __builtin_assume_aligned(haystack.data() + blockStartIdx, 16);
185 arr1 = *reinterpret_cast<const __v16qi*>(ptr1);
187 arr1 = __builtin_ia32_loaddqu(haystack.data() + blockStartIdx);
190 // This load is safe because needles.size() >= 16
191 auto arr2 = __builtin_ia32_loaddqu(needles.data());
192 size_t b = __builtin_ia32_pcmpestri128(
193 arr2, 16, arr1, haystack.size() - blockStartIdx, 0);
195 size_t j = nextAlignedIndex(needles.data());
196 for (; j < needles.size(); j += 16) {
197 void* ptr2 = __builtin_assume_aligned(needles.data() + j, 16);
198 arr2 = *reinterpret_cast<const __v16qi*>(ptr2);
200 auto index = __builtin_ia32_pcmpestri128(
201 arr2, needles.size() - j, arr1, haystack.size() - blockStartIdx, 0);
202 b = std::min<size_t>(index, b);
206 return blockStartIdx + b;
208 return StringPiece::npos;
211 size_t qfind_first_byte_of_sse42(const StringPiece haystack,
212 const StringPiece needles)
213 __attribute__ ((__target__("sse4.2"), noinline));
215 size_t qfind_first_byte_of_sse42(const StringPiece haystack,
216 const StringPiece needles) {
217 if (UNLIKELY(needles.empty() || haystack.empty())) {
218 return StringPiece::npos;
219 } else if (needles.size() <= 16) {
220 // we can save some unnecessary load instructions by optimizing for
221 // the common case of needles.size() <= 16
222 return qfind_first_byte_of_needles16(haystack, needles);
225 if (haystack.size() < 16 &&
226 PAGE_FOR(haystack.end() - 1) != PAGE_FOR(haystack.data() + 16)) {
227 // We can't safely SSE-load haystack. Use a different approach.
228 if (haystack.size() <= 2) {
229 return qfind_first_of(haystack, needles, asciiCaseSensitive);
231 return qfind_first_byte_of_byteset(haystack, needles);
234 auto ret = scanHaystackBlock<false>(haystack, needles, 0);
235 if (ret != StringPiece::npos) {
239 size_t i = nextAlignedIndex(haystack.data());
240 for (; i < haystack.size(); i += 16) {
241 auto ret = scanHaystackBlock<true>(haystack, needles, i);
242 if (ret != StringPiece::npos) {
247 return StringPiece::npos;
249 #endif // FOLLY_HAVE_EMMINTRIN_H && GCC 4.6+
251 size_t qfind_first_byte_of_nosse(const StringPiece haystack,
252 const StringPiece needles) {
253 if (UNLIKELY(needles.empty() || haystack.empty())) {
254 return StringPiece::npos;
256 // The thresholds below were empirically determined by benchmarking.
257 // This is not an exact science since it depends on the CPU, the size of
258 // needles, and the size of haystack.
259 if ((needles.size() >= 4 && haystack.size() <= 10) ||
260 (needles.size() >= 16 && haystack.size() <= 64) ||
261 needles.size() >= 32) {
262 return qfind_first_byte_of_byteset(haystack, needles);
264 return qfind_first_of(haystack, needles, asciiCaseSensitive);
267 } // namespace detail