/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/concurrency/CacheLocality.h>

#ifndef _MSC_VER
#define _GNU_SOURCE 1 // for RTLD_NOLOAD
#include <dlfcn.h>
#endif
#include <fstream>

#include <folly/Conv.h>
#include <folly/Exception.h>
#include <folly/FileUtil.h>
#include <folly/Format.h>
#include <folly/ScopeGuard.h>

namespace folly {

///////////// CacheLocality

/// Returns the best real CacheLocality information available
static CacheLocality getSystemLocalityInfo() {
#ifdef __linux__
  try {
    return CacheLocality::readFromSysfs();
  } catch (...) {
    // keep trying
  }
#endif

  long numCpus = sysconf(_SC_NPROCESSORS_CONF);
  if (numCpus <= 0) {
    // This shouldn't happen, but if it does we should try to keep
    // going.  We are probably not going to be able to parse /sys on
    // this box either (although we will try), which means we are going
    // to fall back to the SequentialThreadId splitter.  On my 16 core
    // (x hyperthreading) dev box 16 stripes is enough to get pretty good
    // contention avoidance with SequentialThreadId, and there is little
    // improvement from going from 32 to 64.  This default gives us some
    // wiggle room
    numCpus = 32;
  }
  return CacheLocality::uniform(size_t(numCpus));
}

template <>
const CacheLocality& CacheLocality::system<std::atomic>() {
  static auto* cache = new CacheLocality(getSystemLocalityInfo());
  return *cache;
}

// Each level of cache has sharing sets, which are the set of cpus
// that share a common cache at that level.  These are available in a
// hex bitset form (/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_map,
// for example).  They are also available in a human-readable list form,
// as in /sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list.  The
// list is a comma-separated list of numbers and ranges, where the ranges
// are a pair of decimal numbers separated by a '-'.
//
// To sort the cpus for optimum locality we don't really need to parse
// the sharing sets, we just need a unique representative from the
// equivalence class.  The smallest value works fine, and happens to be
// the first decimal number in the file.  We load all of the equivalence
// class information from all of the cpu*/index* directories, order the
// cpus first by increasing last-level cache equivalence class, then by
// the smaller caches.  Finally, we break ties with the cpu number itself.
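//
// For example, on a hypothetical 4-cpu box with per-cpu L1 caches and two
// shared last-level caches (made-up values, not from a real machine), the
// shared_cpu_list files might read:
//
//   cpu0: index0 "0", index3 "0-1"      cpu2: index0 "2", index3 "2-3"
//   cpu1: index0 "1", index3 "0-1"      cpu3: index0 "3", index3 "2-3"
//
// The last-level representative for cpus 0 and 1 is 0 (the first number
// in "0-1"), and for cpus 2 and 3 it is 2, so the sorted order keeps 0
// next to 1 and 2 next to 3.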

/// Returns the first decimal number in the string, or throws an exception
/// if the string does not start with a number terminated by ',', '-',
/// '\n', or eos.
static size_t parseLeadingNumber(const std::string& line) {
  auto raw = line.c_str();
  char* end;
  unsigned long val = strtoul(raw, &end, 10);
  if (end == raw || (*end != ',' && *end != '-' && *end != '\n' && *end != 0)) {
    throw std::runtime_error(
        to<std::string>("error parsing list '", line, "'").c_str());
  }
  return val;
}
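
// For instance, parseLeadingNumber("0-3,8-11\n") returns 0: strtoul stops
// at the '-', which is one of the accepted terminators, and the rest of
// the list is ignored.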

CacheLocality CacheLocality::readFromSysfsTree(
    const std::function<std::string(std::string)>& mapping) {
  // number of equivalence classes per level
  std::vector<size_t> numCachesByLevel;

  // the list of cache equivalence classes, where equivalence classes
  // are named by the smallest cpu in the class
  std::vector<std::vector<size_t>> equivClassesByCpu;

  std::vector<size_t> cpus;

  while (true) {
    auto cpu = cpus.size();
    std::vector<size_t> levels;
    for (size_t index = 0;; ++index) {
      auto dir =
          sformat("/sys/devices/system/cpu/cpu{}/cache/index{}/", cpu, index);
      auto cacheType = mapping(dir + "type");
      auto equivStr = mapping(dir + "shared_cpu_list");
      if (cacheType.size() == 0 || equivStr.size() == 0) {
        // no more caches
        break;
      }
      if (cacheType[0] == 'I') {
        // cacheType in { "Data", "Instruction", "Unified" }. skip icache
        continue;
      }
      auto equiv = parseLeadingNumber(equivStr);
      auto level = levels.size();
      levels.push_back(equiv);

      if (equiv == cpu) {
        // we only want to count the equiv classes once, so we do it when
        // we first encounter them
        while (numCachesByLevel.size() <= level) {
          numCachesByLevel.push_back(0);
        }
        numCachesByLevel[level]++;
      }
    }

    if (levels.size() == 0) {
      // no levels at all for this cpu, we must be done
      break;
    }
    equivClassesByCpu.emplace_back(std::move(levels));
    cpus.push_back(cpu);
  }

  if (cpus.size() == 0) {
    throw std::runtime_error("unable to load cache sharing info");
  }

  std::sort(cpus.begin(),
            cpus.end(),
            [&](size_t lhs, size_t rhs) -> bool {
              // sort first by equiv class of cache with highest index,
              // direction doesn't matter.  If different cpus have
              // different numbers of caches then this code might produce
              // a sub-optimal ordering, but it won't crash
              auto& lhsEquiv = equivClassesByCpu[lhs];
              auto& rhsEquiv = equivClassesByCpu[rhs];
              for (ssize_t i =
                       ssize_t(std::min(lhsEquiv.size(), rhsEquiv.size())) - 1;
                   i >= 0;
                   --i) {
                auto idx = size_t(i);
                if (lhsEquiv[idx] != rhsEquiv[idx]) {
                  return lhsEquiv[idx] < rhsEquiv[idx];
                }
              }

              // break ties deterministically by cpu
              return lhs < rhs;
            });

  // the cpus are now sorted by locality, with neighboring entries closer
  // to each other than entries that are far away.  For striping we want
  // the inverse map, since we are starting with the cpu
  std::vector<size_t> indexes(cpus.size());
  for (size_t i = 0; i < cpus.size(); ++i) {
    indexes[cpus[i]] = i;
  }
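
  // For example (hypothetical), if the sorted order is cpus == {0, 2, 1, 3}
  // then indexes == {0, 2, 1, 3}: cpu 2 is the second-most-local cpu, so
  // indexes[2] == 1.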

  return CacheLocality{
      cpus.size(), std::move(numCachesByLevel), std::move(indexes)};
}

CacheLocality CacheLocality::readFromSysfs() {
  return readFromSysfsTree([](std::string name) {
    std::ifstream xi(name.c_str());
    std::string rv;
    std::getline(xi, rv);
    return rv;
  });
}
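
// readFromSysfsTree() takes the file-to-contents mapping as a parameter
// mainly so that it can be exercised without a real /sys.  A minimal
// sketch of such a harness (illustrative only, names are made up):
//
//   std::map<std::string, std::string> fakeSysfs = {
//       {"/sys/devices/system/cpu/cpu0/cache/index0/type", "Data"},
//       {"/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_list", "0"},
//   };
//   auto locality = CacheLocality::readFromSysfsTree(
//       [&](std::string name) { return fakeSysfs[name]; });
//
// Missing keys yield the empty string, which the parser above treats as
// "no more caches" and eventually "no more cpus".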

CacheLocality CacheLocality::uniform(size_t numCpus) {
  CacheLocality rv;

  rv.numCpus = numCpus;

  // a single level of cache, with one cache per cpu (no sharing info)
  rv.numCachesByLevel.push_back(numCpus);

  // no permutations in locality index mapping
  for (size_t cpu = 0; cpu < numCpus; ++cpu) {
    rv.localityIndexByCpu.push_back(cpu);
  }

  return rv;
}
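
// e.g. CacheLocality::uniform(4) gives numCpus == 4, numCachesByLevel ==
// {4}, and the identity mapping localityIndexByCpu == {0, 1, 2, 3}, so
// stripe assignment just follows raw cpu numbering.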

////////////// Getcpu

Getcpu::Func Getcpu::resolveVdsoFunc() {
#if !FOLLY_HAVE_LINUX_VDSO
  return nullptr;
#else
  void* h = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
  if (h == nullptr) {
    return nullptr;
  }

  auto func = Getcpu::Func(dlsym(h, "__vdso_getcpu"));
  if (func == nullptr) {
    // technically a null result could either be a failure or a successful
    // lookup of a symbol with the null value, but the second can't actually
    // happen for this symbol.  No point holding the handle forever if
    // we don't need the code
    dlclose(h);
  }

  return func;
#endif
}
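
// The resolved function follows the getcpu(2) calling convention: pass
// pointers that receive the current cpu and numa node (either may be
// null).  Illustrative use:
//
//   unsigned cpu;
//   if (auto getcpu = Getcpu::resolveVdsoFunc()) {
//     getcpu(&cpu, nullptr, nullptr);
//   }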

#ifdef FOLLY_TLS
/////////////// SequentialThreadId
template struct SequentialThreadId<std::atomic>;
#endif

/////////////// AccessSpreader
template struct AccessSpreader<std::atomic>;

SimpleAllocator::SimpleAllocator(size_t allocSize, size_t sz)
    : allocSize_{allocSize}, sz_(sz) {}

SimpleAllocator::~SimpleAllocator() {
  std::lock_guard<std::mutex> g(m_);
  for (auto& block : blocks_) {
    detail::aligned_free(block);
  }
}

void* SimpleAllocator::allocateHard() {
  // Allocate a new slab.
  mem_ = static_cast<uint8_t*>(detail::aligned_malloc(allocSize_, allocSize_));
  if (!mem_) {
    std::__throw_bad_alloc();
  }
  end_ = mem_ + allocSize_;
  blocks_.push_back(mem_);

  // Install a pointer to ourselves as the allocator.
  *reinterpret_cast<SimpleAllocator**>(mem_) = this;
  static_assert(
      alignof(std::max_align_t) >= sizeof(SimpleAllocator*),
      "alignment too small");
  mem_ += std::min(sz_, alignof(std::max_align_t));

  // New allocation.
  auto mem = mem_;
  mem_ += sz_;
  assert(intptr_t(mem) % 128 != 0);
  return mem;
}

} // namespace folly