lockedpool.cpp
// Copyright (c) 2016-2020 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <support/lockedpool.h>
#include <support/cleanse.h>

#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif

#ifdef WIN32
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#else
#include <sys/mman.h> // for mmap
#include <sys/resource.h> // for getrlimit
#include <limits.h> // for PAGESIZE
#include <unistd.h> // for sysconf
#endif

#include <algorithm>
#ifdef ARENA_DEBUG
#include <iomanip>
#include <iostream>
#endif

LockedPoolManager* LockedPoolManager::_instance = nullptr;

/*******************************************************************************/
// Utilities
//
/** Align up to power of 2 */
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}
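// Worked example (ours, not in the original file): align_up() is only valid
// for power-of-two alignments, where the mask trick rounds up with a single
// add-and-AND. A few sample values, checkable with <cassert>:
#if 0
#include <cassert>
static void align_up_example()
{
    assert(align_up(13, 16) == 16);    // rounds up to the next 16-byte boundary
    assert(align_up(32, 16) == 32);    // already-aligned values are unchanged
    assert(align_up(0, 16) == 0);      // zero stays zero; alloc() rejects it later
    assert(align_up(1, 4096) == 4096); // page-granular rounding, as used below
}
#endif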

/*******************************************************************************/
// Implementation: Arena

Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
{
    // Start with one free chunk that covers the entire arena
    auto it = size_to_free_chunk.emplace(size_in, base);
    chunks_free.emplace(base, it);
    chunks_free_end.emplace(base + size_in, it);
}

Arena::~Arena()
{
}

void* Arena::alloc(size_t size)
{
    // Round to next multiple of alignment
    size = align_up(size, alignment);

    // Don't handle zero-sized chunks
    if (size == 0)
        return nullptr;

    // Pick a large enough free-chunk. lower_bound returns an iterator pointing
    // to the first element that is not less than key.
    // This allocation strategy is best-fit. According to "Dynamic Storage
    // Allocation: A Survey and Critical Review", Wilson et al. 1995,
    // https://www.scs.stanford.edu/14wi-cs140/sched/readings/wilson.pdf,
    // best-fit and first-fit policies seem to work well in practice.
    auto size_ptr_it = size_to_free_chunk.lower_bound(size);
    if (size_ptr_it == size_to_free_chunk.end())
        return nullptr;

    // Create the used-chunk, taking its space from the end of the free-chunk
    const size_t size_remaining = size_ptr_it->first - size;
    auto allocated = chunks_used.emplace(size_ptr_it->second + size_remaining, size).first;
    chunks_free_end.erase(size_ptr_it->second + size_ptr_it->first);
    if (size_ptr_it->first == size) {
        // whole chunk is used up
        chunks_free.erase(size_ptr_it->second);
    } else {
        // still some memory left in the chunk
        auto it_remaining = size_to_free_chunk.emplace(size_remaining, size_ptr_it->second);
        chunks_free[size_ptr_it->second] = it_remaining;
        chunks_free_end.emplace(size_ptr_it->second + size_remaining, it_remaining);
    }
    size_to_free_chunk.erase(size_ptr_it);

    return reinterpret_cast<void*>(allocated->first);
}
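// Illustrative sketch (not part of the original file): how best-fit carving
// behaves on a fresh arena. The region, sizes and alignment are hypothetical;
// note that space is taken from the *end* of the chosen free chunk.
#if 0
static void arena_alloc_example()
{
    alignas(16) static char region[1024];
    Arena arena(region, sizeof(region), 16);
    void* a = arena.alloc(100);  // rounded up to 112 bytes -> region + 912
    void* b = arena.alloc(1);    // rounded up to 16 bytes  -> region + 896
    void* c = arena.alloc(0);    // nullptr: zero-sized requests are refused
    void* d = arena.alloc(2048); // nullptr: no free chunk is large enough
}
#endif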

void Arena::free(void *ptr)
{
    // Freeing the nullptr pointer is OK.
    if (ptr == nullptr) {
        return;
    }

    // Remove chunk from used map
    auto i = chunks_used.find(static_cast<char*>(ptr));
    if (i == chunks_used.end()) {
        throw std::runtime_error("Arena: invalid or double free");
    }
    std::pair<char*, size_t> freed = *i;
    chunks_used.erase(i);

    // coalesce freed with previous chunk
    auto prev = chunks_free_end.find(freed.first);
    if (prev != chunks_free_end.end()) {
        freed.first -= prev->second->first;
        freed.second += prev->second->first;
        size_to_free_chunk.erase(prev->second);
        chunks_free_end.erase(prev);
    }

    // coalesce freed with chunk after freed
    auto next = chunks_free.find(freed.first + freed.second);
    if (next != chunks_free.end()) {
        freed.second += next->second->first;
        size_to_free_chunk.erase(next->second);
        chunks_free.erase(next);
    }

    // Add/set space with coalesced free chunk
    auto it = size_to_free_chunk.emplace(freed.second, freed.first);
    chunks_free[freed.first] = it;
    chunks_free_end[freed.first + freed.second] = it;
}
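// Illustrative sketch (ours): both coalescing branches firing. Continuing the
// hypothetical 1024-byte arena above, freeing in this order first creates an
// isolated free chunk, then merges it with neighbours on both sides:
#if 0
static void arena_free_example()
{
    alignas(16) static char region[1024];
    Arena arena(region, sizeof(region), 16);
    void* a = arena.alloc(16); // region + 1008
    void* b = arena.alloc(16); // region + 992
    arena.free(a);             // free chunk [1008, 1024); nothing to merge yet
    arena.free(b);             // merges with [0, 992) below and [1008, 1024)
                               // above, restoring one 1024-byte free chunk
}
#endif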

Arena::Stats Arena::stats() const
{
    Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
    for (const auto& chunk: chunks_used)
        r.used += chunk.second;
    for (const auto& chunk: chunks_free)
        r.free += chunk.second->first;
    r.total = r.used + r.free;
    return r;
}
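// Invariant worth noting (our observation): every byte of the arena belongs to
// exactly one used or free chunk, so the computed total always equals the
// region size passed to the constructor:
#if 0
static void arena_stats_example(const Arena& arena)
{
    Arena::Stats s = arena.stats();
    assert(s.used + s.free == s.total); // every byte is in exactly one chunk
}
#endif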

#ifdef ARENA_DEBUG
static void printchunk(void* base, size_t sz, bool used) {
    std::cout <<
        "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
        " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
        " 0x" << used << std::endl;
}
void Arena::walk() const
{
    for (const auto& chunk: chunks_used)
        printchunk(chunk.first, chunk.second, true);
    std::cout << std::endl;
    for (const auto& chunk: chunks_free)
        printchunk(chunk.first, chunk.second->first, false);
    std::cout << std::endl;
}
#endif

/*******************************************************************************/
// Implementation: Win32LockedPageAllocator

#ifdef WIN32
/** LockedPageAllocator specialized for Windows.
 */
class Win32LockedPageAllocator: public LockedPageAllocator
{
public:
    Win32LockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};

Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine system page size in bytes
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock is used to attempt to keep keying material out of swap. Note
        // that it does not provide this as a guarantee, but, in practice, memory
        // that has been VirtualLock'd almost never gets written to the pagefile
        // except in rare circumstances where memory is extremely low.
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}
void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void*>(addr), len);
}

size_t Win32LockedPageAllocator::GetLimit()
{
    // TODO is there a limit on Windows, how to get it?
    return std::numeric_limits<size_t>::max();
}
#endif

/*******************************************************************************/
// Implementation: PosixLockedPageAllocator

#ifndef WIN32
/** LockedPageAllocator specialized for OSes that don't try to be
 * special snowflakes.
 */
class PosixLockedPageAllocator: public LockedPageAllocator
{
public:
    PosixLockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void* addr, size_t len) override;
    size_t GetLimit() override;
private:
    size_t page_size;
};

PosixLockedPageAllocator::PosixLockedPageAllocator()
{
    // Determine system page size in bytes
#if defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}

// Some systems (at least OS X) do not define MAP_ANONYMOUS and instead
// define the deprecated MAP_ANON.
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    void *addr;
    len = align_up(len, page_size);
    addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) {
        return nullptr;
    }
    if (addr) {
        *lockingSuccess = mlock(addr, len) == 0;
#if defined(MADV_DONTDUMP) // Linux
        madvise(addr, len, MADV_DONTDUMP);
#elif defined(MADV_NOCORE) // FreeBSD
        madvise(addr, len, MADV_NOCORE);
#endif
    }
    return addr;
}
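// Usage sketch (ours): driving the allocator directly. A false lockingSuccess
// means mlock() failed (typically RLIMIT_MEMLOCK is exhausted); the mapping is
// still usable, just not guaranteed to stay out of swap.
#if 0
static void posix_allocator_example()
{
    PosixLockedPageAllocator allocator;
    bool locked = false;
    void* p = allocator.AllocateLocked(1000, &locked); // rounded up to a whole page
    if (p) {
        // ... store key material in p ...
        allocator.FreeLocked(p, 1000); // memory_cleanse, then munlock + munmap
    }
}
#endif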
void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    munlock(addr, len);
    munmap(addr, len);
}
size_t PosixLockedPageAllocator::GetLimit()
{
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
#endif

/*******************************************************************************/
// Implementation: LockedPool

LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
    allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
{
}

LockedPool::~LockedPool()
{
}
void* LockedPool::alloc(size_t size)
{
    std::lock_guard<std::mutex> lock(mutex);

    // Don't handle impossible sizes
    if (size == 0 || size > ARENA_SIZE)
        return nullptr;

    // Try allocating from each current arena
    for (auto &arena: arenas) {
        void *addr = arena.alloc(size);
        if (addr) {
            return addr;
        }
    }
    // If that fails, create a new one
    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
        return arenas.back().alloc(size);
    }
    return nullptr;
}
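// Hypothetical standalone use (outside the LockedPoolManager singleton),
// showing the size limit and on-demand arena creation described above:
#if 0
static void locked_pool_example()
{
    LockedPool pool(std::unique_ptr<LockedPageAllocator>(new PosixLockedPageAllocator()));
    void* key = pool.alloc(32); // first call creates the first arena
    void* big = pool.alloc(LockedPool::ARENA_SIZE + 1); // nullptr: a chunk can
                                // never span arenas, so oversized requests fail
    pool.free(key);
}
#endif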

void LockedPool::free(void *ptr)
{
    std::lock_guard<std::mutex> lock(mutex);
    // TODO we can do better than this linear search by keeping a map of arena
    // extents to arena, and looking up the address.
    for (auto &arena: arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
}

LockedPool::Stats LockedPool::stats() const
{
    std::lock_guard<std::mutex> lock(mutex);
    LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
    for (const auto &arena: arenas) {
        Arena::Stats i = arena.stats();
        r.used += i.used;
        r.free += i.free;
        r.total += i.total;
        r.chunks_used += i.chunks_used;
        r.chunks_free += i.chunks_free;
    }
    return r;
}

bool LockedPool::new_arena(size_t size, size_t align)
{
    bool locked;
    // If this is the first arena, handle this specially: Cap the upper size
    // by the process limit. This makes sure that the first arena will at least
    // be locked. An exception to this is if the process limit is 0:
    // in this case no memory can be locked at all so we'll skip past this logic.
    if (arenas.empty()) {
        size_t limit = allocator->GetLimit();
        if (limit > 0) {
            size = std::min(size, limit);
        }
    }
    void *addr = allocator->AllocateLocked(size, &locked);
    if (!addr) {
        return false;
    }
    if (locked) {
        cumulative_bytes_locked += size;
    } else if (lf_cb) { // Call the locking-failed callback if locking failed
        if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed.
            allocator->FreeLocked(addr, size);
            return false;
        }
    }
    arenas.emplace_back(allocator.get(), addr, size, align);
    return true;
}
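// Sketch (ours) of wiring the locking-failed callback: returning true accepts
// the unlocked arena ("consider the user warned"), returning false makes
// new_arena() give the memory back and fail the allocation instead.
#if 0
static bool WarnOnLockFailure()
{
    // e.g. warn the user about raising RLIMIT_MEMLOCK (`ulimit -l`)
    return true;
}
static LockedPool warned_pool(
    std::unique_ptr<LockedPageAllocator>(new PosixLockedPageAllocator()),
    &WarnOnLockFailure);
#endif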

LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
{
}
LockedPool::LockedPageArena::~LockedPageArena()
{
    allocator->FreeLocked(base, size);
}

/*******************************************************************************/
// Implementation: LockedPoolManager
//
LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator_in):
    LockedPool(std::move(allocator_in), &LockedPoolManager::LockingFailed)
{
}

bool LockedPoolManager::LockingFailed()
{
    // TODO: log something but how? without including util.h
    return true;
}

void LockedPoolManager::CreateInstance()
{
    // Using a local static instance guarantees that the object is initialized
    // when it's first needed and also deinitialized after all objects that use
    // it are done with it. I can think of one unlikely scenario where we may
    // have a static deinitialization order/problem, but the check in
    // LockedPoolManagerBase's destructor helps us detect if that ever happens.
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
    static LockedPoolManager instance(std::move(allocator));
    LockedPoolManager::_instance = &instance;
}
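// Typical call site (a sketch; in Bitcoin Core this path is normally reached
// through the secure_allocator in support/allocators/secure.h): Instance()
// lazily runs CreateInstance(), and all allocations share the singleton pool.
#if 0
static void secure_buffer_example()
{
    void* p = LockedPoolManager::Instance().alloc(32);
    // ... use the 32 locked bytes for key material ...
    memory_cleanse(p, 32); // wipe before returning the chunk to the pool
    LockedPoolManager::Instance().free(p);
}
#endif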