LCOV - code coverage report
Current view: top level - src/support - lockedpool.cpp (source / functions)
Test: total_coverage.info
Date: 2020-09-26 01:30:44
                  Hit    Total    Coverage
Lines:            164      172      95.3 %
Functions:         31       34      91.2 %

          Line data    Source code
       1             : // Copyright (c) 2016-2020 The Bitcoin Core developers
       2             : // Distributed under the MIT software license, see the accompanying
       3             : // file COPYING or http://www.opensource.org/licenses/mit-license.php.
       4             : 
       5             : #include <support/lockedpool.h>
       6             : #include <support/cleanse.h>
       7             : 
       8             : #if defined(HAVE_CONFIG_H)
       9             : #include <config/bitcoin-config.h>
      10             : #endif
      11             : 
      12             : #ifdef WIN32
      13             : #ifndef NOMINMAX
      14             : #define NOMINMAX
      15             : #endif
      16             : #include <windows.h>
      17             : #else
      18             : #include <sys/mman.h> // for mmap
      19             : #include <sys/resource.h> // for getrlimit
      20             : #include <limits.h> // for PAGESIZE
      21             : #include <unistd.h> // for sysconf
      22             : #endif
      23             : 
      24             : #include <algorithm>
      25             : #ifdef ARENA_DEBUG
      26             : #include <iomanip>
      27             : #include <iostream>
      28             : #endif
      29             : 
      30             : LockedPoolManager* LockedPoolManager::_instance = nullptr;
      31             : 
      32             : /*******************************************************************************/
      33             : // Utilities
      34             : //
      35             : /** Round x up to a multiple of align, which must be a power of 2 */
      36     1227464 : static inline size_t align_up(size_t x, size_t align)
      37             : {
      38     1227464 :     return (x + align - 1) & ~(align - 1);
      39             : }
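
A quick illustration of the mask trick above (assuming, as the callers in this
file do, that align is a power of two; ~(align - 1) is only a contiguous
low-bit mask in that case):

    // With align = 16 (a power of two):
    align_up(13, 16); // == 16, rounded up to the next multiple of 16
    align_up(32, 16); // == 32, already aligned, unchanged
    align_up(0,  16); // == 0,  zero stays zero
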
      40             : 
      41             : /*******************************************************************************/
      42             : // Implementation: Arena
      43             : 
      44         683 : Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
      45         681 :     base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
      46         683 : {
      47             :     // Start with one free chunk that covers the entire arena
      48         681 :     auto it = size_to_free_chunk.emplace(size_in, base);
      49         681 :     chunks_free.emplace(base, it);
      50         681 :     chunks_free_end.emplace(base + size_in, it);
      51         683 : }
      52             : 
      53         683 : Arena::~Arena()
      54         683 : {
      55         683 : }
      56             : 
      57     1226112 : void* Arena::alloc(size_t size)
      58             : {
      59             :     // Round to next multiple of alignment
      60     1226112 :     size = align_up(size, alignment);
      61             : 
      62             :     // Don't handle zero-sized chunks
      63     1226112 :     if (size == 0)
      64           2 :         return nullptr;
      65             : 
      66             :     // Pick a large enough free chunk: lower_bound returns an iterator to the first element whose key is not less than size.
      67             :     // This allocation strategy is best-fit. According to "Dynamic Storage Allocation: A Survey and Critical Review",
      68             :     // Wilson et al. 1995, http://www.scs.stanford.edu/14wi-cs140/sched/readings/wilson.pdf, best-fit and first-fit
      69             :     // policies seem to work well in practice.
      70     1226110 :     auto size_ptr_it = size_to_free_chunk.lower_bound(size);
      71     1226110 :     if (size_ptr_it == size_to_free_chunk.end())
      72         618 :         return nullptr;
      73             : 
      74             :     // Create the used-chunk, taking its space from the end of the free-chunk
      75     1225492 :     const size_t size_remaining = size_ptr_it->first - size;
      76     1225492 :     auto allocated = chunks_used.emplace(size_ptr_it->second + size_remaining, size).first;
      77     1225492 :     chunks_free_end.erase(size_ptr_it->second + size_ptr_it->first);
      78     1225492 :     if (size_ptr_it->first == size) {
      79             :         // whole chunk is used up
      80      821341 :         chunks_free.erase(size_ptr_it->second);
      81      821341 :     } else {
      82             :         // still some memory left in the chunk
      83      404151 :         auto it_remaining = size_to_free_chunk.emplace(size_remaining, size_ptr_it->second);
      84      404151 :         chunks_free[size_ptr_it->second] = it_remaining;
      85      404151 :         chunks_free_end.emplace(size_ptr_it->second + size_remaining, it_remaining);
      86      404151 :     }
      87     1225492 :     size_to_free_chunk.erase(size_ptr_it);
      88             : 
      89     1225492 :     return reinterpret_cast<void*>(allocated->first);
      90     1226112 : }
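
A distilled, standalone sketch of the best-fit lookup above. The multimap type
is an assumption inferred from the usage here (the real member declarations
live in support/lockedpool.h), and best_fit is a hypothetical helper, not part
of the class:

    #include <cstddef>
    #include <map>

    // Best-fit lookup over a size-sorted multimap of free chunks.
    char* best_fit(const std::multimap<std::size_t, char*>& size_to_free_chunk,
                   std::size_t size)
    {
        // lower_bound(size) yields the smallest free chunk whose size is
        // not less than the request, i.e. the tightest fit available:
        auto it = size_to_free_chunk.lower_bound(size);
        if (it == size_to_free_chunk.end())
            return nullptr; // no free chunk is large enough
        return it->second;
    }
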
      91             : 
      92     1230923 : void Arena::free(void *ptr)
      93             : {
      94             :     // Freeing a null pointer is OK (it is a no-op).
      95     1230923 :     if (ptr == nullptr) {
      96             :         return;
      97             :     }
      98             : 
      99             :     // Remove chunk from used map
     100     1225494 :     auto i = chunks_used.find(static_cast<char*>(ptr));
     101     1225494 :     if (i == chunks_used.end()) {
     102           2 :         throw std::runtime_error("Arena: invalid or double free");
     103             :     }
     104     1225492 :     std::pair<char*, size_t> freed = *i;
     105     1225492 :     chunks_used.erase(i);
     106             : 
     107             :     // coalesce freed with previous chunk
     108     1225492 :     auto prev = chunks_free_end.find(freed.first);
     109     1225492 :     if (prev != chunks_free_end.end()) {
     110      379223 :         freed.first -= prev->second->first;
     111      379223 :         freed.second += prev->second->first;
     112      379223 :         size_to_free_chunk.erase(prev->second);
     113      379223 :         chunks_free_end.erase(prev);
     114      379223 :     }
     115             : 
     116             :     // coalesce freed with chunk after freed
     117     1225492 :     auto next = chunks_free.find(freed.first + freed.second);
     118     1225492 :     if (next != chunks_free.end()) {
     119       24928 :         freed.second += next->second->first;
     120       24928 :         size_to_free_chunk.erase(next->second);
     121       24928 :         chunks_free.erase(next);
     122       24928 :     }
     123             : 
     124             :     // Record the coalesced free chunk in all three free-chunk maps
     125     1225492 :     auto it = size_to_free_chunk.emplace(freed.second, freed.first);
     126     1225492 :     chunks_free[freed.first] = it;
     127     1225492 :     chunks_free_end[freed.first + freed.second] = it;
     128     1230921 : }
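
Why two lookup maps: chunks_free is keyed by each free chunk's start address
and chunks_free_end by its end address, so both neighbors of a freed chunk
[p, p + sz) are found with exact lookups rather than a scan. A sketch of the
two probes used above (p and sz are stand-in names for freed.first and
freed.second):

    auto prev = chunks_free_end.find(p);  // a free chunk ending exactly at p?
    auto next = chunks_free.find(p + sz); // a free chunk starting exactly at p + sz?
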
     129             : 
     130          34 : Arena::Stats Arena::stats() const
     131             : {
     132          34 :     Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() };
     133        1081 :     for (const auto& chunk: chunks_used)
     134        1047 :         r.used += chunk.second;
     135          72 :     for (const auto& chunk: chunks_free)
     136          38 :         r.free += chunk.second->first;
     137          34 :     r.total = r.used + r.free;
     138          34 :     return r;
     139             : }
     140             : 
     141             : #ifdef ARENA_DEBUG
     142             : static void printchunk(void* base, size_t sz, bool used) {
     143             :     std::cout <<
     144             :         "0x" << std::hex << std::setw(16) << std::setfill('0') << base <<
     145             :         " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz <<
     146             :         " 0x" << used << std::endl;
     147             : }
     148             : void Arena::walk() const
     149             : {
     150             :     for (const auto& chunk: chunks_used)
     151             :         printchunk(chunk.first, chunk.second, true);
     152             :     std::cout << std::endl;
     153             :     for (const auto& chunk: chunks_free)
     154             :         printchunk(chunk.first, chunk.second->first, false);
     155             :     std::cout << std::endl;
     156             : }
     157             : #endif
     158             : 
     159             : /*******************************************************************************/
     160             : // Implementation: Win32LockedPageAllocator
     161             : 
     162             : #ifdef WIN32
     163             : /** LockedPageAllocator specialized for Windows.
     164             :  */
     165             : class Win32LockedPageAllocator: public LockedPageAllocator
     166             : {
     167             : public:
     168             :     Win32LockedPageAllocator();
     169             :     void* AllocateLocked(size_t len, bool *lockingSuccess) override;
     170             :     void FreeLocked(void* addr, size_t len) override;
     171             :     size_t GetLimit() override;
     172             : private:
     173             :     size_t page_size;
     174             : };
     175             : 
     176             : Win32LockedPageAllocator::Win32LockedPageAllocator()
     177             : {
     178             :     // Determine system page size in bytes
     179             :     SYSTEM_INFO sSysInfo;
     180             :     GetSystemInfo(&sSysInfo);
     181             :     page_size = sSysInfo.dwPageSize;
     182             : }
     183             : void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
     184             : {
     185             :     len = align_up(len, page_size);
     186             :     void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
     187             :     if (addr) {
     188             :         // VirtualLock is used to attempt to keep keying material out of swap. Note
     189             :         // that it does not provide this as a guarantee, but, in practice, memory
     190             :         // that has been VirtualLock'd almost never gets written to the pagefile
     191             :         // except in rare circumstances where memory is extremely low.
     192             :         *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
     193             :     }
     194             :     return addr;
     195             : }
     196             : void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
     197             : {
     198             :     len = align_up(len, page_size);
     199             :     memory_cleanse(addr, len);
     200             :     VirtualUnlock(const_cast<void*>(addr), len);
     201             : }
     202             : 
     203             : size_t Win32LockedPageAllocator::GetLimit()
     204             : {
     205             :     // TODO: is there a locked-memory limit on Windows, and how can it be queried?
     206             :     return std::numeric_limits<size_t>::max();
     207             : }
     208             : #endif
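
A hypothetical usage sketch of the LockedPageAllocator interface that both
platform wrappers implement (the size and variable names are illustrative
only):

    bool locked = false;
    Win32LockedPageAllocator allocator;
    void* p = allocator.AllocateLocked(4096, &locked); // rounded up to page size
    if (p) {
        // `locked` reports whether VirtualLock succeeded, i.e. whether the
        // pages are inhibited from being written to the pagefile.
        allocator.FreeLocked(p, 4096); // cleanses the memory before releasing it
    }
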
     209             : 
     210             : /*******************************************************************************/
     211             : // Implementation: PosixLockedPageAllocator
     212             : 
     213             : #ifndef WIN32
     214             : /** LockedPageAllocator specialized for OSes that don't try to be
     215             :  * special snowflakes.
     216             :  */
     217        2028 : class PosixLockedPageAllocator: public LockedPageAllocator
     218             : {
     219             : public:
     220             :     PosixLockedPageAllocator();
     221             :     void* AllocateLocked(size_t len, bool *lockingSuccess) override;
     222             :     void FreeLocked(void* addr, size_t len) override;
     223             :     size_t GetLimit() override;
     224             : private:
     225             :     size_t page_size;
     226             : };
     227             : 
     228        1352 : PosixLockedPageAllocator::PosixLockedPageAllocator()
     229        1352 : {
     230             :     // Determine system page size in bytes
     231             : #if defined(PAGESIZE) // defined in limits.h
     232             :     page_size = PAGESIZE;
     233             : #else                   // assume some POSIX OS
     234         676 :     page_size = sysconf(_SC_PAGESIZE);
     235             : #endif
     236        1352 : }
     237             : 
     238             : // Some systems (at least OS X) do not define MAP_ANONYMOUS and instead
     239             : // define the deprecated MAP_ANON
     240             : #ifndef MAP_ANONYMOUS
     241             : #define MAP_ANONYMOUS MAP_ANON
     242             : #endif
     243             : 
     244         676 : void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
     245             : {
     246             :     void *addr;
     247         676 :     len = align_up(len, page_size);
     248         676 :     addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
     249         676 :     if (addr == MAP_FAILED) {
     250           0 :         return nullptr;
     251             :     }
     252         676 :     if (addr) {
     253         676 :         *lockingSuccess = mlock(addr, len) == 0;
     254             : #if defined(MADV_DONTDUMP) // Linux
     255             :         madvise(addr, len, MADV_DONTDUMP);
     256             : #elif defined(MADV_NOCORE) // FreeBSD
     257             :         madvise(addr, len, MADV_NOCORE);
     258             : #endif
     259         676 :     }
     260         676 :     return addr;
     261         676 : }
     262         676 : void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
     263             : {
     264         676 :     len = align_up(len, page_size);
     265         676 :     memory_cleanse(addr, len);
     266         676 :     munlock(addr, len);
     267         676 :     munmap(addr, len);
     268         676 : }
     269         676 : size_t PosixLockedPageAllocator::GetLimit()
     270             : {
     271             : #ifdef RLIMIT_MEMLOCK
     272         676 :     struct rlimit rlim;
     273         676 :     if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
     274         676 :         if (rlim.rlim_cur != RLIM_INFINITY) {
     275           0 :             return rlim.rlim_cur;
     276             :         }
     277             :     }
     278             : #endif
     279         676 :     return std::numeric_limits<size_t>::max();
     280         676 : }
     281             : #endif
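
When mlock() fails because RLIMIT_MEMLOCK is exhausted, a sufficiently
privileged process could raise the soft limit before allocating. This file
does not do that; the following is only a sketch of the standard
getrlimit/setrlimit pattern (headers already included above), and whether it
succeeds is system-dependent:

    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        rlim.rlim_cur = rlim.rlim_max; // raise soft limit up to the hard limit
        setrlimit(RLIMIT_MEMLOCK, &rlim);
    }
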
     282             : 
     283             : /*******************************************************************************/
     284             : // Implementation: LockedPool
     285             : 
     286         678 : LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
     287         677 :     allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
     288           1 : {
     289         678 : }
     290             : 
     291         678 : LockedPool::~LockedPool()
     292           1 : {
     293         678 : }
     294     1220173 : void* LockedPool::alloc(size_t size)
     295             : {
     296     1220173 :     std::lock_guard<std::mutex> lock(mutex);
     297             : 
     298             :     // Don't handle impossible sizes
     299     1220173 :     if (size == 0 || size > ARENA_SIZE)
     300           2 :         return nullptr;
     301             : 
     302             :     // Try allocating from each current arena
     303     2439671 :     for (auto &arena: arenas) {
     304     1219500 :         void *addr = arena.alloc(size);
     305     1219500 :         if (addr) {
     306     1219491 :             return addr;
     307             :         }
     308          18 :     }
     309             :     // If that fails, create a new one
     310         680 :     if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
     311         679 :         return arenas.back().alloc(size);
     312             :     }
     313           1 :     return nullptr;
     314     1220173 : }
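
An illustrative call pattern (LockedPoolManager::Instance() is declared in
support/lockedpool.h; in Bitcoin Core, secure_allocator routes allocations of
key material through this pool):

    void* key = LockedPoolManager::Instance().alloc(32); // must be <= ARENA_SIZE
    if (key) {
        // ... keep secret material in the locked arena memory ...
        LockedPoolManager::Instance().free(key);
    }
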
     315             : 
     316     1220171 : void LockedPool::free(void *ptr)
     317             : {
     318     1220171 :     std::lock_guard<std::mutex> lock(mutex);
     319             :     // TODO: we could do better than this linear search by keeping a map from
     320             :     // arena extents to arenas, and looking up the owning arena by address.
     321     2440348 :     for (auto &arena: arenas) {
     322     1220177 :         if (arena.addressInArena(ptr)) {
     323     1220171 :             arena.free(ptr);
     324     1220170 :             return;
     325             :         }
     326           7 :     }
     327           0 :     throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
     328     1220172 : }
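
A sketch of the improvement suggested in the TODO above, not what the code
currently does: index arenas by their end address so that one ordered lookup
replaces the linear scan (arena_by_end is a hypothetical member):

    std::map<void*, Arena*> arena_by_end;
    auto it = arena_by_end.upper_bound(ptr); // first arena ending beyond ptr
    if (it != arena_by_end.end() && it->second->addressInArena(ptr)) {
        it->second->free(ptr);
        return;
    }
    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
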
     329             : 
     330          14 : LockedPool::Stats LockedPool::stats() const
     331             : {
     332          14 :     std::lock_guard<std::mutex> lock(mutex);
     333          14 :     LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0};
     334          28 :     for (const auto &arena: arenas) {
     335          14 :         Arena::Stats i = arena.stats();
     336          14 :         r.used += i.used;
     337          14 :         r.free += i.free;
     338          14 :         r.total += i.total;
     339          14 :         r.chunks_used += i.chunks_used;
     340          14 :         r.chunks_free += i.chunks_free;
     341          14 :     }
     342             :     return r;
     343          14 : }
     344             : 
     345         680 : bool LockedPool::new_arena(size_t size, size_t align)
     346             : {
     347         680 :     bool locked;
     348             :     // If this is the first arena, handle it specially: cap the size at the
     349             :     // process's locked-memory limit so that the first arena can at least be
     350             :     // fully locked. The exception is a limit of 0, in which case no memory
     351             :     // can be locked at all, so we skip the cap entirely.
     352         680 :     if (arenas.empty()) {
     353         677 :         size_t limit = allocator->GetLimit();
     354         677 :         if (limit > 0) {
     355         677 :             size = std::min(size, limit);
     356         677 :         }
     357         677 :     }
     358         680 :     void *addr = allocator->AllocateLocked(size, &locked);
     359         680 :     if (!addr) {
     360           1 :         return false;
     361             :     }
     362         679 :     if (locked) {
     363         677 :         cumulative_bytes_locked += size;
     364         679 :     } else if (lf_cb) { // Call the locking-failed callback if locking failed
     365           0 :         if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed.
     366           0 :             allocator->FreeLocked(addr, size);
     367           0 :             return false;
     368             :         }
     369             :     }
     370         679 :     arenas.emplace_back(allocator.get(), addr, size, align);
     371         679 :     return true;
     372         680 : }
     373             : 
     374        1358 : LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
     375        1358 :     Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
     376        1358 : {
     377        1358 : }
     378        1358 : LockedPool::LockedPageArena::~LockedPageArena()
     379        1358 : {
     380         679 :     allocator->FreeLocked(base, size);
     381        1358 : }
     382             : 
     383             : /*******************************************************************************/
     384             : // Implementation: LockedPoolManager
     385             : //
     386        1352 : LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator_in):
     387         676 :     LockedPool(std::move(allocator_in), &LockedPoolManager::LockingFailed)
     388        1352 : {
     389        1352 : }
     390             : 
     391           0 : bool LockedPoolManager::LockingFailed()
     392             : {
     393             :     // TODO: log something, but how can we do that without including util.h?
     394           0 :     return true;
     395             : }
     396             : 
     397         676 : void LockedPoolManager::CreateInstance()
     398             : {
     399             :     // Using a local static instance guarantees that the object is initialized
     400             :     // when it's first needed and also deinitialized after all objects that use
     401             :     // it are done with it. I can think of one unlikely scenario where we may
     402             :     // have a static deinitialization order problem, but the check in
     403             :     // LockedPoolManagerBase's destructor helps us detect if that ever happens.
     404             : #ifdef WIN32
     405             :     std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
     406             : #else
     407         676 :     std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
     408             : #endif
     409         676 :     static LockedPoolManager instance(std::move(allocator));
     410         676 :     LockedPoolManager::_instance = &instance;
     411         676 : }
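
The pattern above is a function-local static ("Meyers") singleton; since C++11
its initialization is guaranteed to be thread-safe. A generic sketch with a
stand-in Widget type:

    Widget& GetWidget()
    {
        static Widget instance; // constructed on first call, destroyed at exit
        return instance;
    }
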

Generated by: LCOV version 1.15