//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine holds a specified amount of memory in per-thread caches and
// evicts it to a global FIFO queue. When the queue exceeds the specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];

  void init(void *ptr, uptr size) {
    count = 1;
    batch[0] = ptr;
    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
  }

  // The total size of quarantined nodes recorded in this batch.
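  // E.g. on a 64-bit target (sizeof(QuarantineBatch) == 8192), a batch that
  // was init()-ed with a 100-byte chunk has size == 8292 but
  // quarantined_size() == 100: the batch header is charged against the
  // quarantine limit, yet is not user memory.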
  uptr quarantined_size() const {
    return size - sizeof(QuarantineBatch);
  }

  void push_back(void *ptr, uptr size) {
    CHECK_LT(count, kSize);
    batch[count++] = ptr;
    this->size += size;
  }

  bool can_merge(const QuarantineBatch* const from) const {
    return count + from->count <= kSize;
  }

  void merge(QuarantineBatch* const from) {
    CHECK_LE(count + from->count, kSize);
    CHECK_GE(size, sizeof(QuarantineBatch));

    for (uptr i = 0; i < from->count; ++i)
      batch[count + i] = from->batch[i];
    count += from->count;
    size += from->quarantined_size();

    from->count = 0;
    from->size = sizeof(QuarantineBatch);
  }
};

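// On a 64-bit target, sizeof(QuarantineBatch) is 3 * 8 header bytes plus
// 1021 * 8 bytes of pointers == 1024 * 8 == 8192 bytes, which is presumably
// why kSize is 1021: the batch fills the 8Kb budget checked below exactly.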
COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.

// The callback interface is:
//   void Callback::Recycle(Node *ptr);
//   void *Callback::Allocate(uptr size);
//   void Callback::Deallocate(void *ptr);
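//
// A minimal sketch of a conforming callback (ExampleNode and the
// InternalAlloc/InternalFree helpers are illustrative assumptions, not
// something this header provides):
//
//   struct ExampleCallback {
//     // Called for every quarantined chunk once it is recycled.
//     void Recycle(ExampleNode *ptr) { InternalFree(ptr); }
//     // Used by the quarantine to allocate/free QuarantineBatch storage.
//     void *Allocate(uptr size) { return InternalAlloc(size); }
//     void Deallocate(void *ptr) { InternalFree(ptr); }
//   };
//
// Typical flow (also a sketch): one global Quarantine, one Cache per thread:
//
//   static Quarantine<ExampleCallback, ExampleNode> quarantine(LINKER_INITIALIZED);
//   quarantine.Init(/*size=*/256 << 20, /*cache_size=*/1 << 20);
//   quarantine.Put(&thread_cache, ExampleCallback(), node, node_size);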
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // Thread local quarantine size can be zero only when global quarantine size
    // is zero (it allows us to perform just one atomic read per Put() call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store_relaxed(&max_size_, size);
    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
    atomic_store_relaxed(&max_cache_size_, cache_size);

    cache_mutex_.Init();
    recycle_mutex_.Init();
  }

  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
  uptr GetCacheSize() const {
    return atomic_load_relaxed(&max_cache_size_);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check cache size anyway to accommodate a runtime cache_size change.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
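    // TryLock() keeps Drain() non-blocking: if another thread is already
    // recycling, this thread skips the work instead of waiting for the lock.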
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(atomic_load_relaxed(&min_size_), cb);
  }

  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    recycle_mutex_.Lock();
    Recycle(0, cb);
  }

  void PrintStats() const {
    // Assumes that the world is stopped, just like the allocator's PrintStats.
    Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
           GetSize() >> 20, GetCacheSize() >> 10);
    cache_.PrintStats();
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  StaticSpinMutex cache_mutex_;
  StaticSpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(uptr min_size, Callback cb)
      SANITIZER_REQUIRES(recycle_mutex_) SANITIZER_RELEASE(recycle_mutex_) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      // Go over the batches and merge partially filled ones to
      // save some memory; otherwise the batches themselves (whose memory is
      // counted against the quarantine limit) can outweigh the user's actual
      // quarantined chunks, which defeats the purpose of the quarantine.
      uptr cache_size = cache_.Size();
      uptr overhead_size = cache_.OverheadSize();
      CHECK_GE(cache_size, overhead_size);
      // Do the merge only when the overhead exceeds this predefined limit
      // (which might require some tuning). It saves us a merge attempt when
      // the batch list is unlikely to contain batches suitable for merging.
      const uptr kOverheadThresholdPercents = 100;
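      // With the threshold at 100% the condition below reduces to
      // overhead_size > quarantined payload (cache_size - overhead_size),
      // i.e. merge once batch headers take up more memory than user chunks.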
      if (cache_size > overhead_size &&
          overhead_size * (100 + kOverheadThresholdPercents) >
              cache_size * kOverheadThresholdPercents) {
        cache_.MergeBatches(&tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined chunks.
      while (cache_.Size() > min_size) {
        tmp.EnqueueBatch(cache_.DequeueBatch());
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
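      // Prefetch the first chunks ahead of the Recycle() loop below; the
      // quarantined chunks have likely gone cold, so this helps hide the
      // memory latency of touching each one.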
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total memory used, including internal accounting.
  uptr Size() const {
    return atomic_load_relaxed(&size_);
  }

  // Memory used for internal accounting.
  uptr OverheadSize() const {
    return list_.size() * sizeof(QuarantineBatch);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
      CHECK(b);
      b->init(ptr, size);
      EnqueueBatch(b);
    } else {
      list_.back()->push_back(ptr, size);
      SizeAdd(size);
    }
  }

  void Transfer(QuarantineCache *from_cache) {
    list_.append_back(&from_cache->list_);
    SizeAdd(from_cache->Size());

    atomic_store_relaxed(&from_cache->size_, 0);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

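  // Merges adjacent batches whenever their combined chunk count fits into one
  // batch, and moves the emptied batches to *to_deallocate. For example, two
  // neighbouring batches holding 600 and 400 chunks (1000 <= kSize) become a
  // single 1000-chunk batch plus one empty batch queued for deallocation.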
  void MergeBatches(QuarantineCache *to_deallocate) {
    uptr extracted_size = 0;
    QuarantineBatch *current = list_.front();
    while (current && current->next) {
      if (current->can_merge(current->next)) {
        QuarantineBatch *extracted = current->next;
        // Move all the chunks into the current batch.
        current->merge(extracted);
        CHECK_EQ(extracted->count, 0);
        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its size.
        list_.extract(current, extracted);
        extracted_size += extracted->size;
        // Add it to deallocation list.
        to_deallocate->EnqueueBatch(extracted);
      } else {
        current = current->next;
      }
    }
    SizeSub(extracted_size);
  }

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_overhead_bytes = 0;
    uptr total_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_bytes += (*it).size;
      total_overhead_bytes += (*it).size - (*it).quarantined_size();
      total_quarantine_chunks += (*it).count;
    }
    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
        0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
    int memory_overhead_percent = total_quarantined_bytes == 0 ?
        0 : total_overhead_bytes * 100 / total_quarantined_bytes;
    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
           "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
           "\n",
           batch_count, total_bytes, total_quarantined_bytes,
           total_quarantine_chunks, quarantine_chunks_capacity,
           chunks_usage_percent, memory_overhead_percent);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;

  void SizeAdd(uptr add) {
    atomic_store_relaxed(&size_, Size() + add);
  }
  void SizeSub(uptr sub) {
    atomic_store_relaxed(&size_, Size() - sub);
  }
};

} // namespace __sanitizer

#endif // SANITIZER_QUARANTINE_H