//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine caches a specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue exceeds a specified
// threshold, the oldest memory is recycled. (A usage sketch follows the
// Quarantine class below.)
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Callback> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;  // 1021 slots + 3 header words = 1024 words.
  QuarantineBatch *next;
  uptr size;   // Total size of quarantined nodes plus this batch's overhead.
  uptr count;  // Number of pointers stored in batch[].
  void *batch[kSize];
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8 KiB.

// The callback interface is:
//   void Callback::Recycle(Node *ptr);
//   void *Callback::Allocate(uptr size);
//   void Callback::Deallocate(void *ptr);
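//
// For illustration only, a conforming callback might look like the sketch
// below; MyNode, my_allocator, and its Free/Alloc/Dealloc helpers are
// hypothetical, not part of this header:
//
//   struct MyQuarantineCallback {
//     // Actually release the quarantined node's memory.
//     void Recycle(MyNode *ptr) { my_allocator.Free(ptr); }
//     // Backing storage for QuarantineBatch objects.
//     void *Allocate(uptr size) { return my_allocator.Alloc(size); }
//     void Deallocate(void *ptr) { my_allocator.Dealloc(ptr); }
//   };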
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  // |size| is the total quarantine capacity in bytes; |cache_size| is the
  // per-thread cache limit beyond which Put() drains into the global cache.
  void Init(uptr size, uptr cache_size) {
    max_size_ = size;
    min_size_ = size / 10 * 9;  // 90% of max size.
    max_cache_size_ = cache_size;
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    c->Enqueue(cb, ptr, size);
    if (c->Size() > max_cache_size_)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    // TryLock: if another thread is already recycling, let it bring the
    // quarantine back under the limit instead of contending here.
    if (cache_.Size() > max_size_ && recycle_mutex_.TryLock())
      Recycle(cb);
  }

 private:
  // Read-only data. The pads keep the mutable state below on separate cache
  // lines to avoid false sharing.
  char pad0_[kCacheLineSize];
  uptr max_size_;
  uptr min_size_;
  uptr max_cache_size_;
  char pad1_[kCacheLineSize];
  SpinMutex cache_mutex_;
  SpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  // Called with recycle_mutex_ held (acquired via TryLock() in Drain());
  // releases it once the batches to recycle have been moved out of cache_.
  void NOINLINE Recycle(Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      while (cache_.Size() > min_size_) {
        QuarantineBatch *b = cache_.DequeueBatch();
        tmp.EnqueueBatch(b);
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      for (uptr i = 0; i < kPrefetch && i < b->count; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0; i < b->count; i++) {
        // Stay within b->count: prefetching past it would read slots of
        // batch[] that were never filled (or lie past the end of the array).
        if (i + kPrefetch < b->count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};

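// A minimal usage sketch, for illustration only. ChunkCallback, Chunk, chunk,
// chunk_size, and the byte limits below are hypothetical; only the Quarantine
// and QuarantineCache types come from this header:
//
//   typedef Quarantine<ChunkCallback, Chunk> ChunkQuarantine;
//   static ChunkQuarantine quarantine(LINKER_INITIALIZED);
//   static THREADLOCAL ChunkQuarantine::Cache cache(LINKER_INITIALIZED);
//
//   quarantine.Init(/*size=*/ 1 << 28, /*cache_size=*/ 1 << 20);
//   // On deallocation, stash the chunk instead of freeing it immediately:
//   quarantine.Put(&cache, ChunkCallback(), chunk, chunk_size);
//   // At thread exit, flush the per-thread cache into the global queue:
//   quarantine.Drain(&cache, ChunkCallback());
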
// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  // For the global cache, which lives in zero-initialized static storage.
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  uptr Size() const {
    return atomic_load(&size_, memory_order_relaxed);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      AllocBatch(cb);
      size += sizeof(QuarantineBatch);  // Count the batch in Quarantine size.
    }
    QuarantineBatch *b = list_.back();
    b->batch[b->count++] = ptr;
    b->size += size;
    SizeAdd(size);
  }

  void Transfer(QuarantineCache *c) {
    list_.append_back(&c->list_);
    SizeAdd(c->Size());
    atomic_store(&c->size_, 0, memory_order_relaxed);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return 0;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

 private:
  IntrusiveList<QuarantineBatch> list_;
  atomic_uintptr_t size_;

  // The load-then-store below is not an atomic increment, which is fine:
  // a cache is only modified by its owner thread (or under cache_mutex_ for
  // the global cache); the atomics just make concurrent Size() readers
  // well-defined.
  void SizeAdd(uptr add) {
    atomic_store(&size_, Size() + add, memory_order_relaxed);
  }
  void SizeSub(uptr sub) {
    atomic_store(&size_, Size() - sub, memory_order_relaxed);
  }

  NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
    QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
    b->count = 0;
    b->size = 0;
    list_.push_back(b);
    return b;
  }
};
}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_QUARANTINE_H