//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Memory quarantine for AddressSanitizer and potentially other tools.
// The quarantine caches a specified amount of memory in per-thread caches,
// then evicts it to a global FIFO queue. When the queue reaches the specified
// threshold, the oldest memory is recycled.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_QUARANTINE_H
#define SANITIZER_QUARANTINE_H

#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
#include "sanitizer_list.h"

namespace __sanitizer {

template<typename Node> class QuarantineCache;

struct QuarantineBatch {
  static const uptr kSize = 1021;
  QuarantineBatch *next;
  uptr size;
  uptr count;
  void *batch[kSize];

  void init(void *ptr, uptr size) {
    count = 1;
    batch[0] = ptr;
    this->size = size + sizeof(QuarantineBatch);  // Account for the batch size.
  }

  // The total size of quarantined nodes recorded in this batch.
  uptr quarantined_size() const {
    return size - sizeof(QuarantineBatch);
  }

  void push_back(void *ptr, uptr size) {
    CHECK_LT(count, kSize);
    batch[count++] = ptr;
    this->size += size;
  }

  bool can_merge(const QuarantineBatch* const from) const {
    return count + from->count <= kSize;
  }

  void merge(QuarantineBatch* const from) {
    CHECK_LE(count + from->count, kSize);
    CHECK_GE(size, sizeof(QuarantineBatch));

    for (uptr i = 0; i < from->count; ++i)
      batch[count + i] = from->batch[i];
    count += from->count;
    size += from->quarantined_size();

    from->count = 0;
    from->size = sizeof(QuarantineBatch);
  }
};

COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13));  // 8Kb.

// The callback interface is:
// void Callback::Recycle(Node *ptr);
// void *cb.Allocate(uptr size);
// void cb.Deallocate(void *ptr);
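//
// A minimal sketch of a conforming callback (illustrative only; the
// MyAllocAlloc()/MyAllocFree() helpers are hypothetical and not part of this
// header):
//
//   struct ExampleCallback {
//     void Recycle(Node *ptr) { MyAllocFree(ptr); }              // Reuse the chunk.
//     void *Allocate(uptr size) { return MyAllocAlloc(size); }   // Batch storage.
//     void Deallocate(void *ptr) { MyAllocFree(ptr); }           // Batch storage.
//   };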
template<typename Callback, typename Node>
class Quarantine {
 public:
  typedef QuarantineCache<Callback> Cache;

  explicit Quarantine(LinkerInitialized)
      : cache_(LINKER_INITIALIZED) {
  }

  void Init(uptr size, uptr cache_size) {
    // The thread-local quarantine size can be zero only when the global
    // quarantine size is zero (this allows us to perform just one atomic read
    // per Put() call).
    CHECK((size == 0 && cache_size == 0) || cache_size != 0);

    atomic_store_relaxed(&max_size_, size);
    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
    atomic_store_relaxed(&max_cache_size_, cache_size);

    cache_mutex_.Init();
    recycle_mutex_.Init();
  }

  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
  uptr GetCacheSize() const {
    return atomic_load_relaxed(&max_cache_size_);
  }

  void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
    uptr cache_size = GetCacheSize();
    if (cache_size) {
      c->Enqueue(cb, ptr, size);
    } else {
      // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
      cb.Recycle(ptr);
    }
    // Check the cache size anyway to accommodate a runtime cache_size change.
    if (c->Size() > cache_size)
      Drain(c, cb);
  }

  void NOINLINE Drain(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
      Recycle(atomic_load_relaxed(&min_size_), cb);
  }

  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
    {
      SpinMutexLock l(&cache_mutex_);
      cache_.Transfer(c);
    }
    recycle_mutex_.Lock();
    Recycle(0, cb);
  }

  void PrintStats() const {
    // This assumes that the world is stopped, just like the allocator's
    // PrintStats.
    Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
           GetSize() >> 20, GetCacheSize() >> 10);
    cache_.PrintStats();
  }

 private:
  // Read-only data.
  char pad0_[kCacheLineSize];
  atomic_uintptr_t max_size_;
  atomic_uintptr_t min_size_;
  atomic_uintptr_t max_cache_size_;
  char pad1_[kCacheLineSize];
  StaticSpinMutex cache_mutex_;
  StaticSpinMutex recycle_mutex_;
  Cache cache_;
  char pad2_[kCacheLineSize];

  void NOINLINE Recycle(uptr min_size, Callback cb) {
    Cache tmp;
    {
      SpinMutexLock l(&cache_mutex_);
      // Go over the batches and merge partially filled ones to save some
      // memory; otherwise the batches themselves (whose memory is counted
      // against the quarantine limit) can outweigh the user's actual
      // quarantined chunks, which defeats the purpose of the quarantine.
      uptr cache_size = cache_.Size();
      uptr overhead_size = cache_.OverheadSize();
      CHECK_GE(cache_size, overhead_size);
      // Do the merge only when the overhead exceeds this predefined limit
      // (which might require some tuning). It saves us a merge attempt when
      // the quarantine is unlikely to contain batches suitable for merging.
      const uptr kOverheadThresholdPercents = 100;
      if (cache_size > overhead_size &&
          overhead_size * (100 + kOverheadThresholdPercents) >
              cache_size * kOverheadThresholdPercents) {
        cache_.MergeBatches(&tmp);
      }
      // Extract enough chunks from the quarantine to get below the max
      // quarantine size and leave some leeway for the newly quarantined
      // chunks.
      while (cache_.Size() > min_size) {
        tmp.EnqueueBatch(cache_.DequeueBatch());
      }
    }
    recycle_mutex_.Unlock();
    DoRecycle(&tmp, cb);
  }

  void NOINLINE DoRecycle(Cache *c, Callback cb) {
    while (QuarantineBatch *b = c->DequeueBatch()) {
      const uptr kPrefetch = 16;
      CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
      for (uptr i = 0; i < kPrefetch; i++)
        PREFETCH(b->batch[i]);
      for (uptr i = 0, count = b->count; i < count; i++) {
        if (i + kPrefetch < count)
          PREFETCH(b->batch[i + kPrefetch]);
        cb.Recycle((Node*)b->batch[i]);
      }
      cb.Deallocate(b);
    }
  }
};
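
// A rough sketch of the intended use (illustrative only; ExampleCallback from
// the sketch above, GetThreadCache(), and the size constants are assumptions,
// not part of this header):
//
//   static Quarantine<ExampleCallback, Node> quarantine(LINKER_INITIALIZED);
//
//   void ToolInit() {
//     // E.g. a 64Mb global quarantine with 1Mb thread-local caches.
//     quarantine.Init(/*size=*/ 64 << 20, /*cache_size=*/ 1 << 20);
//   }
//
//   void ToolDeallocate(ExampleCallback cb, Node *ptr, uptr size) {
//     // Put() defers the actual free: chunks accumulate in the thread-local
//     // cache, overflow into the global FIFO queue, and the oldest memory is
//     // recycled via cb.Recycle() once the global limit is exceeded.
//     quarantine.Put(GetThreadCache(), cb, ptr, size);
//   }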

// Per-thread cache of memory blocks.
template<typename Callback>
class QuarantineCache {
 public:
  explicit QuarantineCache(LinkerInitialized) {
  }

  QuarantineCache()
      : size_() {
    list_.clear();
  }

  // Total memory used, including internal accounting.
  uptr Size() const {
    return atomic_load_relaxed(&size_);
  }

  // Memory used for internal accounting.
  uptr OverheadSize() const {
    return list_.size() * sizeof(QuarantineBatch);
  }

  void Enqueue(Callback cb, void *ptr, uptr size) {
    if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
      QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
      CHECK(b);
      b->init(ptr, size);
      EnqueueBatch(b);
    } else {
      list_.back()->push_back(ptr, size);
      SizeAdd(size);
    }
  }

  void Transfer(QuarantineCache *from_cache) {
    list_.append_back(&from_cache->list_);
    SizeAdd(from_cache->Size());

    atomic_store_relaxed(&from_cache->size_, 0);
  }

  void EnqueueBatch(QuarantineBatch *b) {
    list_.push_back(b);
    SizeAdd(b->size);
  }

  QuarantineBatch *DequeueBatch() {
    if (list_.empty())
      return nullptr;
    QuarantineBatch *b = list_.front();
    list_.pop_front();
    SizeSub(b->size);
    return b;
  }

  void MergeBatches(QuarantineCache *to_deallocate) {
    uptr extracted_size = 0;
    QuarantineBatch *current = list_.front();
    while (current && current->next) {
      if (current->can_merge(current->next)) {
        QuarantineBatch *extracted = current->next;
        // Move all the chunks into the current batch.
        current->merge(extracted);
        CHECK_EQ(extracted->count, 0);
        CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
        // Remove the next batch from the list and account for its size.
        list_.extract(current, extracted);
        extracted_size += extracted->size;
        // Add it to deallocation list.
        to_deallocate->EnqueueBatch(extracted);
      } else {
        current = current->next;
      }
    }
    SizeSub(extracted_size);
  }
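
  // Worked example (illustrative only; the numbers are made up): if two
  // adjacent batches hold 10 and 5 chunks, can_merge() succeeds (15 <= kSize)
  // and merge() moves all 15 chunks into the first batch, leaving the second
  // with count == 0 and size == sizeof(QuarantineBatch). Extracting it into
  // to_deallocate then removes exactly sizeof(QuarantineBatch) bytes of
  // overhead from this cache's Size().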

  void PrintStats() const {
    uptr batch_count = 0;
    uptr total_overhead_bytes = 0;
    uptr total_bytes = 0;
    uptr total_quarantine_chunks = 0;
    for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
      batch_count++;
      total_bytes += (*it).size;
      total_overhead_bytes += (*it).size - (*it).quarantined_size();
      total_quarantine_chunks += (*it).count;
    }
    uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
    int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
        0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
    uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
    int memory_overhead_percent = total_quarantined_bytes == 0 ?
        0 : total_overhead_bytes * 100 / total_quarantined_bytes;
    Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
           "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
           "\n",
           batch_count, total_bytes, total_quarantined_bytes,
           total_quarantine_chunks, quarantine_chunks_capacity,
           chunks_usage_percent, memory_overhead_percent);
  }

 private:
  typedef IntrusiveList<QuarantineBatch> List;

  List list_;
  atomic_uintptr_t size_;

  void SizeAdd(uptr add) {
    atomic_store_relaxed(&size_, Size() + add);
  }
  void SizeSub(uptr sub) {
    atomic_store_relaxed(&size_, Size() - sub);
  }
};

} // namespace __sanitizer

#endif // SANITIZER_QUARANTINE_H