13cab2bb3Spatrick //===-- xray_buffer_queue.cpp ----------------------------------*- C++ -*-===//
23cab2bb3Spatrick //
33cab2bb3Spatrick // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
43cab2bb3Spatrick // See https://llvm.org/LICENSE.txt for license information.
53cab2bb3Spatrick // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
63cab2bb3Spatrick //
73cab2bb3Spatrick //===----------------------------------------------------------------------===//
83cab2bb3Spatrick //
9*810390e3Srobert // This file is a part of XRay, a dynamic runtime instrumentation system.
103cab2bb3Spatrick //
113cab2bb3Spatrick // Defines the interface for a buffer queue implementation.
123cab2bb3Spatrick //
133cab2bb3Spatrick //===----------------------------------------------------------------------===//
143cab2bb3Spatrick #include "xray_buffer_queue.h"
153cab2bb3Spatrick #include "sanitizer_common/sanitizer_atomic.h"
163cab2bb3Spatrick #include "sanitizer_common/sanitizer_common.h"
173cab2bb3Spatrick #include "sanitizer_common/sanitizer_libc.h"
183cab2bb3Spatrick #if !SANITIZER_FUCHSIA
193cab2bb3Spatrick #include "sanitizer_common/sanitizer_posix.h"
203cab2bb3Spatrick #endif
213cab2bb3Spatrick #include "xray_allocator.h"
223cab2bb3Spatrick #include "xray_defs.h"
233cab2bb3Spatrick #include <memory>
243cab2bb3Spatrick #include <sys/mman.h>
253cab2bb3Spatrick
263cab2bb3Spatrick using namespace __xray;
273cab2bb3Spatrick
283cab2bb3Spatrick namespace {
293cab2bb3Spatrick
allocControlBlock(size_t Size,size_t Count)303cab2bb3Spatrick BufferQueue::ControlBlock *allocControlBlock(size_t Size, size_t Count) {
313cab2bb3Spatrick auto B =
323cab2bb3Spatrick allocateBuffer((sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
333cab2bb3Spatrick return B == nullptr ? nullptr
343cab2bb3Spatrick : reinterpret_cast<BufferQueue::ControlBlock *>(B);
353cab2bb3Spatrick }
363cab2bb3Spatrick
deallocControlBlock(BufferQueue::ControlBlock * C,size_t Size,size_t Count)373cab2bb3Spatrick void deallocControlBlock(BufferQueue::ControlBlock *C, size_t Size,
383cab2bb3Spatrick size_t Count) {
393cab2bb3Spatrick deallocateBuffer(reinterpret_cast<unsigned char *>(C),
403cab2bb3Spatrick (sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
413cab2bb3Spatrick }
423cab2bb3Spatrick
decRefCount(BufferQueue::ControlBlock * C,size_t Size,size_t Count)433cab2bb3Spatrick void decRefCount(BufferQueue::ControlBlock *C, size_t Size, size_t Count) {
443cab2bb3Spatrick if (C == nullptr)
453cab2bb3Spatrick return;
463cab2bb3Spatrick if (atomic_fetch_sub(&C->RefCount, 1, memory_order_acq_rel) == 1)
473cab2bb3Spatrick deallocControlBlock(C, Size, Count);
483cab2bb3Spatrick }
493cab2bb3Spatrick
incRefCount(BufferQueue::ControlBlock * C)503cab2bb3Spatrick void incRefCount(BufferQueue::ControlBlock *C) {
513cab2bb3Spatrick if (C == nullptr)
523cab2bb3Spatrick return;
533cab2bb3Spatrick atomic_fetch_add(&C->RefCount, 1, memory_order_acq_rel);
543cab2bb3Spatrick }
553cab2bb3Spatrick
// We use a struct to ensure that we are allocating one atomic_uint64_t per
// cache line. This allows us to not worry about false-sharing among atomic
// objects being updated (constantly) by different threads.
struct ExtentsPadded {
  union {
    // The actual extents counter for one buffer.
    atomic_uint64_t Extents;
    // Pads the union out to a full cache line so adjacent instances never
    // share a line.
    unsigned char Storage[kCacheLineSize];
  };
};

// Per-buffer stride inside the extents backing store (one cache line each).
constexpr size_t kExtentsSize = sizeof(ExtentsPadded);
673cab2bb3Spatrick
683cab2bb3Spatrick } // namespace
693cab2bb3Spatrick
// (Re-)initializes the queue with BC buffers of BS bytes each. Only legal
// while the queue is finalizing (the constructor starts in that state and
// finalize() re-enters it); otherwise reports AlreadyInitialized. On success
// the queue's generation is advanced and Finalizing is cleared.
BufferQueue::ErrorCode BufferQueue::init(size_t BS, size_t BC) {
  SpinMutexLock Guard(&Mutex);

  if (!finalizing())
    return BufferQueue::ErrorCode::AlreadyInitialized;

  // Release any storage left over from the previous generation.
  cleanupBuffers();

  bool Success = false;
  BufferSize = BS;
  BufferCount = BC;

  BackingStore = allocControlBlock(BufferSize, BufferCount);
  if (BackingStore == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  // Roll back the data backing store if any later step fails.
  auto CleanupBackingStore = at_scope_exit([&, this] {
    if (Success)
      return;
    deallocControlBlock(BackingStore, BufferSize, BufferCount);
    BackingStore = nullptr;
  });

  // Initialize enough atomic_uint64_t instances, each padded out to its own
  // cache line (see ExtentsPadded) — one extents counter per buffer.
  ExtentsBackingStore = allocControlBlock(kExtentsSize, BufferCount);
  if (ExtentsBackingStore == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  // Roll back the extents backing store if any later step fails.
  auto CleanupExtentsBackingStore = at_scope_exit([&, this] {
    if (Success)
      return;
    deallocControlBlock(ExtentsBackingStore, kExtentsSize, BufferCount);
    ExtentsBackingStore = nullptr;
  });

  Buffers = initArray<BufferRep>(BufferCount);
  if (Buffers == nullptr)
    return BufferQueue::ErrorCode::NotEnoughMemory;

  // At this point we increment the generation number to associate the buffers
  // to the new generation.
  atomic_fetch_add(&Generation, 1, memory_order_acq_rel);

  // First, we initialize the refcount in the ControlBlock, which we treat as
  // being at the start of the BackingStore pointer. The queue itself owns
  // this initial reference; it is dropped in cleanupBuffers().
  atomic_store(&BackingStore->RefCount, 1, memory_order_release);
  atomic_store(&ExtentsBackingStore->RefCount, 1, memory_order_release);

  // Then we initialise the individual buffers that sub-divide the whole backing
  // store. Each buffer will start at the `Data` member of the ControlBlock, and
  // will be offsets from these locations.
  for (size_t i = 0; i < BufferCount; ++i) {
    auto &T = Buffers[i];
    auto &Buf = T.Buff;
    // i-th cache-line-sized slot in the extents store.
    auto *E = reinterpret_cast<ExtentsPadded *>(&ExtentsBackingStore->Data +
                                                (kExtentsSize * i));
    Buf.Extents = &E->Extents;
    atomic_store(Buf.Extents, 0, memory_order_release);
    Buf.Generation = generation();
    Buf.Data = &BackingStore->Data + (BufferSize * i);
    Buf.Size = BufferSize;
    Buf.BackingStore = BackingStore;
    Buf.ExtentsBackingStore = ExtentsBackingStore;
    Buf.Count = BufferCount;
    T.Used = false;
  }

  // Reset the ring cursors and open the queue for business.
  Next = Buffers;
  First = Buffers;
  LiveBuffers = 0;
  atomic_store(&Finalizing, 0, memory_order_release);
  Success = true;
  return BufferQueue::ErrorCode::Ok;
}
1443cab2bb3Spatrick
// Constructs a queue of N buffers of B bytes each. Finalizing starts at 1 so
// that init() (which requires a finalizing queue) can run; init() clears it
// on success. Success reports whether initialization succeeded.
BufferQueue::BufferQueue(size_t B, size_t N,
                         bool &Success) XRAY_NEVER_INSTRUMENT
    : BufferSize(B),
      BufferCount(N),
      Mutex(),
      Finalizing{1},
      BackingStore(nullptr),
      ExtentsBackingStore(nullptr),
      Buffers(nullptr),
      Next(Buffers),
      First(Buffers),
      LiveBuffers(0),
      Generation{0} {
  Success = init(B, N) == BufferQueue::ErrorCode::Ok;
}
1603cab2bb3Spatrick
// Hands out the next buffer in the circular queue via Buf. Fails with
// QueueFinalizing once finalize() has been called, and with NotEnoughMemory
// when every buffer is already live.
BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) {
  if (atomic_load(&Finalizing, memory_order_acquire))
    return ErrorCode::QueueFinalizing;

  BufferRep *B = nullptr;
  {
    SpinMutexLock Guard(&Mutex);
    // Queue exhausted: all buffers are currently handed out.
    if (LiveBuffers == BufferCount)
      return ErrorCode::NotEnoughMemory;
    B = Next++;
    // Wrap the acquire cursor around the end of the array.
    if (Next == (Buffers + BufferCount))
      Next = Buffers;
    ++LiveBuffers;
  }

  // The handed-out buffer takes a reference on both backing stores, so they
  // survive even if the queue is re-initialized to a new generation while
  // this buffer is still in flight. Done outside the lock; incRefCount is
  // atomic on its own.
  incRefCount(BackingStore);
  incRefCount(ExtentsBackingStore);
  Buf = B->Buff;
  Buf.Generation = generation();
  B->Used = true;
  return ErrorCode::Ok;
}
1833cab2bb3Spatrick
releaseBuffer(Buffer & Buf)1843cab2bb3Spatrick BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) {
1853cab2bb3Spatrick // Check whether the buffer being referred to is within the bounds of the
1863cab2bb3Spatrick // backing store's range.
1873cab2bb3Spatrick BufferRep *B = nullptr;
1883cab2bb3Spatrick {
1893cab2bb3Spatrick SpinMutexLock Guard(&Mutex);
1903cab2bb3Spatrick if (Buf.Generation != generation() || LiveBuffers == 0) {
1913cab2bb3Spatrick Buf = {};
1923cab2bb3Spatrick decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
1933cab2bb3Spatrick decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
1943cab2bb3Spatrick return BufferQueue::ErrorCode::Ok;
1953cab2bb3Spatrick }
1963cab2bb3Spatrick
1973cab2bb3Spatrick if (Buf.Data < &BackingStore->Data ||
1983cab2bb3Spatrick Buf.Data > &BackingStore->Data + (BufferCount * BufferSize))
1993cab2bb3Spatrick return BufferQueue::ErrorCode::UnrecognizedBuffer;
2003cab2bb3Spatrick
2013cab2bb3Spatrick --LiveBuffers;
2023cab2bb3Spatrick B = First++;
2033cab2bb3Spatrick if (First == (Buffers + BufferCount))
2043cab2bb3Spatrick First = Buffers;
2053cab2bb3Spatrick }
2063cab2bb3Spatrick
2073cab2bb3Spatrick // Now that the buffer has been released, we mark it as "used".
2083cab2bb3Spatrick B->Buff = Buf;
2093cab2bb3Spatrick B->Used = true;
2103cab2bb3Spatrick decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
2113cab2bb3Spatrick decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
2123cab2bb3Spatrick atomic_store(B->Buff.Extents, atomic_load(Buf.Extents, memory_order_acquire),
2133cab2bb3Spatrick memory_order_release);
2143cab2bb3Spatrick Buf = {};
2153cab2bb3Spatrick return ErrorCode::Ok;
2163cab2bb3Spatrick }
2173cab2bb3Spatrick
finalize()2183cab2bb3Spatrick BufferQueue::ErrorCode BufferQueue::finalize() {
2193cab2bb3Spatrick if (atomic_exchange(&Finalizing, 1, memory_order_acq_rel))
2203cab2bb3Spatrick return ErrorCode::QueueFinalizing;
2213cab2bb3Spatrick return ErrorCode::Ok;
2223cab2bb3Spatrick }
2233cab2bb3Spatrick
cleanupBuffers()2243cab2bb3Spatrick void BufferQueue::cleanupBuffers() {
2253cab2bb3Spatrick for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B)
2263cab2bb3Spatrick B->~BufferRep();
2273cab2bb3Spatrick deallocateBuffer(Buffers, BufferCount);
2283cab2bb3Spatrick decRefCount(BackingStore, BufferSize, BufferCount);
2293cab2bb3Spatrick decRefCount(ExtentsBackingStore, kExtentsSize, BufferCount);
2303cab2bb3Spatrick BackingStore = nullptr;
2313cab2bb3Spatrick ExtentsBackingStore = nullptr;
2323cab2bb3Spatrick Buffers = nullptr;
2333cab2bb3Spatrick BufferCount = 0;
2343cab2bb3Spatrick BufferSize = 0;
2353cab2bb3Spatrick }
2363cab2bb3Spatrick
// Releases all storage owned by the queue.
BufferQueue::~BufferQueue() { cleanupBuffers(); }
238