//===- ThreadSafeAllocator.h ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_THREADSAFEALLOCATOR_H
#define LLVM_SUPPORT_THREADSAFEALLOCATOR_H

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Allocator.h"
#include <atomic>

namespace llvm {

/// Thread-safe allocator adaptor. Uses a spin lock on the assumption that
/// contention here is extremely rare.
///
/// TODO: Using a spin lock on every allocation can be quite expensive when
/// contention is high. Since this is mainly used for BumpPtrAllocator and
/// SpecificBumpPtrAllocator, it'd be better to have a specific thread-safe
/// BumpPtrAllocator implementation that only uses a fair lock when allocating
/// a new slab but otherwise uses atomics and is lock-free.
26*d07c3cf6SSteven Wu template <class AllocatorType> class ThreadSafeAllocator { 27*d07c3cf6SSteven Wu struct LockGuard { LockGuardLockGuard28*d07c3cf6SSteven Wu LockGuard(std::atomic_flag &Flag) : Flag(Flag) { 29*d07c3cf6SSteven Wu if (LLVM_UNLIKELY(Flag.test_and_set(std::memory_order_acquire))) 30*d07c3cf6SSteven Wu while (Flag.test_and_set(std::memory_order_acquire)) { 31*d07c3cf6SSteven Wu } 32*d07c3cf6SSteven Wu } ~LockGuardLockGuard33*d07c3cf6SSteven Wu ~LockGuard() { Flag.clear(std::memory_order_release); } 34*d07c3cf6SSteven Wu std::atomic_flag &Flag; 35*d07c3cf6SSteven Wu }; 36*d07c3cf6SSteven Wu 37*d07c3cf6SSteven Wu public: Allocate(size_t N)38*d07c3cf6SSteven Wu auto Allocate(size_t N) { 39*d07c3cf6SSteven Wu return applyLocked([N](AllocatorType &Alloc) { return Alloc.Allocate(N); }); 40*d07c3cf6SSteven Wu } 41*d07c3cf6SSteven Wu Allocate(size_t Size,size_t Align)42*d07c3cf6SSteven Wu auto Allocate(size_t Size, size_t Align) { 43*d07c3cf6SSteven Wu return applyLocked([Size, Align](AllocatorType &Alloc) { 44*d07c3cf6SSteven Wu return Alloc.Allocate(Size, Align); 45*d07c3cf6SSteven Wu }); 46*d07c3cf6SSteven Wu } 47*d07c3cf6SSteven Wu 48*d07c3cf6SSteven Wu template <typename FnT, 49*d07c3cf6SSteven Wu typename T = typename llvm::function_traits<FnT>::result_t> applyLocked(FnT Fn)50*d07c3cf6SSteven Wu T applyLocked(FnT Fn) { 51*d07c3cf6SSteven Wu LockGuard Lock(Flag); 52*d07c3cf6SSteven Wu return Fn(Alloc); 53*d07c3cf6SSteven Wu } 54*d07c3cf6SSteven Wu 55*d07c3cf6SSteven Wu private: 56*d07c3cf6SSteven Wu AllocatorType Alloc; 57*d07c3cf6SSteven Wu std::atomic_flag Flag = ATOMIC_FLAG_INIT; 58*d07c3cf6SSteven Wu }; 59*d07c3cf6SSteven Wu 60*d07c3cf6SSteven Wu } // namespace llvm 61*d07c3cf6SSteven Wu 62*d07c3cf6SSteven Wu #endif // LLVM_SUPPORT_THREADSAFEALLOCATOR_H 63