// Allocator details.

// Copyright (C) 2004-2018 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

// The include file is needed for uintptr_t.  If this file does not compile,
// check to make sure the target has <stdint.h> and that it provides
// uintptr_t.
#include <stdint.h>

namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record* _M_thread_freelist;
    _Thread_record* _M_thread_freelist_array;
    size_t _M_max_threads;
    __gthread_key_t _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
          _M_thread_freelist = 0;
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
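    // The key value is the thread id itself, carried through the
    // pointer slot (see _M_get_thread_id below; ids start at 1, so a
    // non-null value means "assigned").  Subtracting 1 recovers the
    // record's index in _M_thread_freelist_array.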
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      uintptr_t _M_id = reinterpret_cast<uintptr_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }

  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Get a new block dynamically, set it up for use.
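    // Chunk layout: a _Block_address bookkeeping header at the front
    // (chained onto __bin._M_address so _M_destroy can release the
    // chunk later), followed by __block_count blocks of __bin_size
    // bytes each, threaded into a singly linked freelist below.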
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = 0;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
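    // The single-threaded pool only ever uses freelist slot 0, so each
    // bin needs just one _Block_record* rather than the per-thread
    // array the threaded pool allocates.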
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = 0;
        __bin._M_address = 0;
      }
    _M_init = true;
  }


#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
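        // _M_used was allocated in _M_initialize as a single chunk:
        // __max_threads size_t use counters followed by __max_threads
        // _Atomic_word "reclaimed by other threads" counters, which is
        // why the second array is addressed relative to _M_used here.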
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
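    // (Reached when the calling thread's freelist for this bin is
    // empty and more blocks must be sourced from the global pool or a
    // fresh chunk.)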
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list.  If so, grab up to __block_count blocks in one
    //   lock and change ownership.  If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0;
    //   no need to lock or change ownership, but check for free
    //   blocks on the global list (and if not, add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = 0;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == 0)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = 0;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
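            // If so, hand the whole global list over to this thread;
            // otherwise split off the first __block_count blocks and
            // leave the remainder on the global list.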
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = 0;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = 0;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = 0;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
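    // _M_binmap[__bytes] yields the index of the smallest bin whose
    // block size can hold __bytes: e.g. with _M_min_bin == 8, request
    // sizes 0-8 map to bin 0, 9-16 to bin 1, and so on.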
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids.  Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
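                  // _M_destroy_thread_key (at file scope above) runs
                  // via this key when a thread exits, returning the
                  // thread's id record to the shared freelist.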
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
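              // __GTHREAD_MUTEX_INIT is a static initializer, so build
              // a temporary and copy it into the heap-allocated mutex
              // while the mutex is still guaranteed unused.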
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }

  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active, check the thread key
    // value and return its id; if it's not set, take the first record
    // from _M_thread_freelist, set the key and return its id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        uintptr_t _M_id = (uintptr_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) throw () { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Set up the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids.  Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
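  // A minimal usage sketch, for illustration only (any allocator-aware
  // container works the same way; the snippet below is not part of
  // this file):
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   std::vector<char, __gnu_cxx::__mt_alloc<char> > __v;
  //   __v.assign(100, 'x');  // blocks come from the pool, not raw new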
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace