// Bitmap Allocator. -*- C++ -*-

// Copyright (C) 2004, 2005, 2006  Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/** @file ext/bitmap_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _BITMAP_ALLOCATOR_H
#define _BITMAP_ALLOCATOR_H 1

#include <cstddef> // For std::size_t, and ptrdiff_t.
#include <bits/functexcept.h> // For __throw_bad_alloc().
#include <utility> // For std::pair.
#include <functional> // For greater_equal, and less_equal.
#include <new> // For operator new.
#include <debug/debug.h> // _GLIBCXX_DEBUG_ASSERT
#include <ext/concurrence.h>


/** @brief The constant in the expression below is the alignment
 *  required in bytes.
 */
#define _BALLOC_ALIGN_BYTES 8

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  using std::size_t;
  using std::ptrdiff_t;

  namespace __detail
  {
    /** @class  __mini_vector bitmap_allocator.h bitmap_allocator.h
     *
     *  @brief  __mini_vector<> is a stripped down version of the
     *  full-fledged std::vector<>.
     *
     *  It is to be used only for built-in types or PODs.
     *  Notable differences are:
     *
     *  @detail
     *  1. Not all accessor functions are present.
     *  2. Used ONLY for PODs.
     *  3. No Allocator template argument.  Uses ::operator new() to get
     *  memory, and ::operator delete() to free it.
     *  Caveat: The dtor does NOT free the memory allocated, so this is a
     *  memory-leaking vector!
     */
    template<typename _Tp>
      class __mini_vector
      {
        __mini_vector(const __mini_vector&);
        __mini_vector& operator=(const __mini_vector&);

      public:
        typedef _Tp value_type;
        typedef _Tp* pointer;
        typedef _Tp& reference;
        typedef const _Tp& const_reference;
        typedef size_t size_type;
        typedef ptrdiff_t difference_type;
        typedef pointer iterator;

      private:
        pointer _M_start;
        pointer _M_finish;
        pointer _M_end_of_storage;

        size_type
        _M_space_left() const throw()
        { return _M_end_of_storage - _M_finish; }

        pointer
        allocate(size_type __n)
        { return static_cast<pointer>(::operator new(__n * sizeof(_Tp))); }

        void
        deallocate(pointer __p, size_type)
        { ::operator delete(__p); }

      public:
        // Members used: size(), push_back(), pop_back(),
        // insert(iterator, const_reference), erase(iterator),
        // begin(), end(), back(), operator[].
        __mini_vector() : _M_start(0), _M_finish(0),
                          _M_end_of_storage(0)
        { }

#if 0
        ~__mini_vector()
        {
          if (this->_M_start)
            {
              this->deallocate(this->_M_start, this->_M_end_of_storage
                               - this->_M_start);
            }
        }
#endif

        size_type
        size() const throw()
        { return _M_finish - _M_start; }

        iterator
        begin() const throw()
        { return this->_M_start; }

        iterator
        end() const throw()
        { return this->_M_finish; }

        reference
        back() const throw()
        { return *(this->end() - 1); }

        reference
        operator[](const size_type __pos) const throw()
        { return this->_M_start[__pos]; }

        void
        insert(iterator __pos, const_reference __x);

        void
        push_back(const_reference __x)
        {
          if (this->_M_space_left())
            {
              *this->end() = __x;
              ++this->_M_finish;
            }
          else
            this->insert(this->end(), __x);
        }

        void
        pop_back() throw()
        { --this->_M_finish; }

        void
        erase(iterator __pos) throw();

        void
        clear() throw()
        { this->_M_finish = this->_M_start; }
      };

    // Out of line function definitions.
    template<typename _Tp>
      void __mini_vector<_Tp>::
      insert(iterator __pos, const_reference __x)
      {
        if (this->_M_space_left())
          {
            size_type __to_move = this->_M_finish - __pos;
            iterator __dest = this->end();
            iterator __src = this->end() - 1;

            ++this->_M_finish;
            while (__to_move)
              {
                *__dest = *__src;
                --__dest; --__src; --__to_move;
              }
            *__pos = __x;
          }
        else
          {
            size_type __new_size = this->size() ? this->size() * 2 : 1;
            iterator __new_start = this->allocate(__new_size);
            iterator __first = this->begin();
            iterator __start = __new_start;
            while (__first != __pos)
              {
                *__start = *__first;
                ++__start; ++__first;
              }
            *__start = __x;
            ++__start;
            while (__first != this->end())
              {
                *__start = *__first;
                ++__start; ++__first;
              }
            if (this->_M_start)
              this->deallocate(this->_M_start, this->size());

            this->_M_start = __new_start;
            this->_M_finish = __start;
            this->_M_end_of_storage = this->_M_start + __new_size;
          }
      }

    template<typename _Tp>
      void __mini_vector<_Tp>::
      erase(iterator __pos) throw()
      {
        while (__pos + 1 != this->end())
          {
            *__pos = __pos[1];
            ++__pos;
          }
        --this->_M_finish;
      }


    template<typename _Tp>
      struct __mv_iter_traits
      {
        typedef typename _Tp::value_type value_type;
        typedef typename _Tp::difference_type difference_type;
      };

    template<typename _Tp>
      struct __mv_iter_traits<_Tp*>
      {
        typedef _Tp value_type;
        typedef ptrdiff_t difference_type;
      };

    enum
      {
        bits_per_byte = 8,
        bits_per_block = sizeof(size_t) * size_t(bits_per_byte)
      };

    template<typename _ForwardIterator, typename _Tp, typename _Compare>
      _ForwardIterator
      __lower_bound(_ForwardIterator __first, _ForwardIterator __last,
                    const _Tp& __val, _Compare __comp)
      {
        typedef typename __mv_iter_traits<_ForwardIterator>::value_type
          _ValueType;
        typedef typename __mv_iter_traits<_ForwardIterator>::difference_type
          _DistanceType;

        _DistanceType __len = __last - __first;
        _DistanceType __half;
        _ForwardIterator __middle;

        while (__len > 0)
          {
            __half = __len >> 1;
            __middle = __first;
            __middle += __half;
            if (__comp(*__middle, __val))
              {
                __first = __middle;
                ++__first;
                __len = __len - __half - 1;
              }
            else
              __len = __half;
          }
        return __first;
      }
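
    // Illustrative note (an editorial addition, not from the original
    // author): __lower_bound mirrors std::lower_bound over the raw
    // pointers used as __mini_vector iterators.  For example, on a
    // sequence of size_t values {1, 3, 5} with __val == 4 and
    // std::less<size_t>() as __comp, it returns the iterator to 5,
    // i.e. the first position whose element is not less than __val.
    // free_list below uses it to keep its vector of free chunks
    // ordered by the size recorded in each chunk.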
    template<typename _InputIterator, typename _Predicate>
      inline _InputIterator
      __find_if(_InputIterator __first, _InputIterator __last, _Predicate __p)
      {
        while (__first != __last && !__p(*__first))
          ++__first;
        return __first;
      }

    /** @brief The number of Blocks pointed to by the address pair
     *  passed to the function.
     */
    template<typename _AddrPair>
      inline size_t
      __num_blocks(_AddrPair __ap)
      { return (__ap.second - __ap.first) + 1; }

    /** @brief The number of Bit-maps pointed to by the address pair
     *  passed to the function.
     */
    template<typename _AddrPair>
      inline size_t
      __num_bitmaps(_AddrPair __ap)
      { return __num_blocks(__ap) / size_t(bits_per_block); }

    // _Tp should be a pointer type.
    template<typename _Tp>
      class _Inclusive_between
      : public std::unary_function<typename std::pair<_Tp, _Tp>, bool>
      {
        typedef _Tp pointer;
        pointer _M_ptr_value;
        typedef typename std::pair<_Tp, _Tp> _Block_pair;

      public:
        _Inclusive_between(pointer __ptr) : _M_ptr_value(__ptr)
        { }

        bool
        operator()(_Block_pair __bp) const throw()
        {
          if (std::less_equal<pointer>()(_M_ptr_value, __bp.second)
              && std::greater_equal<pointer>()(_M_ptr_value, __bp.first))
            return true;
          else
            return false;
        }
      };

    // Used to pass a Functor to functions by reference.
    template<typename _Functor>
      class _Functor_Ref
      : public std::unary_function<typename _Functor::argument_type,
                                   typename _Functor::result_type>
      {
        _Functor& _M_fref;

      public:
        typedef typename _Functor::argument_type argument_type;
        typedef typename _Functor::result_type result_type;

        _Functor_Ref(_Functor& __fref) : _M_fref(__fref)
        { }

        result_type
        operator()(argument_type __arg)
        { return _M_fref(__arg); }
      };

    /** @class  _Ffit_finder bitmap_allocator.h bitmap_allocator.h
     *
     *  @brief  The class which acts as a predicate for applying the
     *  first-fit memory allocation policy for the bitmap allocator.
     */
    // _Tp should be a pointer type, and _Alloc is the Allocator for
    // the vector.
    template<typename _Tp>
      class _Ffit_finder
      : public std::unary_function<typename std::pair<_Tp, _Tp>, bool>
      {
        typedef typename std::pair<_Tp, _Tp> _Block_pair;
        typedef typename __detail::__mini_vector<_Block_pair> _BPVector;
        typedef typename _BPVector::difference_type _Counter_type;

        size_t* _M_pbitmap;
        _Counter_type _M_data_offset;

      public:
        _Ffit_finder() : _M_pbitmap(0), _M_data_offset(0)
        { }

        bool
        operator()(_Block_pair __bp) throw()
        {
          // Set __rover to the last physical location bitmap,
          // which is the bitmap which belongs to the first free
          // block.  Thus, the bitmaps are in exact reverse order of
          // the actual memory layout.  So, we count down the bitmaps,
          // which is the same as moving up the memory.

          // If the used count stored at the start of the Bit Map headers
          // is equal to the number of Objects that the current Block can
          // store, then there is definitely no space for another single
          // object, so just return false.
          _Counter_type __diff =
            __gnu_cxx::__detail::__num_bitmaps(__bp);

          if (*(reinterpret_cast<size_t*>
                (__bp.first) - (__diff + 1))
              == __gnu_cxx::__detail::__num_blocks(__bp))
            return false;

          size_t* __rover = reinterpret_cast<size_t*>(__bp.first) - 1;

          for (_Counter_type __i = 0; __i < __diff; ++__i)
            {
              _M_data_offset = __i;
              if (*__rover)
                {
                  _M_pbitmap = __rover;
                  return true;
                }
              --__rover;
            }
          return false;
        }


        size_t*
        _M_get() const throw()
        { return _M_pbitmap; }

        _Counter_type
        _M_offset() const throw()
        { return _M_data_offset * size_t(bits_per_block); }
      };


    /** @class  _Bitmap_counter bitmap_allocator.h bitmap_allocator.h
     *
     *  @brief  The bitmap counter which acts as the bitmap
     *  manipulator, and manages the bit-manipulation functions and
     *  the searching and identification functions on the bit-map.
     */
    // _Tp should be a pointer type.
    template<typename _Tp>
      class _Bitmap_counter
      {
        typedef typename
          __detail::__mini_vector<typename std::pair<_Tp, _Tp> > _BPVector;
        typedef typename _BPVector::size_type _Index_type;
        typedef _Tp pointer;

        _BPVector& _M_vbp;
        size_t* _M_curr_bmap;
        size_t* _M_last_bmap_in_block;
        _Index_type _M_curr_index;

      public:
        // Use the 2nd parameter with care.  Make sure that such an
        // entry exists in the vector before passing that particular
        // index to this ctor.
        _Bitmap_counter(_BPVector& Rvbp, long __index = -1) : _M_vbp(Rvbp)
        { this->_M_reset(__index); }

        void
        _M_reset(long __index = -1) throw()
        {
          if (__index == -1)
            {
              _M_curr_bmap = 0;
              _M_curr_index = static_cast<_Index_type>(-1);
              return;
            }

          _M_curr_index = __index;
          _M_curr_bmap = reinterpret_cast<size_t*>
            (_M_vbp[_M_curr_index].first) - 1;

          _GLIBCXX_DEBUG_ASSERT(__index <= (long)_M_vbp.size() - 1);

          _M_last_bmap_in_block = _M_curr_bmap
            - ((_M_vbp[_M_curr_index].second
                - _M_vbp[_M_curr_index].first + 1)
               / size_t(bits_per_block) - 1);
        }

        // Dangerous Function!  Use with extreme care.  Pass to this
        // function ONLY those values that are known to be correct,
        // otherwise this will mess up big time.
        void
        _M_set_internal_bitmap(size_t* __new_internal_marker) throw()
        { _M_curr_bmap = __new_internal_marker; }

        bool
        _M_finished() const throw()
        { return (_M_curr_bmap == 0); }

        _Bitmap_counter&
        operator++() throw()
        {
          if (_M_curr_bmap == _M_last_bmap_in_block)
            {
              if (++_M_curr_index == _M_vbp.size())
                _M_curr_bmap = 0;
              else
                this->_M_reset(_M_curr_index);
            }
          else
            --_M_curr_bmap;
          return *this;
        }

        size_t*
        _M_get() const throw()
        { return _M_curr_bmap; }

        pointer
        _M_base() const throw()
        { return _M_vbp[_M_curr_index].first; }

        _Index_type
        _M_offset() const throw()
        {
          return size_t(bits_per_block)
            * ((reinterpret_cast<size_t*>(this->_M_base())
                - _M_curr_bmap) - 1);
        }

        _Index_type
        _M_where() const throw()
        { return _M_curr_index; }
      };

    /** @brief  Mark a memory address as allocated by re-setting the
     *  corresponding bit in the bit-map.
     */
    inline void
    __bit_allocate(size_t* __pbmap, size_t __pos) throw()
    {
      // Widen the literal to size_t so the shift stays well defined
      // for __pos >= 32 on 64-bit targets.
      size_t __mask = size_t(1) << __pos;
      __mask = ~__mask;
      *__pbmap &= __mask;
    }

    /** @brief  Mark a memory address as free by setting the
     *  corresponding bit in the bit-map.
     */
    inline void
    __bit_free(size_t* __pbmap, size_t __pos) throw()
    {
      size_t __mask = size_t(1) << __pos;
      *__pbmap |= __mask;
    }
  } // namespace __detail

  /** @brief  Generic Version of the bsf instruction.
   */
  inline size_t
  _Bit_scan_forward(size_t __num)
  { return static_cast<size_t>(__builtin_ctzl(__num)); }
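
  // Illustrative example (an editorial addition, not part of the
  // original header): with bits_per_block == 32, a bitmap word equal
  // to ~size_t(0) means every block it covers is free.  After
  // __detail::__bit_allocate(&__bmap, 0), bit 0 is cleared and
  // _Bit_scan_forward(__bmap) returns 1, the index of the next free
  // block; __detail::__bit_free(&__bmap, 0) restores the all-free
  // state.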
  /** @class  free_list bitmap_allocator.h bitmap_allocator.h
   *
   *  @brief  The free list class for managing chunks of memory to be
   *  given to and returned by the bitmap_allocator.
   */
  class free_list
  {
    typedef size_t*                              value_type;
    typedef __detail::__mini_vector<value_type>  vector_type;
    typedef vector_type::iterator                iterator;
    typedef __mutex                              __mutex_type;

    struct _LT_pointer_compare
    {
      bool
      operator()(const size_t* __pui,
                 const size_t __cui) const throw()
      { return *__pui < __cui; }
    };

#if defined __GTHREADS
    __mutex_type&
    _M_get_mutex()
    {
      static __mutex_type _S_mutex;
      return _S_mutex;
    }
#endif

    vector_type&
    _M_get_free_list()
    {
      static vector_type _S_free_list;
      return _S_free_list;
    }

    /** @brief  Performs validation of memory based on its size.
     *
     *  @param  __addr The pointer to the memory block to be
     *  validated.
     *
     *  @detail  Validates the memory block passed to this function and
     *  appropriately performs the action of managing the free list of
     *  blocks by adding this block to the free list or deleting this
     *  or larger blocks from the free list.
     */
    void
    _M_validate(size_t* __addr) throw()
    {
      vector_type& __free_list = _M_get_free_list();
      const vector_type::size_type __max_size = 64;
      if (__free_list.size() >= __max_size)
        {
          // Ok, the threshold value has been reached.  We determine
          // which block to remove from the list of free blocks.
          if (*__addr >= *__free_list.back())
            {
              // Ok, the new block is greater than or equal to the
              // last block in the list of free blocks.  We just free
              // the new block.
              ::operator delete(static_cast<void*>(__addr));
              return;
            }
          else
            {
              // Deallocate the last block in the list of free lists,
              // and insert the new one in its correct position.
              ::operator delete(static_cast<void*>(__free_list.back()));
              __free_list.pop_back();
            }
        }

      // Just add the block to the list of free lists unconditionally.
      iterator __temp = __gnu_cxx::__detail::__lower_bound
        (__free_list.begin(), __free_list.end(),
         *__addr, _LT_pointer_compare());

      // We may insert the new free list before __temp.
      __free_list.insert(__temp, __addr);
    }

    /** @brief  Decides whether the wastage of memory is acceptable for
     *  the current memory request and returns accordingly.
     *
     *  @param  __block_size The size of the block available in the free
     *  list.
     *
     *  @param  __required_size The required size of the memory block.
     *
     *  @return  true if the wastage incurred is acceptable, else returns
     *  false.
     */
    bool
    _M_should_i_give(size_t __block_size,
                     size_t __required_size) throw()
    {
      const size_t __max_wastage_percentage = 36;
      if (__block_size >= __required_size &&
          (((__block_size - __required_size) * 100 / __block_size)
           < __max_wastage_percentage))
        return true;
      else
        return false;
    }

  public:
    /** @brief This function returns the block of memory to the
     *  internal free list.
     *
     *  @param  __addr The pointer to the memory block that was given
     *  by a call to the _M_get function.
     */
    inline void
    _M_insert(size_t* __addr) throw()
    {
#if defined __GTHREADS
      __gnu_cxx::__scoped_lock __bfl_lock(_M_get_mutex());
#endif
      // Call _M_validate to decide what should be done with
      // this particular free list.
      this->_M_validate(reinterpret_cast<size_t*>(__addr) - 1);
      // See discussion as to why this is 1!
    }

    /** @brief This function gets a block of memory of the specified
     *  size from the free list.
     *
     *  @param  __sz The size in bytes of the memory required.
     *
     *  @return  A pointer to the new memory block of size at least
     *  equal to that requested.
     */
    size_t*
    _M_get(size_t __sz) throw(std::bad_alloc);

    /** @brief This function just clears the internal Free List, and
     *  gives back all the memory to the OS.
     */
    void
    _M_clear();
  };


  // Forward declare the class.
  template<typename _Tp>
    class bitmap_allocator;

  // Specialize for void:
  template<>
    class bitmap_allocator<void>
    {
    public:
      typedef void*       pointer;
      typedef const void* const_pointer;

      // Reference-to-void members are impossible.
      typedef void  value_type;
      template<typename _Tp1>
        struct rebind
        {
          typedef bitmap_allocator<_Tp1> other;
        };
    };

  template<typename _Tp>
    class bitmap_allocator : private free_list
    {
    public:
      typedef size_t                     size_type;
      typedef ptrdiff_t                  difference_type;
      typedef _Tp*                       pointer;
      typedef const _Tp*                 const_pointer;
      typedef _Tp&                       reference;
      typedef const _Tp&                 const_reference;
      typedef _Tp                        value_type;
      typedef free_list::__mutex_type    __mutex_type;

      template<typename _Tp1>
        struct rebind
        {
          typedef bitmap_allocator<_Tp1> other;
        };

    private:
      template<size_t _BSize, size_t _AlignSize>
        struct aligned_size
        {
          enum
            {
              modulus = _BSize % _AlignSize,
              value = _BSize + (modulus ? _AlignSize - (modulus) : 0)
            };
        };

      struct _Alloc_block
      {
        char __M_unused[aligned_size<sizeof(value_type),
                                     _BALLOC_ALIGN_BYTES>::value];
      };


      typedef typename std::pair<_Alloc_block*, _Alloc_block*> _Block_pair;

      typedef typename
        __detail::__mini_vector<_Block_pair> _BPVector;

#if defined _GLIBCXX_DEBUG
      // Complexity: O(lg(N)), where N is the number of blocks of size
      // sizeof(value_type).
      void
      _S_check_for_free_blocks() throw()
      {
        typedef typename
          __gnu_cxx::__detail::_Ffit_finder<_Alloc_block*> _FFF;
        _FFF __fff;
        typedef typename _BPVector::iterator _BPiter;
        _BPiter __bpi =
          __gnu_cxx::__detail::__find_if
          (_S_mem_blocks.begin(), _S_mem_blocks.end(),
           __gnu_cxx::__detail::_Functor_Ref<_FFF>(__fff));

        _GLIBCXX_DEBUG_ASSERT(__bpi == _S_mem_blocks.end());
      }
#endif
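
      // Editorial sketch of the pool layout, reconstructed from
      // _S_refill_pool, _M_allocate_single_object and free_list::_M_insert
      // (it is not a comment from the original author):
      //
      //   [ chunk size ][ use count ][ bitmap N-1 ] ... [ bitmap 0 ]
      //   [ _Alloc_block 0 ] ... [ _Alloc_block _S_block_size - 1 ]
      //
      // The size_t just below the first _Alloc_block is the bitmap for
      // the lowest-addressed blocks, so the bitmaps run in reverse
      // order of the blocks they describe.  The use count sits before
      // the bitmaps, and the word before that appears to hold the
      // chunk size written by free_list::_M_get (defined elsewhere),
      // which is why free_list::_M_insert steps back one size_t before
      // calling _M_validate.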
      /** @brief  Responsible for exponentially growing the internal
       *  memory pool.
       *
       *  @throw  std::bad_alloc. If memory cannot be allocated.
       *
       *  @detail  Complexity: O(1), but internally depends upon the
       *  complexity of the function free_list::_M_get.  The part where
       *  the bitmap headers are written has complexity: O(X), where X
       *  is the number of blocks of size sizeof(value_type) within
       *  the newly acquired block.  This is a tight bound.
       */
      void
      _S_refill_pool() throw(std::bad_alloc)
      {
#if defined _GLIBCXX_DEBUG
        _S_check_for_free_blocks();
#endif

        const size_t __num_bitmaps = (_S_block_size
                                      / size_t(__detail::bits_per_block));
        const size_t __size_to_allocate = sizeof(size_t)
          + _S_block_size * sizeof(_Alloc_block)
          + __num_bitmaps * sizeof(size_t);

        size_t* __temp =
          reinterpret_cast<size_t*>
          (this->_M_get(__size_to_allocate));
        *__temp = 0;
        ++__temp;

        // The Header information goes at the Beginning of the Block.
        _Block_pair __bp =
          std::make_pair(reinterpret_cast<_Alloc_block*>
                         (__temp + __num_bitmaps),
                         reinterpret_cast<_Alloc_block*>
                         (__temp + __num_bitmaps)
                         + _S_block_size - 1);

        // Fill the Vector with this information.
        _S_mem_blocks.push_back(__bp);

        size_t __bit_mask = 0; // 0 Indicates all Allocated.
        __bit_mask = ~__bit_mask; // 1 Indicates all Free.

        for (size_t __i = 0; __i < __num_bitmaps; ++__i)
          __temp[__i] = __bit_mask;

        _S_block_size *= 2;
      }


      static _BPVector _S_mem_blocks;
      static size_t _S_block_size;
      static __gnu_cxx::__detail::
        _Bitmap_counter<_Alloc_block*> _S_last_request;
      static typename _BPVector::size_type _S_last_dealloc_index;
#if defined __GTHREADS
      static __mutex_type _S_mut;
#endif

    public:

      /** @brief  Allocates memory for a single object of size
       *  sizeof(_Tp).
       *
       *  @throw  std::bad_alloc. If memory cannot be allocated.
       *
       *  @detail  Complexity: Worst case complexity is O(N), but that
       *  is hardly ever hit.  If and when this particular case is
       *  encountered, the next few cases are guaranteed to have a
       *  worst case complexity of O(1)!  That's why this function
       *  performs very well on average.  You can consider this
       *  function to have a complexity referred to commonly as:
       *  Amortized Constant time.
       */
      pointer
      _M_allocate_single_object() throw(std::bad_alloc)
      {
#if defined __GTHREADS
        __gnu_cxx::__scoped_lock __bit_lock(_S_mut);
#endif

        // The algorithm is something like this: The last_request
        // variable points to the last accessed Bit Map.  When such a
        // condition occurs, we try to find a free block in the
        // current bitmap, or succeeding bitmaps until the last bitmap
        // is reached.  If no free block turns up, we resort to the
        // First Fit method.

        // WARNING: Do not re-order the condition in the while
        // statement below, because it relies on C++'s short-circuit
        // evaluation.  The return from _S_last_request._M_get() will
        // NOT be dereferenceable if _S_last_request._M_finished()
        // returns true.  This would inevitably lead to a NULL pointer
        // dereference if tinkered with.
        while (_S_last_request._M_finished() == false
               && (*(_S_last_request._M_get()) == 0))
          {
            _S_last_request.operator++();
          }

        if (__builtin_expect(_S_last_request._M_finished() == true, false))
          {
            // Fall Back to First Fit algorithm.
            typedef typename
              __gnu_cxx::__detail::_Ffit_finder<_Alloc_block*> _FFF;
            _FFF __fff;
            typedef typename _BPVector::iterator _BPiter;
            _BPiter __bpi =
              __gnu_cxx::__detail::__find_if
              (_S_mem_blocks.begin(), _S_mem_blocks.end(),
               __gnu_cxx::__detail::_Functor_Ref<_FFF>(__fff));

            if (__bpi != _S_mem_blocks.end())
              {
                // Search was successful.  Ok, now mark the first bit from
                // the right as 0, meaning Allocated.  This bit is obtained
                // by calling _M_get() on __fff.
                size_t __nz_bit = _Bit_scan_forward(*__fff._M_get());
                __detail::__bit_allocate(__fff._M_get(), __nz_bit);

                _S_last_request._M_reset(__bpi - _S_mem_blocks.begin());

                // Now, get the address of the bit we marked as allocated.
                pointer __ret = reinterpret_cast<pointer>
                  (__bpi->first + __fff._M_offset() + __nz_bit);
                size_t* __puse_count =
                  reinterpret_cast<size_t*>
                  (__bpi->first)
                  - (__gnu_cxx::__detail::__num_bitmaps(*__bpi) + 1);

                ++(*__puse_count);
                return __ret;
              }
            else
              {
                // Search was unsuccessful.  We add more memory to the
                // pool by calling _S_refill_pool().
                _S_refill_pool();

                // Reset the _S_last_request structure to the first
                // free block's bit map.
                _S_last_request._M_reset(_S_mem_blocks.size() - 1);

                // Now, mark that bit as allocated.
              }
          }

        // _S_last_request holds a pointer to a valid bit map, that
        // points to a free block in memory.
        size_t __nz_bit = _Bit_scan_forward(*_S_last_request._M_get());
        __detail::__bit_allocate(_S_last_request._M_get(), __nz_bit);

        pointer __ret = reinterpret_cast<pointer>
          (_S_last_request._M_base() + _S_last_request._M_offset() + __nz_bit);

        size_t* __puse_count = reinterpret_cast<size_t*>
          (_S_mem_blocks[_S_last_request._M_where()].first)
          - (__gnu_cxx::__detail::
             __num_bitmaps(_S_mem_blocks[_S_last_request._M_where()]) + 1);

        ++(*__puse_count);
        return __ret;
      }

      /** @brief  Deallocates memory that belongs to a single object of
       *  size sizeof(_Tp).
       *
       *  @detail  Complexity: O(lg(N)), but the worst case is not hit
       *  often!  This is because containers usually deallocate memory
       *  close to each other and this case is handled in O(1) time by
       *  the deallocate function.
       */
      void
      _M_deallocate_single_object(pointer __p) throw()
      {
#if defined __GTHREADS
        __gnu_cxx::__scoped_lock __bit_lock(_S_mut);
#endif
        _Alloc_block* __real_p = reinterpret_cast<_Alloc_block*>(__p);

        typedef typename _BPVector::iterator _Iterator;
        typedef typename _BPVector::difference_type _Difference_type;

        _Difference_type __diff;
        long __displacement;

        _GLIBCXX_DEBUG_ASSERT(_S_last_dealloc_index >= 0);


        if (__gnu_cxx::__detail::_Inclusive_between<_Alloc_block*>
            (__real_p) (_S_mem_blocks[_S_last_dealloc_index]))
          {
            _GLIBCXX_DEBUG_ASSERT(_S_last_dealloc_index
                                  <= _S_mem_blocks.size() - 1);

            // Initial Assumption was correct!
            __diff = _S_last_dealloc_index;
            __displacement = __real_p - _S_mem_blocks[__diff].first;
          }
        else
          {
            _Iterator _iter = __gnu_cxx::__detail::
              __find_if(_S_mem_blocks.begin(),
                        _S_mem_blocks.end(),
                        __gnu_cxx::__detail::
                        _Inclusive_between<_Alloc_block*>(__real_p));

            _GLIBCXX_DEBUG_ASSERT(_iter != _S_mem_blocks.end());

            __diff = _iter - _S_mem_blocks.begin();
            __displacement = __real_p - _S_mem_blocks[__diff].first;
            _S_last_dealloc_index = __diff;
          }

        // Get the position of the iterator that has been found.
        const size_t __rotate = (__displacement
                                 % size_t(__detail::bits_per_block));
        size_t* __bitmapC =
          reinterpret_cast<size_t*>
          (_S_mem_blocks[__diff].first) - 1;
        __bitmapC -= (__displacement / size_t(__detail::bits_per_block));

        __detail::__bit_free(__bitmapC, __rotate);
        size_t* __puse_count = reinterpret_cast<size_t*>
          (_S_mem_blocks[__diff].first)
          - (__gnu_cxx::__detail::__num_bitmaps(_S_mem_blocks[__diff]) + 1);

        _GLIBCXX_DEBUG_ASSERT(*__puse_count != 0);

        --(*__puse_count);

        if (__builtin_expect(*__puse_count == 0, false))
          {
            _S_block_size /= 2;

            // We can safely remove this block.
            // _Block_pair __bp = _S_mem_blocks[__diff];
            this->_M_insert(__puse_count);
            _S_mem_blocks.erase(_S_mem_blocks.begin() + __diff);

            // Reset the _S_last_request variable to reflect the
            // erased block.  We do this to protect future requests
            // after the last block has been removed from a particular
            // memory Chunk, which in turn has been returned to the
            // free list, and hence had been erased from the vector,
            // so the size of the vector gets reduced by 1.
            if ((_Difference_type)_S_last_request._M_where() >= __diff--)
              _S_last_request._M_reset(__diff);

            // The index into the vector of the region that might hold
            // the next address passed to deallocate may have been
            // invalidated by the erase above, so restore that
            // invariant too.
            if (_S_last_dealloc_index >= _S_mem_blocks.size())
              {
                _S_last_dealloc_index = (__diff != -1 ? __diff : 0);
                _GLIBCXX_DEBUG_ASSERT(_S_last_dealloc_index >= 0);
              }
          }
      }

    public:
      bitmap_allocator() throw()
      { }

      bitmap_allocator(const bitmap_allocator&)
      { }

      template<typename _Tp1>
        bitmap_allocator(const bitmap_allocator<_Tp1>&) throw()
        { }

      ~bitmap_allocator() throw()
      { }

      pointer
      allocate(size_type __n)
      {
        if (__builtin_expect(__n > this->max_size(), false))
          std::__throw_bad_alloc();

        if (__builtin_expect(__n == 1, true))
          return this->_M_allocate_single_object();
        else
          {
            const size_type __b = __n * sizeof(value_type);
            return reinterpret_cast<pointer>(::operator new(__b));
          }
      }

      pointer
      allocate(size_type __n, typename bitmap_allocator<void>::const_pointer)
      { return allocate(__n); }

      void
      deallocate(pointer __p, size_type __n) throw()
      {
        if (__builtin_expect(__p != 0, true))
          {
            if (__builtin_expect(__n == 1, true))
              this->_M_deallocate_single_object(__p);
            else
              ::operator delete(__p);
          }
      }

      pointer
      address(reference __r) const
      { return &__r; }

      const_pointer
      address(const_reference __r) const
      { return &__r; }

      size_type
      max_size() const throw()
      { return size_type(-1) / sizeof(value_type); }

      void
      construct(pointer __p, const_reference __data)
      { ::new(__p) value_type(__data); }

      void
      destroy(pointer __p)
      { __p->~value_type(); }
    };

  template<typename _Tp1, typename _Tp2>
    bool
    operator==(const bitmap_allocator<_Tp1>&,
               const bitmap_allocator<_Tp2>&) throw()
    { return true; }

  template<typename _Tp1, typename _Tp2>
    bool
    operator!=(const bitmap_allocator<_Tp1>&,
               const bitmap_allocator<_Tp2>&) throw()
    { return false; }
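
  // Illustrative usage (an editorial note, not part of the original
  // header): the class models a standard allocator, so it is normally
  // selected through a container's allocator parameter, e.g.
  //
  //   #include <list>
  //   #include <ext/bitmap_allocator.h>
  //
  //   std::list<int, __gnu_cxx::bitmap_allocator<int> > __lst;
  //   __lst.push_back(42);  // node memory comes from the bitmap pool
  //
  // Single-object requests (__n == 1) are served from the bitmapped
  // pool; requests with __n != 1 simply fall back to ::operator new.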
  // Static member definitions.
  template<typename _Tp>
    typename bitmap_allocator<_Tp>::_BPVector
    bitmap_allocator<_Tp>::_S_mem_blocks;

  template<typename _Tp>
    size_t bitmap_allocator<_Tp>::_S_block_size =
      2 * size_t(__detail::bits_per_block);

  template<typename _Tp>
    typename __gnu_cxx::bitmap_allocator<_Tp>::_BPVector::size_type
    bitmap_allocator<_Tp>::_S_last_dealloc_index = 0;

  template<typename _Tp>
    __gnu_cxx::__detail::_Bitmap_counter
      <typename bitmap_allocator<_Tp>::_Alloc_block*>
    bitmap_allocator<_Tp>::_S_last_request(_S_mem_blocks);

#if defined __GTHREADS
  template<typename _Tp>
    typename bitmap_allocator<_Tp>::__mutex_type
    bitmap_allocator<_Tp>::_S_mut;
#endif

_GLIBCXX_END_NAMESPACE

#endif