xref: /openbsd-src/gnu/gcc/libstdc++-v3/include/ext/mt_allocator.h (revision 404b540a9034ac75a6199ad1a32d1bbc7a0d4210)
1*404b540aSrobert // MT-optimized allocator -*- C++ -*-
2*404b540aSrobert 
3*404b540aSrobert // Copyright (C) 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
4*404b540aSrobert //
5*404b540aSrobert // This file is part of the GNU ISO C++ Library.  This library is free
6*404b540aSrobert // software; you can redistribute it and/or modify it under the
7*404b540aSrobert // terms of the GNU General Public License as published by the
8*404b540aSrobert // Free Software Foundation; either version 2, or (at your option)
9*404b540aSrobert // any later version.
10*404b540aSrobert 
11*404b540aSrobert // This library is distributed in the hope that it will be useful,
12*404b540aSrobert // but WITHOUT ANY WARRANTY; without even the implied warranty of
13*404b540aSrobert // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14*404b540aSrobert // GNU General Public License for more details.
15*404b540aSrobert 
16*404b540aSrobert // You should have received a copy of the GNU General Public License along
17*404b540aSrobert // with this library; see the file COPYING.  If not, write to the Free
18*404b540aSrobert // Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
19*404b540aSrobert // USA.
20*404b540aSrobert 
21*404b540aSrobert // As a special exception, you may use this file as part of a free software
22*404b540aSrobert // library without restriction.  Specifically, if other files instantiate
23*404b540aSrobert // templates or use macros or inline functions from this file, or you compile
24*404b540aSrobert // this file and link it with other files to produce an executable, this
25*404b540aSrobert // file does not by itself cause the resulting executable to be covered by
26*404b540aSrobert // the GNU General Public License.  This exception does not however
27*404b540aSrobert // invalidate any other reasons why the executable file might be covered by
28*404b540aSrobert // the GNU General Public License.
29*404b540aSrobert 
30*404b540aSrobert /** @file ext/mt_allocator.h
31*404b540aSrobert  *  This file is a GNU extension to the Standard C++ Library.
32*404b540aSrobert  */
33*404b540aSrobert 
34*404b540aSrobert #ifndef _MT_ALLOCATOR_H
35*404b540aSrobert #define _MT_ALLOCATOR_H 1
36*404b540aSrobert 
37*404b540aSrobert #include <new>
38*404b540aSrobert #include <cstdlib>
39*404b540aSrobert #include <bits/functexcept.h>
40*404b540aSrobert #include <ext/atomicity.h>
41*404b540aSrobert 
42*404b540aSrobert _GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)
43*404b540aSrobert 
44*404b540aSrobert   using std::size_t;
45*404b540aSrobert   using std::ptrdiff_t;
46*404b540aSrobert 
47*404b540aSrobert   typedef void (*__destroy_handler)(void*);
48*404b540aSrobert 
49*404b540aSrobert   /// @brief  Base class for pool object.
50*404b540aSrobert   struct __pool_base
51*404b540aSrobert   {
52*404b540aSrobert     // Using short int as type for the binmap implies we are never
53*404b540aSrobert     // caching blocks larger than 32768 with this allocator.
54*404b540aSrobert     typedef unsigned short int _Binmap_type;
55*404b540aSrobert 
56*404b540aSrobert     // Variables used to configure the behavior of the allocator,
57*404b540aSrobert     // assigned and explained in detail below.
58*404b540aSrobert     struct _Tune
59*404b540aSrobert      {
60*404b540aSrobert       // Compile time constants for the default _Tune values.
61*404b540aSrobert       enum { _S_align = 8 };
62*404b540aSrobert       enum { _S_max_bytes = 128 };
63*404b540aSrobert       enum { _S_min_bin = 8 };
64*404b540aSrobert       enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
65*404b540aSrobert       enum { _S_max_threads = 4096 };
66*404b540aSrobert       enum { _S_freelist_headroom = 10 };
67*404b540aSrobert 
68*404b540aSrobert       // Alignment needed.
69*404b540aSrobert       // NB: In any case must be >= sizeof(_Block_record), that
70*404b540aSrobert       // is 4 on 32 bit machines and 8 on 64 bit machines.
71*404b540aSrobert       size_t	_M_align;
72*404b540aSrobert 
73*404b540aSrobert       // Allocation requests (after round-up to power of 2) below
74*404b540aSrobert       // this value will be handled by the allocator. A raw new/
75*404b540aSrobert       // call will be used for requests larger than this value.
76*404b540aSrobert       // NB: Must be much smaller than _M_chunk_size and in any
77*404b540aSrobert       // case <= 32768.
78*404b540aSrobert       size_t	_M_max_bytes;
79*404b540aSrobert 
80*404b540aSrobert       // Size in bytes of the smallest bin.
81*404b540aSrobert       // NB: Must be a power of 2 and >= _M_align (and of course
82*404b540aSrobert       // much smaller than _M_max_bytes).
83*404b540aSrobert       size_t	_M_min_bin;
84*404b540aSrobert 
85*404b540aSrobert       // In order to avoid fragmenting and minimize the number of
86*404b540aSrobert       // new() calls we always request new memory using this
87*404b540aSrobert       // value. Based on previous discussions on the libstdc++
88*404b540aSrobert       // mailing list we have choosen the value below.
89*404b540aSrobert       // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
90*404b540aSrobert       // NB: At least one order of magnitude > _M_max_bytes.
91*404b540aSrobert       size_t	_M_chunk_size;
92*404b540aSrobert 
93*404b540aSrobert       // The maximum number of supported threads. For
94*404b540aSrobert       // single-threaded operation, use one. Maximum values will
95*404b540aSrobert       // vary depending on details of the underlying system. (For
96*404b540aSrobert       // instance, Linux 2.4.18 reports 4070 in
97*404b540aSrobert       // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
98*404b540aSrobert       // 65534)
99*404b540aSrobert       size_t 	_M_max_threads;
100*404b540aSrobert 
101*404b540aSrobert       // Each time a deallocation occurs in a threaded application
102*404b540aSrobert       // we make sure that there are no more than
103*404b540aSrobert       // _M_freelist_headroom % of used memory on the freelist. If
104*404b540aSrobert       // the number of additional records is more than
105*404b540aSrobert       // _M_freelist_headroom % of the freelist, we move these
106*404b540aSrobert       // records back to the global pool.
107*404b540aSrobert       size_t 	_M_freelist_headroom;
108*404b540aSrobert 
109*404b540aSrobert       // Set to true forces all allocations to use new().
110*404b540aSrobert       bool 	_M_force_new;
111*404b540aSrobert 
112*404b540aSrobert       explicit
_Tune__pool_base::_Tune113*404b540aSrobert       _Tune()
114*404b540aSrobert       : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
115*404b540aSrobert       _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
116*404b540aSrobert       _M_freelist_headroom(_S_freelist_headroom),
117*404b540aSrobert       _M_force_new(std::getenv("GLIBCXX_FORCE_NEW") ? true : false)
118*404b540aSrobert       { }
119*404b540aSrobert 
120*404b540aSrobert       explicit
_Tune__pool_base::_Tune121*404b540aSrobert       _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
122*404b540aSrobert 	    size_t __maxthreads, size_t __headroom, bool __force)
123*404b540aSrobert       : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
124*404b540aSrobert       _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
125*404b540aSrobert       _M_freelist_headroom(__headroom), _M_force_new(__force)
126*404b540aSrobert       { }
127*404b540aSrobert     };
128*404b540aSrobert 
129*404b540aSrobert     struct _Block_address
130*404b540aSrobert     {
131*404b540aSrobert       void* 			_M_initial;
132*404b540aSrobert       _Block_address* 		_M_next;
133*404b540aSrobert     };
134*404b540aSrobert 
135*404b540aSrobert     const _Tune&
_M_get_options__pool_base136*404b540aSrobert     _M_get_options() const
137*404b540aSrobert     { return _M_options; }
138*404b540aSrobert 
139*404b540aSrobert     void
_M_set_options__pool_base140*404b540aSrobert     _M_set_options(_Tune __t)
141*404b540aSrobert     {
142*404b540aSrobert       if (!_M_init)
143*404b540aSrobert 	_M_options = __t;
144*404b540aSrobert     }
145*404b540aSrobert 
146*404b540aSrobert     bool
_M_check_threshold__pool_base147*404b540aSrobert     _M_check_threshold(size_t __bytes)
148*404b540aSrobert     { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }
149*404b540aSrobert 
150*404b540aSrobert     size_t
_M_get_binmap__pool_base151*404b540aSrobert     _M_get_binmap(size_t __bytes)
152*404b540aSrobert     { return _M_binmap[__bytes]; }
153*404b540aSrobert 
154*404b540aSrobert     const size_t
_M_get_align__pool_base155*404b540aSrobert     _M_get_align()
156*404b540aSrobert     { return _M_options._M_align; }
157*404b540aSrobert 
158*404b540aSrobert     explicit
__pool_base__pool_base159*404b540aSrobert     __pool_base()
160*404b540aSrobert     : _M_options(_Tune()), _M_binmap(NULL), _M_init(false) { }
161*404b540aSrobert 
162*404b540aSrobert     explicit
__pool_base__pool_base163*404b540aSrobert     __pool_base(const _Tune& __options)
164*404b540aSrobert     : _M_options(__options), _M_binmap(NULL), _M_init(false) { }
165*404b540aSrobert 
166*404b540aSrobert   private:
167*404b540aSrobert     explicit
168*404b540aSrobert     __pool_base(const __pool_base&);
169*404b540aSrobert 
170*404b540aSrobert     __pool_base&
171*404b540aSrobert     operator=(const __pool_base&);
172*404b540aSrobert 
173*404b540aSrobert   protected:
174*404b540aSrobert     // Configuration options.
175*404b540aSrobert     _Tune 	       		_M_options;
176*404b540aSrobert 
177*404b540aSrobert     _Binmap_type* 		_M_binmap;
178*404b540aSrobert 
179*404b540aSrobert     // Configuration of the pool object via _M_options can happen
180*404b540aSrobert     // after construction but before initialization. After
181*404b540aSrobert     // initialization is complete, this variable is set to true.
182*404b540aSrobert     bool 			_M_init;
183*404b540aSrobert   };
184*404b540aSrobert 
185*404b540aSrobert 
186*404b540aSrobert   /**
187*404b540aSrobert    *  @brief  Data describing the underlying memory pool, parameterized on
188*404b540aSrobert    *  threading support.
189*404b540aSrobert    */
190*404b540aSrobert   template<bool _Thread>
191*404b540aSrobert     class __pool;
192*404b540aSrobert 
  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      // A free block.  While on a freelist, the block's own storage
      // holds the link to the next free block.
      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record* 			_M_next;
      };

      // Per-bin (per power-of-2 size class) bookkeeping.
      struct _Bin_record
      {
	// An "array" of pointers to the first free block.
	_Block_record**			_M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address*		     	_M_address;
      };

      // Run _M_initialize() the first time this is called; a cheap
      // no-op afterwards (guarded by _M_init from __pool_base).
      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      // Tear down the pool.  Defined out of line.
      void
      _M_destroy() throw();

      // Carve a fresh chunk into blocks of the bin for __bytes and
      // return the first one.  Defined out of line.
      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      // Return block __p of size __bytes to its freelist.  Defined
      // out of line.
      void
      _M_reclaim_block(char* __p, size_t __bytes);

      // Single-threaded pool: everything belongs to "thread" 0.
      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      // No per-thread accounting is needed without threads.
      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*		 _M_bin;

      // Actual value calculated in _M_initialize().
      size_t 	       	     	_M_bin_size;

      void
      _M_initialize();
  };
258*404b540aSrobert 
259*404b540aSrobert #ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // stores the address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // (i.e. the thread dies) is called, we return the thread id to
      // the front of this list.
      struct _Thread_record
      {
	// Points to next free thread id record. NULL if last record in list.
	_Thread_record*			_M_next;

	// Thread id ranging from 1 to _S_max_threads.
	size_t                          _M_id;
      };

      // A block.  While on a freelist, the block's own storage links
      // to the next free block; while in use it records the owner.
      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record*			_M_next;

	// The thread id of the thread which has requested this block.
	size_t                          _M_thread_id;
      };

      // Per-bin (per power-of-2 size class) bookkeeping.
      struct _Bin_record
      {
	// An "array" of pointers to the first free block for each
	// thread id. Memory to this "array" is allocated in
	// _S_initialize() for _S_max_threads + global pool 0.
	_Block_record**			_M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address*		     	_M_address;

	// An "array" of counters used to keep track of the amount of
	// blocks that are on the freelist/used for each thread id.
	// - Note that the second part of the allocated _M_used "array"
	//   actually hosts (atomic) counters of reclaimed blocks:  in
	//   _M_reserve_block and in _M_reclaim_block those numbers are
	//   subtracted from the first ones to obtain the actual size
	//   of the "working set" of the given thread.
	// - Memory to these "arrays" is allocated in _S_initialize()
	//   for _S_max_threads + global pool 0.
	size_t*				_M_free;
	size_t*			        _M_used;

	// Each bin has its own mutex which is used to ensure data
	// integrity while changing "ownership" on a block.  The mutex
	// is initialized in _S_initialize().
	__gthread_mutex_t*              _M_mutex;
      };

      // XXX GLIBCXX_ABI Deprecated
      // Retained for ABI compatibility only.
      void
      _M_initialize(__destroy_handler);

      // Run _M_initialize() the first time this is called; a cheap
      // no-op afterwards (guarded by _M_init from __pool_base).
      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      // Tear down the pool.  Defined out of line.
      void
      _M_destroy() throw();

      // Carve a fresh chunk into blocks of the bin for __bytes and
      // return the first one.  Defined out of line.
      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      // Return block __p of size __bytes to a freelist.  Defined
      // out of line.
      void
      _M_reclaim_block(char* __p, size_t __bytes);

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      // Record that __block now belongs to __thread_id, updating the
      // bin's free/used counters; a no-op unless threads are active.
      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
			 size_t __thread_id)
      {
	if (__gthread_active_p())
	  {
	    __block->_M_thread_id = __thread_id;
	    --__bin._M_free[__thread_id];
	    ++__bin._M_used[__thread_id];
	  }
      }

      // XXX GLIBCXX_ABI Deprecated
      // Retained for ABI compatibility only.
      void
      _M_destroy_thread_key(void*);

      // Return the calling thread's pool id.  Defined out of line.
      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(NULL), _M_bin_size(1), _M_thread_freelist(NULL)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(NULL), _M_bin_size(1),
      _M_thread_freelist(NULL)
      { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*		_M_bin;

      // Actual value calculated in _M_initialize().
      size_t 	       	     	_M_bin_size;

      // Head of the free-thread-id list and the storage backing it.
      _Thread_record* 		_M_thread_freelist;
      void*			_M_thread_freelist_initial;

      void
      _M_initialize();
    };
387*404b540aSrobert #endif
388*404b540aSrobert 
389*404b540aSrobert   template<template <bool> class _PoolTp, bool _Thread>
390*404b540aSrobert     struct __common_pool
391*404b540aSrobert     {
392*404b540aSrobert       typedef _PoolTp<_Thread> 		pool_type;
393*404b540aSrobert 
394*404b540aSrobert       static pool_type&
_S_get_pool__common_pool395*404b540aSrobert       _S_get_pool()
396*404b540aSrobert       {
397*404b540aSrobert 	static pool_type _S_pool;
398*404b540aSrobert 	return _S_pool;
399*404b540aSrobert       }
400*404b540aSrobert     };
401*404b540aSrobert 
402*404b540aSrobert   template<template <bool> class _PoolTp, bool _Thread>
403*404b540aSrobert     struct __common_pool_base;
404*404b540aSrobert 
405*404b540aSrobert   template<template <bool> class _PoolTp>
406*404b540aSrobert     struct __common_pool_base<_PoolTp, false>
407*404b540aSrobert     : public __common_pool<_PoolTp, false>
408*404b540aSrobert     {
409*404b540aSrobert       using  __common_pool<_PoolTp, false>::_S_get_pool;
410*404b540aSrobert 
411*404b540aSrobert       static void
412*404b540aSrobert       _S_initialize_once()
413*404b540aSrobert       {
414*404b540aSrobert 	static bool __init;
415*404b540aSrobert 	if (__builtin_expect(__init == false, false))
416*404b540aSrobert 	  {
417*404b540aSrobert 	    _S_get_pool()._M_initialize_once();
418*404b540aSrobert 	    __init = true;
419*404b540aSrobert 	  }
420*404b540aSrobert       }
421*404b540aSrobert     };
422*404b540aSrobert 
423*404b540aSrobert #ifdef __GTHREADS
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using  __common_pool<_PoolTp, true>::_S_get_pool;

      // Callback handed to __gthread_once below.
      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      // Initialize the shared pool exactly once, through
      // __gthread_once when threads are active.
      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization. May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread flags.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
455*404b540aSrobert #endif
456*404b540aSrobert 
  /// @brief  Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      // Rebind the policy to another value type; the pool is shared,
      // so only the pool template and thread model carry over.
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using  __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using  __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
  };
469*404b540aSrobert 
470*404b540aSrobert 
471*404b540aSrobert   template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
472*404b540aSrobert     struct __per_type_pool
473*404b540aSrobert     {
474*404b540aSrobert       typedef _Tp 			value_type;
475*404b540aSrobert       typedef _PoolTp<_Thread> 		pool_type;
476*404b540aSrobert 
477*404b540aSrobert       static pool_type&
478*404b540aSrobert       _S_get_pool()
479*404b540aSrobert       {
480*404b540aSrobert 	// Sane defaults for the _PoolTp.
481*404b540aSrobert 	typedef typename pool_type::_Block_record _Block_record;
482*404b540aSrobert 	const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
483*404b540aSrobert 				   ? __alignof__(_Tp) : sizeof(_Block_record));
484*404b540aSrobert 
485*404b540aSrobert 	typedef typename __pool_base::_Tune _Tune;
486*404b540aSrobert 	static _Tune _S_tune(__a, sizeof(_Tp) * 64,
487*404b540aSrobert 			     sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
488*404b540aSrobert 			     sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
489*404b540aSrobert 			     _Tune::_S_max_threads,
490*404b540aSrobert 			     _Tune::_S_freelist_headroom,
491*404b540aSrobert 			     std::getenv("GLIBCXX_FORCE_NEW") ? true : false);
492*404b540aSrobert 	static pool_type _S_pool(_S_tune);
493*404b540aSrobert 	return _S_pool;
494*404b540aSrobert       }
495*404b540aSrobert     };
496*404b540aSrobert 
497*404b540aSrobert   template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
498*404b540aSrobert     struct __per_type_pool_base;
499*404b540aSrobert 
500*404b540aSrobert   template<typename _Tp, template <bool> class _PoolTp>
501*404b540aSrobert     struct __per_type_pool_base<_Tp, _PoolTp, false>
502*404b540aSrobert     : public __per_type_pool<_Tp, _PoolTp, false>
503*404b540aSrobert     {
504*404b540aSrobert       using  __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;
505*404b540aSrobert 
506*404b540aSrobert       static void
507*404b540aSrobert       _S_initialize_once()
508*404b540aSrobert       {
509*404b540aSrobert 	static bool __init;
510*404b540aSrobert 	if (__builtin_expect(__init == false, false))
511*404b540aSrobert 	  {
512*404b540aSrobert 	    _S_get_pool()._M_initialize_once();
513*404b540aSrobert 	    __init = true;
514*404b540aSrobert 	  }
515*404b540aSrobert       }
516*404b540aSrobert     };
517*404b540aSrobert 
518*404b540aSrobert  #ifdef __GTHREADS
 template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using  __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      // Callback handed to __gthread_once below.
      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      // Initialize the per-type pool exactly once, through
      // __gthread_once when threads are active.
      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization. May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread flags.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
550*404b540aSrobert #endif
551*404b540aSrobert 
  /// @brief  Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      // Rebind the policy to another value type; each rebound type
      // gets its own dedicated pool instance.
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
  };
565*404b540aSrobert 
566*404b540aSrobert 
567*404b540aSrobert   /// @brief  Base class for _Tp dependent member functions.
568*404b540aSrobert   template<typename _Tp>
569*404b540aSrobert     class __mt_alloc_base
570*404b540aSrobert     {
571*404b540aSrobert     public:
572*404b540aSrobert       typedef size_t                    size_type;
573*404b540aSrobert       typedef ptrdiff_t                 difference_type;
574*404b540aSrobert       typedef _Tp*                      pointer;
575*404b540aSrobert       typedef const _Tp*                const_pointer;
576*404b540aSrobert       typedef _Tp&                      reference;
577*404b540aSrobert       typedef const _Tp&                const_reference;
578*404b540aSrobert       typedef _Tp                       value_type;
579*404b540aSrobert 
580*404b540aSrobert       pointer
581*404b540aSrobert       address(reference __x) const
582*404b540aSrobert       { return &__x; }
583*404b540aSrobert 
584*404b540aSrobert       const_pointer
585*404b540aSrobert       address(const_reference __x) const
586*404b540aSrobert       { return &__x; }
587*404b540aSrobert 
588*404b540aSrobert       size_type
589*404b540aSrobert       max_size() const throw()
590*404b540aSrobert       { return size_t(-1) / sizeof(_Tp); }
591*404b540aSrobert 
592*404b540aSrobert       // _GLIBCXX_RESOLVE_LIB_DEFECTS
593*404b540aSrobert       // 402. wrong new expression in [some_] allocator::construct
594*404b540aSrobert       void
595*404b540aSrobert       construct(pointer __p, const _Tp& __val)
596*404b540aSrobert       { ::new(__p) _Tp(__val); }
597*404b540aSrobert 
598*404b540aSrobert       void
599*404b540aSrobert       destroy(pointer __p) { __p->~_Tp(); }
600*404b540aSrobert     };
601*404b540aSrobert 
602*404b540aSrobert #ifdef __GTHREADS
603*404b540aSrobert #define __thread_default true
604*404b540aSrobert #else
605*404b540aSrobert #define __thread_default false
606*404b540aSrobert #endif
607*404b540aSrobert 
  /**
   *  @brief  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a "global" one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  the "global" list).
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/ext/mt_allocator.html
   */
  template<typename _Tp,
	   typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef size_t                    	size_type;
      typedef ptrdiff_t                 	difference_type;
      typedef _Tp*                      	pointer;
      typedef const _Tp*                	const_pointer;
      typedef _Tp&                      	reference;
      typedef const _Tp&                	const_reference;
      typedef _Tp                       	value_type;
      typedef _Poolp      			__policy_type;
      typedef typename _Poolp::pool_type	__pool_type;

      // Rebind to another value type, carrying the pool policy along
      // via the policy's own _M_rebind.
      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
	  typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
	  typedef __mt_alloc<_Tp1, pol_type> other;
	};

      // The allocator object itself is stateless: construction,
      // copying and destruction are no-ops.
      __mt_alloc() throw() { }

      __mt_alloc(const __mt_alloc&) throw() { }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) throw() { }

      ~__mt_alloc() throw() { }

      // Allocate storage for __n objects of _Tp; defined out of line.
      pointer
      allocate(size_type __n, const void* = 0);

      // Release storage previously obtained from allocate; defined
      // out of line.
      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
	// Return a copy, not a reference, for external consumption.
	return __policy_type::_S_get_pool()._M_get_options();
      }

      // Forward new tuning parameters to the policy's pool.
      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };
666*404b540aSrobert 
667*404b540aSrobert   template<typename _Tp, typename _Poolp>
668*404b540aSrobert     typename __mt_alloc<_Tp, _Poolp>::pointer
669*404b540aSrobert     __mt_alloc<_Tp, _Poolp>::
670*404b540aSrobert     allocate(size_type __n, const void*)
671*404b540aSrobert     {
672*404b540aSrobert       if (__builtin_expect(__n > this->max_size(), false))
673*404b540aSrobert 	std::__throw_bad_alloc();
674*404b540aSrobert 
675*404b540aSrobert       __policy_type::_S_initialize_once();
676*404b540aSrobert 
677*404b540aSrobert       // Requests larger than _M_max_bytes are handled by operator
678*404b540aSrobert       // new/delete directly.
679*404b540aSrobert       __pool_type& __pool = __policy_type::_S_get_pool();
680*404b540aSrobert       const size_t __bytes = __n * sizeof(_Tp);
681*404b540aSrobert       if (__pool._M_check_threshold(__bytes))
682*404b540aSrobert 	{
683*404b540aSrobert 	  void* __ret = ::operator new(__bytes);
684*404b540aSrobert 	  return static_cast<_Tp*>(__ret);
685*404b540aSrobert 	}
686*404b540aSrobert 
687*404b540aSrobert       // Round up to power of 2 and figure out which bin to use.
688*404b540aSrobert       const size_t __which = __pool._M_get_binmap(__bytes);
689*404b540aSrobert       const size_t __thread_id = __pool._M_get_thread_id();
690*404b540aSrobert 
691*404b540aSrobert       // Find out if we have blocks on our freelist.  If so, go ahead
692*404b540aSrobert       // and use them directly without having to lock anything.
693*404b540aSrobert       char* __c;
694*404b540aSrobert       typedef typename __pool_type::_Bin_record _Bin_record;
695*404b540aSrobert       const _Bin_record& __bin = __pool._M_get_bin(__which);
696*404b540aSrobert       if (__bin._M_first[__thread_id])
697*404b540aSrobert 	{
698*404b540aSrobert 	  // Already reserved.
699*404b540aSrobert 	  typedef typename __pool_type::_Block_record _Block_record;
700*404b540aSrobert 	  _Block_record* __block = __bin._M_first[__thread_id];
701*404b540aSrobert 	  __bin._M_first[__thread_id] = __block->_M_next;
702*404b540aSrobert 
703*404b540aSrobert 	  __pool._M_adjust_freelist(__bin, __block, __thread_id);
704*404b540aSrobert 	  __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
705*404b540aSrobert 	}
706*404b540aSrobert       else
707*404b540aSrobert 	{
708*404b540aSrobert 	  // Null, reserve.
709*404b540aSrobert 	  __c = __pool._M_reserve_block(__bytes, __thread_id);
710*404b540aSrobert 	}
711*404b540aSrobert       return static_cast<_Tp*>(static_cast<void*>(__c));
712*404b540aSrobert     }
713*404b540aSrobert 
714*404b540aSrobert   template<typename _Tp, typename _Poolp>
715*404b540aSrobert     void
716*404b540aSrobert     __mt_alloc<_Tp, _Poolp>::
717*404b540aSrobert     deallocate(pointer __p, size_type __n)
718*404b540aSrobert     {
719*404b540aSrobert       if (__builtin_expect(__p != 0, true))
720*404b540aSrobert 	{
721*404b540aSrobert 	  // Requests larger than _M_max_bytes are handled by
722*404b540aSrobert 	  // operators new/delete directly.
723*404b540aSrobert 	  __pool_type& __pool = __policy_type::_S_get_pool();
724*404b540aSrobert 	  const size_t __bytes = __n * sizeof(_Tp);
725*404b540aSrobert 	  if (__pool._M_check_threshold(__bytes))
726*404b540aSrobert 	    ::operator delete(__p);
727*404b540aSrobert 	  else
728*404b540aSrobert 	    __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
729*404b540aSrobert 	}
730*404b540aSrobert     }
731*404b540aSrobert 
732*404b540aSrobert   template<typename _Tp, typename _Poolp>
733*404b540aSrobert     inline bool
734*404b540aSrobert     operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
735*404b540aSrobert     { return true; }
736*404b540aSrobert 
737*404b540aSrobert   template<typename _Tp, typename _Poolp>
738*404b540aSrobert     inline bool
739*404b540aSrobert     operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
740*404b540aSrobert     { return false; }
741*404b540aSrobert 
742*404b540aSrobert #undef __thread_default
743*404b540aSrobert 
744*404b540aSrobert _GLIBCXX_END_NAMESPACE
745*404b540aSrobert 
746*404b540aSrobert #endif
747