// Allocators -*- C++ -*-

// Copyright (C) 2001, 2002, 2003 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

/** @file stl_alloc.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef __GLIBCPP_INTERNAL_ALLOC_H
#define __GLIBCPP_INTERNAL_ALLOC_H

/**
 *  @defgroup Allocators Memory Allocators
 *  @if maint
 *  stl_alloc.h implements some node allocators.  These are NOT the same as
 *  allocators in the C++ standard, nor in the original H-P STL.  They do not
 *  encapsulate different pointer types; we assume that there is only one
 *  pointer type.  The C++ standard allocators are intended to allocate
 *  individual objects, not pools or arenas.
 *
 *  In this file allocators are of two different styles:  "standard" and
 *  "SGI" (quotes included).  "Standard" allocators conform to 20.4.  "SGI"
 *  allocators differ in AT LEAST the following ways (add to this list as you
 *  discover them):
 *
 *   - "Standard" allocate() takes two parameters (n_count,hint=0) but "SGI"
 *     allocate() takes one parameter (n_size).
 *   - Likewise, "standard" deallocate()'s argument is a count, but in "SGI"
 *     it is a byte size.
 *   - max_size(), construct(), and destroy() are missing in "SGI" allocators.
 *   - reallocate(p,oldsz,newsz) is added in "SGI", and behaves as
 *     if p=realloc(p,newsz).
 *
 *  "SGI" allocators may be wrapped in __allocator to convert the interface
 *  into a "standard" one.
 *  @endif
 *
 *  @note The @c reallocate member functions have been deprecated for 3.2
 *        and will be removed in 3.4.  You must define @c _GLIBCPP_DEPRECATED
 *        to make this visible in 3.2; see c++config.h.
 *
 *  The canonical description of these classes is in docs/html/ext/howto.html
 *  or online at http://gcc.gnu.org/onlinedocs/libstdc++/ext/howto.html#3
 */

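// An illustrative sketch (not part of the original documentation) of the two
// calling conventions described above, using types defined later in this
// file: an "SGI" allocator takes byte sizes, a "standard" allocator takes
// element counts.
//
//   // "SGI" style: sizes are in bytes.
//   void* __raw = std::__alloc::allocate(16 * sizeof(int));
//   std::__alloc::deallocate(__raw, 16 * sizeof(int));
//
//   // "Standard" style (20.4): sizes are element counts, hint defaults to 0.
//   std::allocator<int> __a;
//   int* __p = __a.allocate(16);
//   __a.deallocate(__p, 16);
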
#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <bits/functexcept.h>   // For __throw_bad_alloc
#include <bits/stl_threads.h>

#include <bits/atomicity.h>

namespace std
{
  /**
   *  @if maint
   *  A new-based allocator, as required by the standard.  Allocation and
   *  deallocation forward to global new and delete.  "SGI" style, minus
   *  reallocate().
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  class __new_alloc
  {
  public:
    static void*
    allocate(size_t __n)
    { return ::operator new(__n); }

    static void
    deallocate(void* __p, size_t)
    { ::operator delete(__p); }
  };

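  // Illustrative sketch (not in the original source): both calls below simply
  // forward to the global operator new/delete, so an allocation failure
  // surfaces as a std::bad_alloc exception and the size argument to
  // deallocate() is ignored.
  //
  //   void* __buf = __new_alloc::allocate(64);
  //   __new_alloc::deallocate(__buf, 64);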

  /**
   *  @if maint
   *  A malloc-based allocator.  Typically slower than the
   *  __default_alloc_template (below).  Typically thread-safe and more
   *  storage efficient.  The template argument is unused and is only present
   *  to permit multiple instantiations (but see __default_alloc_template
   *  for caveats).  "SGI" style, plus __set_malloc_handler for OOM conditions.
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<int __inst>
    class __malloc_alloc_template
    {
    private:
      static void* _S_oom_malloc(size_t);
      static void* _S_oom_realloc(void*, size_t);
      static void (* __malloc_alloc_oom_handler)();

    public:
      static void*
      allocate(size_t __n)
      {
        void* __result = malloc(__n);
        if (__builtin_expect(__result == 0, 0))
          __result = _S_oom_malloc(__n);
        return __result;
      }

      static void
      deallocate(void* __p, size_t /* __n */)
      { free(__p); }

      static void*
      reallocate(void* __p, size_t /* old_sz */, size_t __new_sz)
      {
        void* __result = realloc(__p, __new_sz);
        if (__builtin_expect(__result == 0, 0))
          __result = _S_oom_realloc(__p, __new_sz);
        return __result;
      }

      static void (* __set_malloc_handler(void (*__f)()))()
      {
        void (* __old)() = __malloc_alloc_oom_handler;
        __malloc_alloc_oom_handler = __f;
        return __old;
      }
    };

  // malloc_alloc out-of-memory handling
  template<int __inst>
    void (* __malloc_alloc_template<__inst>::__malloc_alloc_oom_handler)() = 0;

  template<int __inst>
    void*
    __malloc_alloc_template<__inst>::
    _S_oom_malloc(size_t __n)
    {
      void (* __my_malloc_handler)();
      void* __result;

      for (;;)
        {
          __my_malloc_handler = __malloc_alloc_oom_handler;
          if (__builtin_expect(__my_malloc_handler == 0, 0))
            __throw_bad_alloc();
          (*__my_malloc_handler)();
          __result = malloc(__n);
          if (__result)
            return __result;
        }
    }

  template<int __inst>
    void*
    __malloc_alloc_template<__inst>::
    _S_oom_realloc(void* __p, size_t __n)
    {
      void (* __my_malloc_handler)();
      void* __result;

      for (;;)
        {
          __my_malloc_handler = __malloc_alloc_oom_handler;
          if (__builtin_expect(__my_malloc_handler == 0, 0))
            __throw_bad_alloc();
          (*__my_malloc_handler)();
          __result = realloc(__p, __n);
          if (__result)
            return __result;
        }
    }

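  // Illustrative sketch (the handler name below is hypothetical): the
  // installed handler is expected to release memory so that the retry loop
  // in _S_oom_malloc/_S_oom_realloc can succeed; with no handler installed,
  // allocation failure ends in __throw_bad_alloc().
  //
  //   extern void __release_emergency_pool();        // assumed user function
  //   typedef __malloc_alloc_template<0> __malloc_alloc;
  //   void (*__prev)() =
  //     __malloc_alloc::__set_malloc_handler(&__release_emergency_pool);
  //   void* __p = __malloc_alloc::allocate(1024);    // retries via handler
  //   __malloc_alloc::deallocate(__p, 1024);
  //   __malloc_alloc::__set_malloc_handler(__prev);  // restore old handler
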
  // Should not be referenced within the library anymore.
  typedef __new_alloc                 __mem_interface;

  /**
   *  @if maint
   *  This is used primarily (only?) in _Alloc_traits and other places to
   *  help provide the _Alloc_type typedef.  All it does is forward the
   *  requests after some minimal checking.
   *
   *  This is neither "standard"-conforming nor "SGI".  The _Alloc parameter
   *  must be "SGI" style.
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<typename _Tp, typename _Alloc>
    class __simple_alloc
    {
    public:
      static _Tp*
      allocate(size_t __n)
      {
        _Tp* __ret = 0;
        if (__n)
          __ret = static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)));
        return __ret;
      }

      static _Tp*
      allocate()
      { return (_Tp*) _Alloc::allocate(sizeof (_Tp)); }

      static void
      deallocate(_Tp* __p, size_t __n)
      { if (0 != __n) _Alloc::deallocate(__p, __n * sizeof (_Tp)); }

      static void
      deallocate(_Tp* __p)
      { _Alloc::deallocate(__p, sizeof (_Tp)); }
    };

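  // Illustrative sketch (not in the original source): __simple_alloc converts
  // element counts into the byte sizes its "SGI" style _Alloc expects, so the
  // call below becomes __new_alloc::allocate(10 * sizeof(double)) underneath.
  //
  //   typedef __simple_alloc<double, __new_alloc> __dalloc;
  //   double* __d = __dalloc::allocate(10);
  //   __dalloc::deallocate(__d, 10);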

  /**
   *  @if maint
   *  An adaptor for an underlying allocator (_Alloc) to check the size
   *  arguments for debugging.
   *
   *  "There is some evidence that this can confuse Purify." - SGI comment
   *
   *  This adaptor is "SGI" style.  The _Alloc parameter must also be "SGI".
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<typename _Alloc>
    class __debug_alloc
    {
    private:
      // Size of space used to store size.  Note that this must be
      // large enough to preserve alignment.
      enum {_S_extra = 8};

    public:
      static void*
      allocate(size_t __n)
      {
        char* __result = (char*)_Alloc::allocate(__n + (int) _S_extra);
        *(size_t*)__result = __n;
        return __result + (int) _S_extra;
      }

      static void
      deallocate(void* __p, size_t __n)
      {
        char* __real_p = (char*)__p - (int) _S_extra;
        if (*(size_t*)__real_p != __n)
          abort();
        _Alloc::deallocate(__real_p, __n + (int) _S_extra);
      }

      static void*
      reallocate(void* __p, size_t __old_sz, size_t __new_sz)
      {
        char* __real_p = (char*)__p - (int) _S_extra;
        if (*(size_t*)__real_p != __old_sz)
          abort();
        char* __result = (char*) _Alloc::reallocate(__real_p,
                                                    __old_sz + (int) _S_extra,
                                                    __new_sz + (int) _S_extra);
        *(size_t*)__result = __new_sz;
        return __result + (int) _S_extra;
      }
    };

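  // Illustrative sketch (not in the original source): wrapping an "SGI"
  // allocator in __debug_alloc stores the requested size in the first
  // _S_extra (8) bytes of each block and abort()s on a size mismatch at
  // deallocation, which catches size-tracking bugs in client code.
  //
  //   typedef __debug_alloc<__new_alloc> __checked_alloc;
  //   void* __p = __checked_alloc::allocate(32);
  //   __checked_alloc::deallocate(__p, 32);   // abort()s if 32 is wrong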

  /**
   *  @if maint
   *  Default node allocator.  "SGI" style.  Uses various allocators to
   *  fulfill underlying requests (and makes as few requests as possible
   *  when in default high-speed pool mode).
   *
   *  Important implementation properties:
   *  0. If globally mandated, then allocate objects from __new_alloc
   *  1. If the clients request an object of size > _MAX_BYTES, the resulting
   *     object will be obtained directly from __new_alloc
   *  2. In all other cases, we allocate an object of size exactly
   *     _S_round_up(requested_size).  Thus the client has enough size
   *     information that we can return the object to the proper free list
   *     without permanently losing part of the object.
   *
   *  The first template parameter specifies whether more than one thread may
   *  use this allocator.  It is safe to allocate an object from one instance
   *  of a default_alloc and deallocate it with another one.  This effectively
   *  transfers its ownership to the second one.  This may have undesirable
   *  effects on reference locality.
   *
   *  The second parameter is unused and serves only to allow the creation of
   *  multiple default_alloc instances.  Note that containers built on different
   *  allocator instances have different types, limiting the utility of this
   *  approach.  If you do not wish to share the free lists with the main
   *  default_alloc instance, instantiate this with a non-zero __inst.
   *
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<bool __threads, int __inst>
    class __default_alloc_template
    {
    private:
      enum {_ALIGN = 8};
      enum {_MAX_BYTES = 128};
      enum {_NFREELISTS = _MAX_BYTES / _ALIGN};

      union _Obj
      {
        union _Obj* _M_free_list_link;
        char        _M_client_data[1];    // The client sees this.
      };

      static _Obj* volatile         _S_free_list[_NFREELISTS];

      // Chunk allocation state.
      static char*                  _S_start_free;
      static char*                  _S_end_free;
      static size_t                 _S_heap_size;

      static _STL_mutex_lock        _S_node_allocator_lock;

      static size_t
      _S_round_up(size_t __bytes)
      { return (((__bytes) + (size_t) _ALIGN - 1) & ~((size_t) _ALIGN - 1)); }

      static size_t
      _S_freelist_index(size_t __bytes)
      { return (((__bytes) + (size_t)_ALIGN - 1)/(size_t)_ALIGN - 1); }

      // Returns an object of size __n, and optionally adds entries to the
      // size-__n free list.
      static void*
      _S_refill(size_t __n);

      // Allocates a chunk for __nobjs objects of size __size.  __nobjs may
      // be reduced if it is inconvenient to allocate the requested number.
      static char*
      _S_chunk_alloc(size_t __size, int& __nobjs);

      // It would be nice to use _STL_auto_lock here.  But we need a
      // test whether threads are in use.
      struct _Lock
      {
        _Lock() { if (__threads) _S_node_allocator_lock._M_acquire_lock(); }
        ~_Lock() { if (__threads) _S_node_allocator_lock._M_release_lock(); }
      } __attribute__ ((__unused__));
      friend struct _Lock;

      static _Atomic_word _S_force_new;

    public:
      // __n must be > 0
      static void*
      allocate(size_t __n)
      {
        void* __ret = 0;

        // If there is a race through here, assume answer from getenv
        // will resolve in same direction.  Inspired by techniques
        // to efficiently support threading found in basic_string.h.
        if (_S_force_new == 0)
          {
            if (getenv("GLIBCPP_FORCE_NEW"))
              __atomic_add(&_S_force_new, 1);
            else
              __atomic_add(&_S_force_new, -1);
          }

        if ((__n > (size_t) _MAX_BYTES) || (_S_force_new > 0))
          __ret = __new_alloc::allocate(__n);
        else
          {
            _Obj* volatile* __my_free_list = _S_free_list
              + _S_freelist_index(__n);
            // Acquire the lock here with a constructor call.  This
            // ensures that it is released in exit or during stack
            // unwinding.
            _Lock __lock_instance;
            _Obj* __restrict__ __result = *__my_free_list;
            if (__builtin_expect(__result == 0, 0))
              __ret = _S_refill(_S_round_up(__n));
            else
              {
                *__my_free_list = __result -> _M_free_list_link;
                __ret = __result;
              }
            if (__builtin_expect(__ret == 0, 0))
              __throw_bad_alloc();
          }
        return __ret;
      }

      // __p may not be 0
      static void
      deallocate(void* __p, size_t __n)
      {
        if ((__n > (size_t) _MAX_BYTES) || (_S_force_new > 0))
          __new_alloc::deallocate(__p, __n);
        else
          {
            _Obj* volatile*  __my_free_list = _S_free_list
              + _S_freelist_index(__n);
            _Obj* __q = (_Obj*)__p;

            // Acquire the lock here with a constructor call.  This
            // ensures that it is released in exit or during stack
            // unwinding.
            _Lock __lock_instance;
            __q -> _M_free_list_link = *__my_free_list;
            *__my_free_list = __q;
          }
      }

      static void*
      reallocate(void* __p, size_t __old_sz, size_t __new_sz);
    };

  template<bool __threads, int __inst> _Atomic_word
  __default_alloc_template<__threads, __inst>::_S_force_new = 0;

  template<bool __threads, int __inst>
    inline bool
    operator==(const __default_alloc_template<__threads,__inst>&,
               const __default_alloc_template<__threads,__inst>&)
    { return true; }

  template<bool __threads, int __inst>
    inline bool
    operator!=(const __default_alloc_template<__threads,__inst>&,
               const __default_alloc_template<__threads,__inst>&)
    { return false; }


  // We allocate memory in large chunks in order to avoid fragmenting the
  // heap too much.  We assume that __size is properly aligned.  We hold
  // the allocation lock.
  template<bool __threads, int __inst>
    char*
    __default_alloc_template<__threads, __inst>::
    _S_chunk_alloc(size_t __size, int& __nobjs)
    {
      char* __result;
      size_t __total_bytes = __size * __nobjs;
      size_t __bytes_left = _S_end_free - _S_start_free;

      if (__bytes_left >= __total_bytes)
        {
          __result = _S_start_free;
          _S_start_free += __total_bytes;
          return __result;
        }
      else if (__bytes_left >= __size)
        {
          __nobjs = (int)(__bytes_left/__size);
          __total_bytes = __size * __nobjs;
          __result = _S_start_free;
          _S_start_free += __total_bytes;
          return __result;
        }
      else
        {
          size_t __bytes_to_get =
            2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
          // Try to make use of the left-over piece.
          if (__bytes_left > 0)
            {
              _Obj* volatile* __my_free_list =
                _S_free_list + _S_freelist_index(__bytes_left);

              ((_Obj*)(void*)_S_start_free) -> _M_free_list_link = *__my_free_list;
              *__my_free_list = (_Obj*)(void*)_S_start_free;
            }
          _S_start_free = (char*) __new_alloc::allocate(__bytes_to_get);
          if (_S_start_free == 0)
            {
              size_t __i;
              _Obj* volatile* __my_free_list;
              _Obj* __p;
              // Try to make do with what we have.  That can't hurt.  We
              // do not try smaller requests, since that tends to result
              // in disaster on multi-process machines.
              __i = __size;
              for (; __i <= (size_t) _MAX_BYTES; __i += (size_t) _ALIGN)
                {
                  __my_free_list = _S_free_list + _S_freelist_index(__i);
                  __p = *__my_free_list;
                  if (__p != 0)
                    {
                      *__my_free_list = __p -> _M_free_list_link;
                      _S_start_free = (char*)__p;
                      _S_end_free = _S_start_free + __i;
                      return _S_chunk_alloc(__size, __nobjs);
                      // Any leftover piece will eventually make it to the
                      // right free list.
                    }
                }
              _S_end_free = 0;        // In case of exception.
              _S_start_free = (char*)__new_alloc::allocate(__bytes_to_get);
              // This should either throw an exception or remedy the situation.
              // Thus we assume it succeeded.
            }
          _S_heap_size += __bytes_to_get;
          _S_end_free = _S_start_free + __bytes_to_get;
          return _S_chunk_alloc(__size, __nobjs);
        }
    }

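  // Worked example (a sketch of the arithmetic above, assuming a fresh pool
  // with _S_heap_size == 0): a first refill request of 20 objects of 8 bytes
  // asks _S_chunk_alloc for __total_bytes = 160.  With no bytes left, it
  // allocates __bytes_to_get = 2 * 160 + _S_round_up(0 >> 4) = 320 bytes from
  // __new_alloc, hands back the first 160, and keeps the remaining 160 in
  // _S_start_free/_S_end_free for later requests.  On the next miss,
  // _S_heap_size == 320, so the over-allocation grows by another
  // _S_round_up(320 >> 4) = 24 bytes, and so on.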

  // Returns an object of size __n, and optionally adds entries to the
  // size-__n free list.  We assume that __n is properly aligned.  We
  // hold the allocation lock.
  template<bool __threads, int __inst>
    void*
    __default_alloc_template<__threads, __inst>::_S_refill(size_t __n)
    {
      int __nobjs = 20;
      char* __chunk = _S_chunk_alloc(__n, __nobjs);
      _Obj* volatile* __my_free_list;
      _Obj* __result;
      _Obj* __current_obj;
      _Obj* __next_obj;
      int __i;

      if (1 == __nobjs)
        return __chunk;
      __my_free_list = _S_free_list + _S_freelist_index(__n);

      // Build free list in chunk.
      __result = (_Obj*)(void*)__chunk;
      *__my_free_list = __next_obj = (_Obj*)(void*)(__chunk + __n);
      for (__i = 1; ; __i++)
        {
          __current_obj = __next_obj;
          __next_obj = (_Obj*)(void*)((char*)__next_obj + __n);
          if (__nobjs - 1 == __i)
            {
              __current_obj -> _M_free_list_link = 0;
              break;
            }
          else
            __current_obj -> _M_free_list_link = __next_obj;
        }
      return __result;
    }


  template<bool threads, int inst>
    void*
    __default_alloc_template<threads, inst>::
    reallocate(void* __p, size_t __old_sz, size_t __new_sz)
    {
      void* __result;
      size_t __copy_sz;

      if (__old_sz > (size_t) _MAX_BYTES && __new_sz > (size_t) _MAX_BYTES)
        return(realloc(__p, __new_sz));
      if (_S_round_up(__old_sz) == _S_round_up(__new_sz))
        return(__p);
      __result = allocate(__new_sz);
      __copy_sz = __new_sz > __old_sz ? __old_sz : __new_sz;
      memcpy(__result, __p, __copy_sz);
      deallocate(__p, __old_sz);
      return __result;
    }

  template<bool __threads, int __inst>
    _STL_mutex_lock
    __default_alloc_template<__threads,__inst>::_S_node_allocator_lock
    __STL_MUTEX_INITIALIZER;

  template<bool __threads, int __inst>
    char* __default_alloc_template<__threads,__inst>::_S_start_free = 0;

  template<bool __threads, int __inst>
    char* __default_alloc_template<__threads,__inst>::_S_end_free = 0;

  template<bool __threads, int __inst>
    size_t __default_alloc_template<__threads,__inst>::_S_heap_size = 0;

  template<bool __threads, int __inst>
    typename __default_alloc_template<__threads,__inst>::_Obj* volatile
    __default_alloc_template<__threads,__inst>::_S_free_list[_NFREELISTS];

  typedef __default_alloc_template<true,0>    __alloc;
  typedef __default_alloc_template<false,0>   __single_client_alloc;

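  // Illustrative sketch (not in the original source): a 20-byte request to
  // __alloc is rounded up to _S_round_up(20) == 24 bytes and served from
  // free list _S_freelist_index(20) == 2; a 200-byte request exceeds
  // _MAX_BYTES (128) and goes straight to __new_alloc, as does everything
  // when the GLIBCPP_FORCE_NEW environment variable is set.
  //
  //   void* __small = __alloc::allocate(20);    // pooled, 24-byte node
  //   void* __large = __alloc::allocate(200);   // operator new
  //   __alloc::deallocate(__small, 20);
  //   __alloc::deallocate(__large, 200);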

  /**
   *  @brief  The "standard" allocator, as per [20.4].
   *
   *  The private _Alloc is "SGI" style.  (See comments at the top
   *  of stl_alloc.h.)
   *
   *  The underlying allocator behaves as follows.
   *    - __default_alloc_template is used via two typedefs
   *    - "__single_client_alloc" typedef does no locking for threads
   *    - "__alloc" typedef is threadsafe via the locks
   *    - __new_alloc is used for memory requests
   *
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<typename _Tp>
    class allocator
    {
      typedef __alloc _Alloc;          // The underlying allocator.
    public:
      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef allocator<_Tp1> other; };

      allocator() throw() {}
      allocator(const allocator&) throw() {}
      template<typename _Tp1>
        allocator(const allocator<_Tp1>&) throw() {}
      ~allocator() throw() {}

      pointer
      address(reference __x) const { return &__x; }

      const_pointer
      address(const_reference __x) const { return &__x; }

      // NB: __n is permitted to be 0.  The C++ standard says nothing
      // about what the return value is when __n == 0.
      _Tp*
      allocate(size_type __n, const void* = 0)
      {
        _Tp* __ret = 0;
        if (__n)
          {
            if (__n <= this->max_size())
              __ret = static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)));
            else
              __throw_bad_alloc();
          }
        return __ret;
      }

      // __p is not permitted to be a null pointer.
      void
      deallocate(pointer __p, size_type __n)
      { _Alloc::deallocate(__p, __n * sizeof(_Tp)); }

      size_type
      max_size() const throw() { return size_t(-1) / sizeof(_Tp); }

      void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
      void destroy(pointer __p) { __p->~_Tp(); }
    };

  template<>
    class allocator<void>
    {
    public:
      typedef size_t      size_type;
      typedef ptrdiff_t   difference_type;
      typedef void*       pointer;
      typedef const void* const_pointer;
      typedef void        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef allocator<_Tp1> other; };
    };


  template<typename _T1, typename _T2>
    inline bool
    operator==(const allocator<_T1>&, const allocator<_T2>&)
    { return true; }

  template<typename _T1, typename _T2>
    inline bool
    operator!=(const allocator<_T1>&, const allocator<_T2>&)
    { return false; }

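  // Illustrative sketch (not in the original source) of the standard
  // allocate/construct/destroy/deallocate cycle exposed by allocator<_Tp>:
  //
  //   allocator<int> __a;
  //   int* __p = __a.allocate(1);      // raw, uninitialized storage
  //   __a.construct(__p, 42);          // placement new: new(__p) int(42)
  //   __a.destroy(__p);                // run the destructor (trivial here)
  //   __a.deallocate(__p, 1);          // return storage to the pool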

  /**
   *  @if maint
   *  Allocator adaptor to turn an "SGI" style allocator (e.g.,
   *  __alloc, __malloc_alloc_template) into a "standard" conforming
   *  allocator.  Note that this adaptor does *not* assume that all
   *  objects of the underlying alloc class are identical, nor does it
   *  assume that all of the underlying alloc's member functions are
   *  static member functions.  Note, also, that __allocator<_Tp,
   *  __alloc> is essentially the same thing as allocator<_Tp>.
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  template<typename _Tp, typename _Alloc>
    struct __allocator
    {
      _Alloc __underlying_alloc;

      typedef size_t    size_type;
      typedef ptrdiff_t difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __allocator<_Tp1, _Alloc> other; };

      __allocator() throw() {}
      __allocator(const __allocator& __a) throw()
      : __underlying_alloc(__a.__underlying_alloc) {}

      template<typename _Tp1>
        __allocator(const __allocator<_Tp1, _Alloc>& __a) throw()
        : __underlying_alloc(__a.__underlying_alloc) {}

      ~__allocator() throw() {}

      pointer
      address(reference __x) const { return &__x; }

      const_pointer
      address(const_reference __x) const { return &__x; }

      // NB: __n is permitted to be 0.  The C++ standard says nothing
      // about what the return value is when __n == 0.
      _Tp*
      allocate(size_type __n, const void* = 0)
      {
        _Tp* __ret = 0;
        if (__n)
          __ret = static_cast<_Tp*>(_Alloc::allocate(__n * sizeof(_Tp)));
        return __ret;
      }

      // __p is not permitted to be a null pointer.
      void
      deallocate(pointer __p, size_type __n)
      { __underlying_alloc.deallocate(__p, __n * sizeof(_Tp)); }

      size_type
      max_size() const throw() { return size_t(-1) / sizeof(_Tp); }

      void
      construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
    };

  template<typename _Alloc>
    struct __allocator<void, _Alloc>
    {
      typedef size_t      size_type;
      typedef ptrdiff_t   difference_type;
      typedef void*       pointer;
      typedef const void* const_pointer;
      typedef void        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __allocator<_Tp1, _Alloc> other; };
    };

  template<typename _Tp, typename _Alloc>
    inline bool
    operator==(const __allocator<_Tp,_Alloc>& __a1,
               const __allocator<_Tp,_Alloc>& __a2)
    { return __a1.__underlying_alloc == __a2.__underlying_alloc; }

  template<typename _Tp, typename _Alloc>
    inline bool
    operator!=(const __allocator<_Tp, _Alloc>& __a1,
               const __allocator<_Tp, _Alloc>& __a2)
    { return __a1.__underlying_alloc != __a2.__underlying_alloc; }

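  // Illustrative sketch (not in the original source): __allocator adapts an
  // "SGI" style allocator into the standard interface, so it can be handed
  // to a container as its allocator type.  __allocator<int, __alloc> behaves
  // essentially like allocator<int>.
  //
  //   typedef __allocator<int, __malloc_alloc_template<0> > __std_malloc;
  //   __std_malloc __m;
  //   int* __p = __m.allocate(4);     // malloc(4 * sizeof(int)) underneath
  //   __m.deallocate(__p, 4);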

  //@{
  /** Comparison operators for all of the predefined SGI-style allocators.
   *  This ensures that __allocator<malloc_alloc> (for example) will work
   *  correctly.  As required, all allocators compare equal.
   */
  template<int inst>
    inline bool
    operator==(const __malloc_alloc_template<inst>&,
               const __malloc_alloc_template<inst>&)
    { return true; }

  template<int __inst>
    inline bool
    operator!=(const __malloc_alloc_template<__inst>&,
               const __malloc_alloc_template<__inst>&)
    { return false; }

  template<typename _Alloc>
    inline bool
    operator==(const __debug_alloc<_Alloc>&, const __debug_alloc<_Alloc>&)
    { return true; }

  template<typename _Alloc>
    inline bool
    operator!=(const __debug_alloc<_Alloc>&, const __debug_alloc<_Alloc>&)
    { return false; }
  //@}


  /**
   *  @if maint
   *  Another allocator adaptor:  _Alloc_traits.  This serves two purposes.
   *  First, make it possible to write containers that can use either "SGI"
   *  style allocators or "standard" allocators.  Second, provide a mechanism
   *  so that containers can query whether or not the allocator has distinct
   *  instances.  If not, the container can avoid wasting a word of memory to
   *  store an empty object.  For examples of use, see stl_vector.h, etc, or
   *  any of the other classes derived from this one.
   *
   *  This adaptor uses partial specialization.  The general case of
   *  _Alloc_traits<_Tp, _Alloc> assumes that _Alloc is a
   *  standard-conforming allocator, possibly with non-equal instances and
   *  non-static members.  (It still behaves correctly even if _Alloc has
   *  static members and if all instances are equal.  Refinements affect
   *  performance, not correctness.)
   *
   *  There are always two members:  allocator_type, which is a standard-
   *  conforming allocator type for allocating objects of type _Tp, and
   *  _S_instanceless, a static const member of type bool.  If
   *  _S_instanceless is true, this means that there is no difference
   *  between any two instances of type allocator_type.  Furthermore, if
   *  _S_instanceless is true, then _Alloc_traits has one additional
   *  member:  _Alloc_type.  This type encapsulates allocation and
   *  deallocation of objects of type _Tp through a static interface; it
   *  has two member functions, whose signatures are
   *
   *  -  static _Tp* allocate(size_t)
   *  -  static void deallocate(_Tp*, size_t)
   *
   *  The size_t parameters are "standard" style (see top of stl_alloc.h) in
   *  that they take counts, not sizes.
   *
   *  @endif
   *  (See @link Allocators allocators info @endlink for more.)
   */
  //@{
  // The fully general version.
  template<typename _Tp, typename _Allocator>
    struct _Alloc_traits
    {
      static const bool _S_instanceless = false;
      typedef typename _Allocator::template rebind<_Tp>::other allocator_type;
    };

  template<typename _Tp, typename _Allocator>
    const bool _Alloc_traits<_Tp, _Allocator>::_S_instanceless;

  /// The version for the default allocator.
  template<typename _Tp, typename _Tp1>
    struct _Alloc_traits<_Tp, allocator<_Tp1> >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __alloc> _Alloc_type;
      typedef allocator<_Tp> allocator_type;
    };
  //@}

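  // Illustrative sketch (not in the original source): this is how containers
  // use these traits.  For the instanceless default allocator, the nested
  // _Alloc_type gives a static, count-based interface, so no allocator
  // object needs to be stored in the container.
  //
  //   typedef _Alloc_traits<int, allocator<int> > _Traits;
  //   // _Traits::_S_instanceless == true
  //   int* __p = _Traits::_Alloc_type::allocate(10);   // 10 ints
  //   _Traits::_Alloc_type::deallocate(__p, 10);
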
  //@{
  /// Versions for the predefined "SGI" style allocators.
  template<typename _Tp, int __inst>
    struct _Alloc_traits<_Tp, __malloc_alloc_template<__inst> >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
      typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
    };

  template<typename _Tp, bool __threads, int __inst>
    struct _Alloc_traits<_Tp, __default_alloc_template<__threads, __inst> >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __default_alloc_template<__threads, __inst> >
      _Alloc_type;
      typedef __allocator<_Tp, __default_alloc_template<__threads, __inst> >
      allocator_type;
    };

  template<typename _Tp, typename _Alloc>
    struct _Alloc_traits<_Tp, __debug_alloc<_Alloc> >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __debug_alloc<_Alloc> > _Alloc_type;
      typedef __allocator<_Tp, __debug_alloc<_Alloc> > allocator_type;
    };
  //@}

  //@{
  /// Versions for the __allocator adaptor used with the predefined
  /// "SGI" style allocators.
  template<typename _Tp, typename _Tp1, int __inst>
    struct _Alloc_traits<_Tp,
                         __allocator<_Tp1, __malloc_alloc_template<__inst> > >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __malloc_alloc_template<__inst> > _Alloc_type;
      typedef __allocator<_Tp, __malloc_alloc_template<__inst> > allocator_type;
    };

  template<typename _Tp, typename _Tp1, bool __thr, int __inst>
    struct _Alloc_traits<_Tp, __allocator<_Tp1, __default_alloc_template<__thr, __inst> > >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __default_alloc_template<__thr,__inst> >
      _Alloc_type;
      typedef __allocator<_Tp, __default_alloc_template<__thr,__inst> >
      allocator_type;
    };

  template<typename _Tp, typename _Tp1, typename _Alloc>
    struct _Alloc_traits<_Tp, __allocator<_Tp1, __debug_alloc<_Alloc> > >
    {
      static const bool _S_instanceless = true;
      typedef __simple_alloc<_Tp, __debug_alloc<_Alloc> > _Alloc_type;
      typedef __allocator<_Tp, __debug_alloc<_Alloc> > allocator_type;
    };
  //@}

  // Inhibit implicit instantiations for required instantiations,
  // which are defined via explicit instantiations elsewhere.
  // NB: This syntax is a GNU extension.
#if _GLIBCPP_EXTERN_TEMPLATE
  extern template class allocator<char>;
  extern template class allocator<wchar_t>;
  extern template class __default_alloc_template<true,0>;
#endif
} // namespace std

#endif