// POSIX thread-related memory allocation -*- C++ -*-

// Copyright (C) 2001 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

/*
 * Copyright (c) 1996
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

/** @file pthread_allocimpl.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef _CPP_BITS_PTHREAD_ALLOCIMPL_H
#define _CPP_BITS_PTHREAD_ALLOCIMPL_H 1

// Pthread-specific node allocator.
// This is similar to the default allocator, except that free-list
// information is kept separately for each thread, avoiding locking.
// This should be reasonably fast even in the presence of threads.
// The downside is that storage may not be well-utilized.
// It is not an error to allocate memory in thread A and deallocate
// it in thread B.  But doing so transfers ownership of the memory,
// which can then only be reallocated by thread B.  Done on a regular
// basis, this effectively amounts to a storage leak.  It can also
// result in frequent sharing of cache lines among processors, with
// potentially serious performance consequences.
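//
// For example (illustrative sketch): if a producer thread hands
// allocated nodes to a consumer thread that frees them, every freed
// block migrates to the consumer's free lists and can only be reused
// from there:
//
//   void* __p = std::pthread_alloc::allocate(32);   // in thread A
//   /* ... hand __p off to thread B ... */
//   std::pthread_alloc::deallocate(__p, 32);        // now owned by thread B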

#include <bits/c++config.h>
#include <cerrno>
#include <cstring>              // for memset and memcpy
#include <cstdlib>              // for realloc and abort
#include <pthread.h>            // for the mutex and thread-specific key
#include <bits/stl_alloc.h>
#ifndef __RESTRICT
# define __RESTRICT
#endif

#include <new>

namespace std
{

#define __STL_DATA_ALIGNMENT 8

union _Pthread_alloc_obj {
  union _Pthread_alloc_obj * __free_list_link;
  char __client_data[__STL_DATA_ALIGNMENT];    /* The client sees this. */
};

// Pthread allocators don't appear to the client to have meaningful
// instances.  We do in fact need to associate some state with each
// thread.  That state is represented by
// _Pthread_alloc_per_thread_state<_Max_size>.

template<size_t _Max_size>
struct _Pthread_alloc_per_thread_state {
  typedef _Pthread_alloc_obj __obj;
  enum { _S_NFREELISTS = _Max_size/__STL_DATA_ALIGNMENT };
  _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS];
  _Pthread_alloc_per_thread_state<_Max_size> * __next;
  // Free list link for list of available per thread structures.
  // When one of these becomes available for reuse due to thread
  // termination, any objects in its free list remain associated
  // with it.  The whole structure may then be used by a newly
  // created thread.
  _Pthread_alloc_per_thread_state() : __next(0)
  {
    memset((void *)__free_list, 0, (size_t) _S_NFREELISTS * sizeof(__obj *));
  }
  // Returns an object of size __n, and possibly adds to the size-__n
  // free list.
  void *_M_refill(size_t __n);
};

// Pthread-specific allocator.
// The argument specifies the largest object size allocated from per-thread
// free lists.  Larger objects are allocated using malloc_alloc.
// _Max_size must be a power of 2.
template <size_t _Max_size = 128>
class _Pthread_alloc_template {

public: // but only for internal use:

  typedef _Pthread_alloc_obj __obj;

  // Allocates a chunk for __nobjs objects of size __size.  __nobjs may be
  // reduced if it is inconvenient to allocate the requested number.
  static char *_S_chunk_alloc(size_t __size, int &__nobjs);

  enum {_S_ALIGN = __STL_DATA_ALIGNMENT};

  static size_t _S_round_up(size_t __bytes) {
    return (((__bytes) + (int) _S_ALIGN - 1) & ~((int) _S_ALIGN - 1));
  }
  static size_t _S_freelist_index(size_t __bytes) {
    return (((__bytes) + (int) _S_ALIGN - 1) / (int) _S_ALIGN - 1);
  }
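
  // Worked example of the two helpers above (with _S_ALIGN == 8): a
  // request for 13 bytes rounds up to _S_round_up(13) == 16 and maps to
  // _S_freelist_index(13) == 1, the free list of 16-byte objects.
  // Requests of 1..8 bytes map to index 0, 9..16 to index 1, and so on,
  // up to _Max_size bytes at index _S_NFREELISTS - 1.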

private:
  // Chunk allocation state.  And other shared state.
  // Protected by _S_chunk_allocator_lock.
  static pthread_mutex_t _S_chunk_allocator_lock;
  static char *_S_start_free;
  static char *_S_end_free;
  static size_t _S_heap_size;
  // Per-thread states that are currently unclaimed by any thread.
  static _Pthread_alloc_per_thread_state<_Max_size>* _S_free_per_thread_states;
  // Pthread key under which the per-thread state is stored.
  static pthread_key_t _S_key;
  static bool _S_key_initialized;
  // Function to be called on thread exit to reclaim the per-thread
  // state.
  static void _S_destructor(void *instance);
  // Return a recycled or new per-thread state.
  static _Pthread_alloc_per_thread_state<_Max_size> *_S_new_per_thread_state();
  // Ensure that the current thread has an associated per-thread state,
  // and return it.
  static _Pthread_alloc_per_thread_state<_Max_size> *_S_get_per_thread_state();
  // Scoped lock over _S_chunk_allocator_lock.
  class _M_lock;
  friend class _M_lock;
  class _M_lock {
  public:
    _M_lock() { pthread_mutex_lock(&_S_chunk_allocator_lock); }
    ~_M_lock() { pthread_mutex_unlock(&_S_chunk_allocator_lock); }
  };

public:

  /* __n must be > 0 */
  static void * allocate(size_t __n)
  {
    __obj * volatile * __my_free_list;
    __obj * __RESTRICT __result;
    _Pthread_alloc_per_thread_state<_Max_size>* __a;

    if (__n > _Max_size) {
      return(malloc_alloc::allocate(__n));
    }
    if (!_S_key_initialized ||
        !(__a = (_Pthread_alloc_per_thread_state<_Max_size>*)
            pthread_getspecific(_S_key))) {
      __a = _S_get_per_thread_state();
    }
    __my_free_list = __a->__free_list + _S_freelist_index(__n);
    __result = *__my_free_list;
    if (__result == 0) {
      void *__r = __a->_M_refill(_S_round_up(__n));
      return __r;
    }
    *__my_free_list = __result->__free_list_link;
    return (__result);
  }

  /* __p may not be 0 */
  static void deallocate(void *__p, size_t __n)
  {
    __obj *__q = (__obj *)__p;
    __obj * volatile * __my_free_list;
    _Pthread_alloc_per_thread_state<_Max_size>* __a;

    if (__n > _Max_size) {
      malloc_alloc::deallocate(__p, __n);
      return;
    }
    if (!_S_key_initialized ||
        !(__a = (_Pthread_alloc_per_thread_state<_Max_size> *)
            pthread_getspecific(_S_key))) {
      __a = _S_get_per_thread_state();
    }
    __my_free_list = __a->__free_list + _S_freelist_index(__n);
    __q->__free_list_link = *__my_free_list;
    *__my_free_list = __q;
  }

  static void * reallocate(void *__p, size_t __old_sz, size_t __new_sz);

};

typedef _Pthread_alloc_template<> pthread_alloc;
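
// A minimal usage sketch of the raw interface (illustrative only).
// Blocks must be returned with the same size they were requested with,
// since the size selects the free list:
//
//   void __example()                            // hypothetical name
//   {
//     void* __p = pthread_alloc::allocate(40);  // 40 <= 128, so it is
//                                               // served from a free list
//     /* ... use the 40-byte block ... */
//     pthread_alloc::deallocate(__p, 40);       // back onto this
//   }                                           // thread's list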

template <size_t _Max_size>
void _Pthread_alloc_template<_Max_size>::_S_destructor(void * __instance)
{
  _M_lock __lock_instance;      // Need to acquire lock here.
  _Pthread_alloc_per_thread_state<_Max_size>* __s =
    (_Pthread_alloc_per_thread_state<_Max_size> *)__instance;
  __s->__next = _S_free_per_thread_states;
  _S_free_per_thread_states = __s;
}

template <size_t _Max_size>
_Pthread_alloc_per_thread_state<_Max_size> *
_Pthread_alloc_template<_Max_size>::_S_new_per_thread_state()
{
  /* lock already held here. */
  if (0 != _S_free_per_thread_states) {
    _Pthread_alloc_per_thread_state<_Max_size> *__result =
      _S_free_per_thread_states;
    _S_free_per_thread_states = _S_free_per_thread_states->__next;
    return __result;
  } else {
    return new _Pthread_alloc_per_thread_state<_Max_size>;
  }
}

template <size_t _Max_size>
_Pthread_alloc_per_thread_state<_Max_size> *
_Pthread_alloc_template<_Max_size>::_S_get_per_thread_state()
{
  /*REFERENCED*/
  _M_lock __lock_instance;      // Need to acquire lock here.
  int __ret_code;
  _Pthread_alloc_per_thread_state<_Max_size> * __result;
  if (!_S_key_initialized) {
    if (pthread_key_create(&_S_key, _S_destructor)) {
      std::__throw_bad_alloc();  // defined in functexcept.h
    }
    _S_key_initialized = true;
  }
  __result = _S_new_per_thread_state();
  __ret_code = pthread_setspecific(_S_key, __result);
  if (__ret_code) {
    if (__ret_code == ENOMEM) {
      std::__throw_bad_alloc();
    } else {
      // EINVAL
      abort();
    }
  }
  return __result;
}

/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much.                                        */
/* We assume that __size is properly aligned.                       */
template <size_t _Max_size>
char *_Pthread_alloc_template<_Max_size>
::_S_chunk_alloc(size_t __size, int &__nobjs)
{
  {
    char * __result;
    size_t __total_bytes;
    size_t __bytes_left;
    /*REFERENCED*/
    _M_lock __lock_instance;         // Acquire lock for this routine

    __total_bytes = __size * __nobjs;
    __bytes_left = _S_end_free - _S_start_free;
    if (__bytes_left >= __total_bytes) {
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return(__result);
    } else if (__bytes_left >= __size) {
      __nobjs = __bytes_left/__size;
      __total_bytes = __size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return(__result);
    } else {
      size_t __bytes_to_get =
        2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
      // Try to make use of the left-over piece.
      if (__bytes_left > 0) {
        _Pthread_alloc_per_thread_state<_Max_size>* __a =
          (_Pthread_alloc_per_thread_state<_Max_size>*)
            pthread_getspecific(_S_key);
        __obj * volatile * __my_free_list =
          __a->__free_list + _S_freelist_index(__bytes_left);

        ((__obj *)_S_start_free)->__free_list_link = *__my_free_list;
        *__my_free_list = (__obj *)_S_start_free;
      }
#     ifdef _SGI_SOURCE
      // Try to get memory that's aligned on something like a
      // cache line boundary, so as to avoid parceling out
      // parts of the same line to different threads and thus
      // possibly different processors.
      {
        const int __cache_line_size = 128;    // probable upper bound
        __bytes_to_get &= ~(__cache_line_size - 1);
        _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get);
        if (0 == _S_start_free) {
          _S_start_free = (char *)malloc_alloc::allocate(__bytes_to_get);
        }
      }
#     else /* !_SGI_SOURCE */
      _S_start_free = (char *)malloc_alloc::allocate(__bytes_to_get);
#     endif
      _S_heap_size += __bytes_to_get;
      _S_end_free = _S_start_free + __bytes_to_get;
    }
  }
  // lock is released here
  return(_S_chunk_alloc(__size, __nobjs));
}
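
// Worked example of the growth policy above (illustrative numbers): with
// an empty pool (_S_heap_size == 0), a refill request for 128 objects of
// 16 bytes gives __total_bytes == 2048 and __bytes_to_get == 2 * 2048 ==
// 4096.  As _S_heap_size grows, the _S_round_up(_S_heap_size >> 4) term
// makes each successive chunk proportionally larger.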

/* Returns an object of size __n, and optionally adds to the size-__n */
/* free list.  We assume that __n is properly aligned.                */
/* We hold the allocation lock.                                       */
template <size_t _Max_size>
void *_Pthread_alloc_per_thread_state<_Max_size>
::_M_refill(size_t __n)
{
  int __nobjs = 128;
  char * __chunk =
    _Pthread_alloc_template<_Max_size>::_S_chunk_alloc(__n, __nobjs);
  __obj * volatile * __my_free_list;
  __obj * __result;
  __obj * __current_obj, * __next_obj;
  int __i;

  if (1 == __nobjs) {
    return(__chunk);
  }
  __my_free_list = __free_list
    + _Pthread_alloc_template<_Max_size>::_S_freelist_index(__n);

  /* Build free list in chunk */
  __result = (__obj *)__chunk;
  *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
  for (__i = 1; ; __i++) {
    __current_obj = __next_obj;
    __next_obj = (__obj *)((char *)__next_obj + __n);
    if (__nobjs - 1 == __i) {
      __current_obj->__free_list_link = 0;
      break;
    } else {
      __current_obj->__free_list_link = __next_obj;
    }
  }
  return(__result);
}
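
// Worked example of the refill above: for __n == 16 and a full chunk
// (__nobjs == 128), _S_chunk_alloc returns 2048 contiguous bytes; the
// first 16-byte object is returned to the caller and the remaining 127
// are chained onto this thread's 16-byte free list.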

template <size_t _Max_size>
void *_Pthread_alloc_template<_Max_size>
::reallocate(void *__p, size_t __old_sz, size_t __new_sz)
{
  void * __result;
  size_t __copy_sz;

  if (__old_sz > _Max_size
      && __new_sz > _Max_size) {
    return(realloc(__p, __new_sz));
  }
  if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return(__p);
  __result = allocate(__new_sz);
  __copy_sz = __new_sz > __old_sz ? __old_sz : __new_sz;
  memcpy(__result, __p, __copy_sz);
  deallocate(__p, __old_sz);
  return(__result);
}

template <size_t _Max_size>
_Pthread_alloc_per_thread_state<_Max_size> *
_Pthread_alloc_template<_Max_size>::_S_free_per_thread_states = 0;

template <size_t _Max_size>
pthread_key_t _Pthread_alloc_template<_Max_size>::_S_key;

template <size_t _Max_size>
bool _Pthread_alloc_template<_Max_size>::_S_key_initialized = false;

template <size_t _Max_size>
pthread_mutex_t _Pthread_alloc_template<_Max_size>::_S_chunk_allocator_lock
= PTHREAD_MUTEX_INITIALIZER;

template <size_t _Max_size>
char *_Pthread_alloc_template<_Max_size>
::_S_start_free = 0;

template <size_t _Max_size>
char *_Pthread_alloc_template<_Max_size>
::_S_end_free = 0;

template <size_t _Max_size>
size_t _Pthread_alloc_template<_Max_size>
::_S_heap_size = 0;

template <class _Tp>
class pthread_allocator {
  typedef pthread_alloc _S_Alloc;          // The underlying allocator.
public:
  typedef size_t     size_type;
  typedef ptrdiff_t  difference_type;
  typedef _Tp*       pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp&       reference;
  typedef const _Tp& const_reference;
  typedef _Tp        value_type;

  template <class _NewType> struct rebind {
    typedef pthread_allocator<_NewType> other;
  };

  pthread_allocator() throw() {}
  pthread_allocator(const pthread_allocator&) throw() {}
  template <class _OtherType>
  pthread_allocator(const pthread_allocator<_OtherType>&)
    throw() {}
  ~pthread_allocator() throw() {}

  pointer address(reference __x) const { return &__x; }
  const_pointer address(const_reference __x) const { return &__x; }

  // __n is permitted to be 0.  The C++ standard says nothing about what
  // the return value is when __n == 0.
  _Tp* allocate(size_type __n, const void* = 0) {
    return __n != 0 ? static_cast<_Tp*>(_S_Alloc::allocate(__n * sizeof(_Tp)))
                    : 0;
  }

  // __p is not permitted to be a null pointer.
  void deallocate(pointer __p, size_type __n)
  { _S_Alloc::deallocate(__p, __n * sizeof(_Tp)); }

  size_type max_size() const throw()
  { return size_t(-1) / sizeof(_Tp); }

  void construct(pointer __p, const _Tp& __val) { new(__p) _Tp(__val); }
  void destroy(pointer __p) { __p->~_Tp(); }
};
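
// A usage sketch with a standard container (illustrative only): a
// node-based container draws its nodes from the per-thread pools when
// pthread_allocator is named as its allocator argument, e.g.
//
//   #include <list>
//
//   void __example()                      // hypothetical name
//   {
//     std::list<int, std::pthread_allocator<int> > __l;
//     __l.push_back(42);                  // node storage comes from this
//   }                                     // thread's pool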

template<>
class pthread_allocator<void> {
public:
  typedef size_t      size_type;
  typedef ptrdiff_t   difference_type;
  typedef void*       pointer;
  typedef const void* const_pointer;
  typedef void        value_type;

  template <class _NewType> struct rebind {
    typedef pthread_allocator<_NewType> other;
  };
};

template <size_t _Max_size>
inline bool operator==(const _Pthread_alloc_template<_Max_size>&,
                       const _Pthread_alloc_template<_Max_size>&)
{
  return true;
}

template <class _T1, class _T2>
inline bool operator==(const pthread_allocator<_T1>&,
                       const pthread_allocator<_T2>&)
{
  return true;
}

template <class _T1, class _T2>
inline bool operator!=(const pthread_allocator<_T1>&,
                       const pthread_allocator<_T2>&)
{
  return false;
}

template <class _Tp, size_t _Max_size>
struct _Alloc_traits<_Tp, _Pthread_alloc_template<_Max_size> >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, _Pthread_alloc_template<_Max_size> > _Alloc_type;
  typedef __allocator<_Tp, _Pthread_alloc_template<_Max_size> >
          allocator_type;
};

template <class _Tp, class _Atype, size_t _Max>
struct _Alloc_traits<_Tp, __allocator<_Atype, _Pthread_alloc_template<_Max> > >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, _Pthread_alloc_template<_Max> > _Alloc_type;
  typedef __allocator<_Tp, _Pthread_alloc_template<_Max> > allocator_type;
};

template <class _Tp, class _Atype>
struct _Alloc_traits<_Tp, pthread_allocator<_Atype> >
{
  static const bool _S_instanceless = true;
  typedef simple_alloc<_Tp, _Pthread_alloc_template<> > _Alloc_type;
  typedef pthread_allocator<_Tp> allocator_type;
};

} // namespace std

#endif /* _CPP_BITS_PTHREAD_ALLOCIMPL_H */

// Local Variables:
// mode:C++
// End: