xref: /openbsd-src/gnu/usr.bin/perl/malloc.c (revision e2a1b4748ac00cfe1e64a346f850b3c670166aef)
1 /*    malloc.c
2  *
3  */
4 
5 /*
6  * "'The Chamber of Records,' said Gimli. 'I guess that is where we now stand.'"
7  */
8 
9 /*
10   Here are some notes on configuring Perl's malloc.  (For non-perl
11   usage see below.)
12 
13   There are two macros which serve as bulk disablers of advanced
14   features of this malloc: NO_FANCY_MALLOC, PLAIN_MALLOC (undef by
15   default).  Look in the list of default values below to understand
16   their exact effect.  Defining NO_FANCY_MALLOC returns malloc.c to the
17   state of the malloc in Perl 5.004.  Additionally defining PLAIN_MALLOC
18   returns it to the state as of Perl 5.000.
19 
20   Note that some of the settings below may be ignored in the code based
21   on values of other macros.  The PERL_CORE symbol is only defined when
22   perl itself is being compiled (so malloc can make some assumptions
23   about perl's facilities being available to it).
24 
25   Each config option has a short description, followed by its name,
26   default value, and a comment about the default (if applicable).  Some
27   options take a precise value, while the others are just boolean.
28   The boolean ones are listed first.
29 
30     # Read configuration settings from malloc_cfg.h
31     HAVE_MALLOC_CFG_H		undef
32 
33     # Enable code for an emergency memory pool in $^M.  See perlvar.pod
34     # for a description of $^M.
35     PERL_EMERGENCY_SBRK		(!PLAIN_MALLOC && (PERL_CORE || !NO_MALLOC_DYNAMIC_CFG))
36 
37     # Enable code for printing memory statistics.
38     DEBUGGING_MSTATS		(!PLAIN_MALLOC && PERL_CORE)
39 
40     # Move allocation info for small buckets into separate areas.
41     # Memory optimization (especially for small allocations, of the
42     # less than 64 bytes).  Since perl usually makes a large number
43     # of small allocations, this is usually a win.
44     PACK_MALLOC			(!PLAIN_MALLOC && !RCHECK)
45 
46     # Add one page to big powers of two when calculating bucket size.
47     # This is targeted at big allocations, as are common in image
48     # processing.
49     TWO_POT_OPTIMIZE		!PLAIN_MALLOC
50 
51     # Use intermediate bucket sizes between powers-of-two.  This is
52     # generally a memory optimization, and a (small) speed pessimization.
53     BUCKETS_ROOT2		!NO_FANCY_MALLOC
54 
55     # Do not check small deallocations for bad free().  Memory
56     # and speed optimization, error reporting pessimization.
57     IGNORE_SMALL_BAD_FREE	(!NO_FANCY_MALLOC && !RCHECK)
58 
59     # Use table lookup to decide in which bucket a given allocation will go.
60     SMALL_BUCKET_VIA_TABLE	!NO_FANCY_MALLOC
61 
62     # Use a perl-defined sbrk() instead of the (presumably broken or
63     # missing) system-supplied sbrk().
64     USE_PERL_SBRK		undef
65 
66     # Use system malloc() (or calloc() etc.) to emulate sbrk(). Normally
67     # only used with broken sbrk()s.
68     PERL_SBRK_VIA_MALLOC	undef
69 
70     # Which allocator to use if PERL_SBRK_VIA_MALLOC
71     SYSTEM_ALLOC(a) 		malloc(a)
72 
73     # Minimal alignment (in bytes, should be a power of 2) of SYSTEM_ALLOC
74     SYSTEM_ALLOC_ALIGNMENT	MEM_ALIGNBYTES
75 
76     # Disable memory overwrite checking with DEBUGGING.  Memory and speed
77     # optimization, error reporting pessimization.
78     NO_RCHECK			undef
79 
80     # Enable memory overwrite checking with DEBUGGING.  Memory and speed
81     # pessimization, error reporting optimization
82     RCHECK			(DEBUGGING && !NO_RCHECK)
83 
84     # Do not overwrite uninit areas with DEBUGGING.  Speed
85     # optimization, error reporting pessimization
86     NO_MFILL			undef
87 
88     # Overwrite uninit areas with DEBUGGING.  Speed
89     # pessimization, error reporting optimization
90     MALLOC_FILL			(DEBUGGING && !NO_RCHECK && !NO_MFILL)
91 
92     # Do not check overwritten uninit areas with DEBUGGING.  Speed
93     # optimization, error reporting pessimization
94     NO_FILL_CHECK		undef
95 
96     # Check overwritten uninit areas with DEBUGGING.  Speed
97     # pessimization, error reporting optimization
98     MALLOC_FILL_CHECK		(DEBUGGING && !NO_RCHECK && !NO_FILL_CHECK)
99 
100     # Failed allocations bigger than this size croak (if
101     # PERL_EMERGENCY_SBRK is enabled) without touching $^M.  See
102     # perlvar.pod for a description of $^M.
103     BIG_SIZE			 (1<<16)	# 64K
104 
105     # Starting from this power of two, add an extra page to the
106     # size of the bucket. This enables optimized allocations of sizes
107     # close to powers of 2.  Note that the value is indexed at 0.
108     FIRST_BIG_POW2 		15		# 32K, 16K is used too often
109 
110     # Estimate of minimal memory footprint.  malloc uses this value to
111     # request the most reasonable largest blocks of memory from the system.
112     FIRST_SBRK 			(48*1024)
113 
114     # Round up sbrk()s to multiples of this.
115     MIN_SBRK 			2048
116 
117     # Round up sbrk()s to multiples of this percent of footprint.
118     MIN_SBRK_FRAC 		3
119 
120     # Round up sbrk()s to multiples of this multiple of 1/1000 of footprint.
121     MIN_SBRK_FRAC1000 		(10 * MIN_SBRK_FRAC)
122 
123     # Add this much memory to big powers of two to get the bucket size.
124     PERL_PAGESIZE 		4096
125 
126     # This many sbrk() discontinuities should be tolerated even
127     # from the start without deciding that sbrk() is usually
128     # discontinuous.
129     SBRK_ALLOW_FAILURES		3
130 
131     # This many continuous sbrk()s compensate for one discontinuous one.
132     SBRK_FAILURE_PRICE		50
133 
134     # Some configurations may ask for 12-byte-or-so allocations which
135     # require 8-byte alignment (?!).  In such situation one needs to
136     # define this to disable 12-byte bucket (will increase memory footprint)
137     STRICT_ALIGNMENT		undef
138 
139     # Do not allow configuration of runtime options at runtime
140     NO_MALLOC_DYNAMIC_CFG	undef
141 
142     # Do not allow configuration of runtime options via $ENV{PERL_MALLOC_OPT}
143     NO_PERL_MALLOC_ENV		undef
144 
145 	[The variable consists of ;-separated parts of the form CODE=VALUE
146 	 with 1-character codes F, M, f, A, P, G, d, a, c for runtime
147 	 configuration of FIRST_SBRK, MIN_SBRK, MIN_SBRK_FRAC1000,
148 	 SBRK_ALLOW_FAILURES, SBRK_FAILURE_PRICE, sbrk_goodness,
149 	 filldead, fillalive, fillcheck.  The last 3 are for DEBUGGING
150 	 build, and allow switching the tests for free()ed memory read,
151 	 uninit memory reads, and free()ed memory write.]
152 
153   This implementation assumes that calling PerlIO_printf() does not
154   result in any memory allocation calls (used during a panic).
155 
156  */
157 
158 /*
159    If used outside of Perl environment, it may be useful to redefine
160    the following macros (listed below with defaults):
161 
162      # Type of address returned by allocation functions
163      Malloc_t				void *
164 
165      # Type of size argument for allocation functions
166      MEM_SIZE				unsigned long
167 
168      # size of void*
169      PTRSIZE				4
170 
171      # Maximal value in LONG
172      LONG_MAX				0x7FFFFFFF
173 
174      # Unsigned integer type big enough to keep a pointer
175      UV					unsigned long
176 
177      # Signed integer of the same sizeof() as UV
178      IV					long
179 
180      # Type of pointer with 1-byte granularity
181      caddr_t				char *
182 
183      # Type returned by free()
184      Free_t				void
185 
186      # Conversion of pointer to integer
187      PTR2UV(ptr)			((UV)(ptr))
188 
189      # Conversion of integer to pointer
190      INT2PTR(type, i)			((type)(i))
191 
192      # printf()-%-Conversion of UV to pointer
193      UVuf				"lu"
194 
195      # printf()-%-Conversion of UV to hex pointer
196      UVxf				"lx"
197 
198      # Alignment to use
199      MEM_ALIGNBYTES			4
200 
201      # Very fatal condition reporting function (cannot call any )
202      fatalcroak(arg)			write(2,arg,strlen(arg)) + exit(2)
203 
204      # Fatal error reporting function
205      croak(format, arg)			warn(idem) + exit(1)
206 
207      # Fatal error reporting function
208      croak2(format, arg1, arg2)		warn2(idem) + exit(1)
209 
210      # Error reporting function
211      warn(format, arg)			fprintf(stderr, idem)
212 
213      # Error reporting function
214      warn2(format, arg1, arg2)		fprintf(stderr, idem)
215 
216      # Locking/unlocking for MT operation
217      MALLOC_LOCK			MUTEX_LOCK(&PL_malloc_mutex)
218      MALLOC_UNLOCK			MUTEX_UNLOCK(&PL_malloc_mutex)
219 
220      # Locking/unlocking mutex for MT operation
221      MUTEX_LOCK(l)			void
222      MUTEX_UNLOCK(l)			void
223  */
224 
225 #ifdef HAVE_MALLOC_CFG_H
226 #  include "malloc_cfg.h"
227 #endif
228 
229 #ifndef NO_FANCY_MALLOC
230 #  ifndef SMALL_BUCKET_VIA_TABLE
231 #    define SMALL_BUCKET_VIA_TABLE
232 #  endif
233 #  ifndef BUCKETS_ROOT2
234 #    define BUCKETS_ROOT2
235 #  endif
236 #  ifndef IGNORE_SMALL_BAD_FREE
237 #    define IGNORE_SMALL_BAD_FREE
238 #  endif
239 #endif
240 
241 #ifndef PLAIN_MALLOC			/* Bulk enable features */
242 #  ifndef PACK_MALLOC
243 #      define PACK_MALLOC
244 #  endif
245 #  ifndef TWO_POT_OPTIMIZE
246 #    define TWO_POT_OPTIMIZE
247 #  endif
248 #  if (defined(PERL_CORE) || !defined(NO_MALLOC_DYNAMIC_CFG)) && !defined(PERL_EMERGENCY_SBRK)
249 #    define PERL_EMERGENCY_SBRK
250 #  endif
251 #  if defined(PERL_CORE) && !defined(DEBUGGING_MSTATS)
252 #    define DEBUGGING_MSTATS
253 #  endif
254 #endif
255 
256 #define MIN_BUC_POW2 (sizeof(void*) > 4 ? 3 : 2) /* Allow for 4-byte arena. */
257 #define MIN_BUCKET (MIN_BUC_POW2 * BUCKETS_PER_POW2)
258 
259 #if !(defined(I286) || defined(atarist) || defined(__MINT__))
260 	/* take 2k unless the block is bigger than that */
261 #  define LOG_OF_MIN_ARENA 11
262 #else
263 	/* take 16k unless the block is bigger than that
264 	   (80286s like large segments!), probably good on the atari too */
265 #  define LOG_OF_MIN_ARENA 14
266 #endif
267 
268 #ifndef lint
269 #  if defined(DEBUGGING) && !defined(NO_RCHECK)
270 #    define RCHECK
271 #  endif
272 #  if defined(DEBUGGING) && !defined(NO_RCHECK) && !defined(NO_MFILL) && !defined(MALLOC_FILL)
273 #    define MALLOC_FILL
274 #  endif
275 #  if defined(DEBUGGING) && !defined(NO_RCHECK) && !defined(NO_FILL_CHECK) && !defined(MALLOC_FILL_CHECK)
276 #    define MALLOC_FILL_CHECK
277 #  endif
278 #  if defined(RCHECK) && defined(IGNORE_SMALL_BAD_FREE)
279 #    undef IGNORE_SMALL_BAD_FREE
280 #  endif
281 /*
282  * malloc.c (Caltech) 2/21/82
283  * Chris Kingsley, kingsley@cit-20.
284  *
285  * This is a very fast storage allocator.  It allocates blocks of a small
286  * number of different sizes, and keeps free lists of each size.  Blocks that
287  * don't exactly fit are passed up to the next larger size.  In this
288  * implementation, the available sizes are 2^n-4 (or 2^n-12) bytes long.
289  * If PACK_MALLOC is defined, small blocks are 2^n bytes long.
290  * This is designed for use in a program that uses vast quantities of memory,
291  * but bombs when it runs out.
292  *
293  * Modifications Copyright Ilya Zakharevich 1996-99.
294  *
295  * Still very quick, but much more thrifty.  (Std config is 10% slower
296  * than it was, and takes 67% of old heap size for typical usage.)
297  *
298  * Allocations of small blocks are now table-driven to many different
299  * buckets.  Sizes of really big buckets are increased to accomodata
300  * common size=power-of-2 blocks.  Running-out-of-memory is made into
301  * an exception.  Deeply configurable and thread-safe.
302  *
303  */
304 
305 #ifdef PERL_CORE
306 #  include "EXTERN.h"
307 #  define PERL_IN_MALLOC_C
308 #  include "perl.h"
309 #  if defined(PERL_IMPLICIT_CONTEXT)
310 #    define croak	Perl_croak_nocontext
311 #    define croak2	Perl_croak_nocontext
312 #    define warn	Perl_warn_nocontext
313 #    define warn2	Perl_warn_nocontext
314 #  else
315 #    define croak2	croak
316 #    define warn2	warn
317 #  endif
318 #  if defined(USE_5005THREADS) || defined(USE_ITHREADS)
319 #     define PERL_MAYBE_ALIVE	PL_thr_key
320 #  else
321 #     define PERL_MAYBE_ALIVE	1
322 #  endif
323 #else
324 #  ifdef PERL_FOR_X2P
325 #    include "../EXTERN.h"
326 #    include "../perl.h"
327 #  else
328 #    include <stdlib.h>
329 #    include <stdio.h>
330 #    include <memory.h>
331 #    ifdef OS2
332 #      include <io.h>
333 #    endif
334 #    include <string.h>
335 #    ifndef Malloc_t
336 #      define Malloc_t void *
337 #    endif
338 #    ifndef PTRSIZE
339 #      define PTRSIZE 4
340 #    endif
341 #    ifndef MEM_SIZE
342 #      define MEM_SIZE unsigned long
343 #    endif
344 #    ifndef LONG_MAX
345 #      define LONG_MAX 0x7FFFFFFF
346 #    endif
347 #    ifndef UV
348 #      define UV unsigned long
349 #    endif
350 #    ifndef IV
351 #      define IV long
352 #    endif
353 #    ifndef caddr_t
354 #      define caddr_t char *
355 #    endif
356 #    ifndef Free_t
357 #      define Free_t void
358 #    endif
359 #    define Copy(s,d,n,t) (void)memcpy((char*)(d),(char*)(s), (n) * sizeof(t))
360 #    define PerlEnv_getenv getenv
361 #    define PerlIO_printf fprintf
362 #    define PerlIO_stderr() stderr
363 #    define PerlIO_puts(f,s)		fputs(s,f)
364 #    ifndef INT2PTR
365 #      define INT2PTR(t,i)		((t)(i))
366 #    endif
367 #    ifndef PTR2UV
368 #      define PTR2UV(p)			((UV)(p))
369 #    endif
370 #    ifndef UVuf
371 #      define UVuf			"lu"
372 #    endif
373 #    ifndef UVxf
374 #      define UVxf			"lx"
375 #    endif
376 #    ifndef Nullch
377 #      define Nullch			NULL
378 #    endif
379 #    ifndef MEM_ALIGNBYTES
380 #      define MEM_ALIGNBYTES		4
381 #    endif
382 #  endif
383 #  ifndef croak				/* make depend */
384 #    define croak(mess, arg) (warn((mess), (arg)), exit(1))
385 #  endif
386 #  ifndef croak2			/* make depend */
387 #    define croak2(mess, arg1, arg2) (warn2((mess), (arg1), (arg2)), exit(1))
388 #  endif
389 #  ifndef warn
390 #    define warn(mess, arg) fprintf(stderr, (mess), (arg))
391 #  endif
392 #  ifndef warn2
393 #    define warn2(mess, arg1, arg2) fprintf(stderr, (mess), (arg1), (arg2))
394 #  endif
395 #  ifdef DEBUG_m
396 #    undef DEBUG_m
397 #  endif
398 #  define DEBUG_m(a)
399 #  ifdef DEBUGGING
400 #     undef DEBUGGING
401 #  endif
402 #  ifndef pTHX
403 #     define pTHX		void
404 #     define pTHX_
405 #     ifdef HASATTRIBUTE
406 #        define dTHX		extern int Perl___notused PERL_UNUSED_DECL
407 #     else
408 #        define dTHX            extern int Perl___notused
409 #     endif
410 #     define WITH_THX(s)	s
411 #  endif
412 #  ifndef PERL_GET_INTERP
413 #     define PERL_GET_INTERP	PL_curinterp
414 #  endif
415 #  define PERL_MAYBE_ALIVE	1
416 #  ifndef Perl_malloc
417 #     define Perl_malloc malloc
418 #  endif
419 #  ifndef Perl_mfree
420 #     define Perl_mfree free
421 #  endif
422 #  ifndef Perl_realloc
423 #     define Perl_realloc realloc
424 #  endif
425 #  ifndef Perl_calloc
426 #     define Perl_calloc calloc
427 #  endif
428 #  ifndef Perl_strdup
429 #     define Perl_strdup strdup
430 #  endif
431 #endif	/* defined PERL_CORE */
432 
433 #ifndef MUTEX_LOCK
434 #  define MUTEX_LOCK(l)
435 #endif
436 
437 #ifndef MUTEX_UNLOCK
438 #  define MUTEX_UNLOCK(l)
439 #endif
440 
441 #ifndef MALLOC_LOCK
442 #  define MALLOC_LOCK		MUTEX_LOCK(&PL_malloc_mutex)
443 #endif
444 
445 #ifndef MALLOC_UNLOCK
446 #  define MALLOC_UNLOCK		MUTEX_UNLOCK(&PL_malloc_mutex)
447 #endif
448 
449 #  ifndef fatalcroak				/* make depend */
450 #    define fatalcroak(mess)	(write(2, (mess), strlen(mess)), exit(2))
451 #  endif
452 
453 #ifdef DEBUGGING
454 #  undef DEBUG_m
455 #  define DEBUG_m(a) 							\
456     STMT_START {							\
457 	if (PERL_MAYBE_ALIVE && PERL_GET_THX) {						\
458 	    dTHX;							\
459 	    if (DEBUG_m_TEST) {						\
460 		PL_debug &= ~DEBUG_m_FLAG;				\
461 		a;							\
462 		PL_debug |= DEBUG_m_FLAG;				\
463 	    }								\
464 	}								\
465     } STMT_END
466 #endif
467 
468 #ifdef PERL_IMPLICIT_CONTEXT
469 #  define PERL_IS_ALIVE		aTHX
470 #else
471 #  define PERL_IS_ALIVE		TRUE
472 #endif
473 
474 
475 /*
476  * Layout of memory:
477  * ~~~~~~~~~~~~~~~~
478  * The memory is broken into "blocks" which occupy multiples of 2K (and
479  * generally speaking, have size "close" to a power of 2).  The addresses
480  * of such *unused* blocks are kept in nextf[i] with big enough i.  (nextf
481  * is an array of linked lists.)  (Addresses of used blocks are not known.)
482  *
483  * Moreover, since the algorithm may try to "bite" smaller blocks out
484  * of unused bigger ones, there are also regions of "irregular" size,
485  * managed separately, by a linked list chunk_chain.
486  *
487  * The third type of storage is the sbrk()ed-but-not-yet-used space, its
488  * end and size are kept in last_sbrk_top and sbrked_remains.
489  *
490  * Growing blocks "in place":
491  * ~~~~~~~~~~~~~~~~~~~~~~~~~
492  * The address of the block with the greatest address is kept in last_op
493  * (if not known, last_op is 0).  If it is known that the memory above
494  * last_op is not continuous, or contains a chunk from chunk_chain,
495  * last_op is set to 0.
496  *
497  * The chunk with address last_op may be grown by expanding into
498  * sbrk()ed-but-not-yet-used space, or trying to sbrk() more continuous
499  * memory.
500  *
501  * Management of last_op:
502  * ~~~~~~~~~~~~~~~~~~~~~
503  *
504  * free() never changes the boundaries of blocks, so is not relevant.
505  *
506  * The only way realloc() may change the boundaries of blocks is if it
507  * grows a block "in place".  However, in the case of success such a
508  * chunk is automatically last_op, and it remains last_op.  In the case
509  * of failure getpages_adjacent() clears last_op.
510  *
511  * malloc() may change blocks by calling morecore() only.
512  *
513  * morecore() may create new blocks by:
514  *   a) biting pieces from chunk_chain (cannot create one above last_op);
515  *   b) biting a piece from an unused block (if block was last_op, this
516  *      may create a chunk from chain above last_op, thus last_op is
517  *      invalidated in such a case).
518  *   c) biting of sbrk()ed-but-not-yet-used space.  This creates
519  *      a block which is last_op.
520  *   d) Allocating new pages by calling getpages();
521  *
522  * getpages() creates a new block.  It marks last_op at the bottom of
523  * the chunk of memory it returns.
524  *
525  * Active pages footprint:
526  * ~~~~~~~~~~~~~~~~~~~~~~
527  * Note that we do not need to traverse the lists in nextf[i], just take
528  * the first element of this list.  However, we *need* to traverse the
529  * list in chunk_chain, but most the time it should be a very short one,
530  * so we do not step on a lot of pages we are not going to use.
531  *
532  * Flaws:
533  * ~~~~~
534  * get_from_bigger_buckets(): forget to increment price => Quite
535  * aggressive.
536  */
537 
538 /* I don't much care whether these are defined in sys/types.h--LAW */
539 
540 #define u_char unsigned char
541 #define u_int unsigned int
542 /*
543  * I removed the definition of u_bigint which appeared to be u_bigint = UV
544  * u_bigint was only used in TWOK_MASKED and TWOK_SHIFT
545  * where I have used PTR2UV.  RMB
546  */
547 #define u_short unsigned short
548 
549 /* 286 and atarist like big chunks, which gives too much overhead. */
550 #if (defined(RCHECK) || defined(I286) || defined(atarist) || defined(__MINT__)) && defined(PACK_MALLOC)
551 #  undef PACK_MALLOC
552 #endif
553 
554 /*
555  * The description below is applicable if PACK_MALLOC is not defined.
556  *
557  * The overhead on a block is at least 4 bytes.  When free, this space
558  * contains a pointer to the next free block, and the bottom two bits must
559  * be zero.  When in use, the first byte is set to MAGIC, and the second
560  * byte is the size index.  The remaining bytes are for alignment.
561  * If range checking is enabled and the size of the block fits
562  * in two bytes, then the top two bytes hold the size of the requested block
563  * plus the range checking words, and the header word MINUS ONE.
564  */
565 union	overhead {
566 	union	overhead *ov_next;	/* when free */
567 #if MEM_ALIGNBYTES > 4
568 	double	strut;			/* alignment problems */
569 #  if MEM_ALIGNBYTES > 8
570 	char	sstrut[MEM_ALIGNBYTES]; /* for the sizing */
571 #  endif
572 #endif
573 	struct {
574 /*
575  * Keep the ovu_index and ovu_magic in this order, having a char
576  * field first gives alignment indigestion in some systems, such as
577  * MachTen.
578  */
579 		u_char	ovu_index;	/* bucket # */
580 		u_char	ovu_magic;	/* magic number */
581 #ifdef RCHECK
582 	    /* Subtract one to fit into u_short for an extra bucket */
583 		u_short	ovu_size;	/* block size (requested + overhead - 1) */
584 		u_int	ovu_rmagic;	/* range magic number */
585 #endif
586 	} ovu;
587 #define	ov_magic	ovu.ovu_magic
588 #define	ov_index	ovu.ovu_index
589 #define	ov_size		ovu.ovu_size
590 #define	ov_rmagic	ovu.ovu_rmagic
591 };
592 
593 #define	MAGIC		0xff		/* magic # on accounting info */
594 #define RMAGIC		0x55555555	/* magic # on range info */
595 #define RMAGIC_C	0x55		/* magic # on range info */
596 
597 #ifdef RCHECK
598 #  define	RMAGIC_SZ	sizeof (u_int) /* Overhead at end of bucket */
599 #  ifdef TWO_POT_OPTIMIZE
600 #    define MAX_SHORT_BUCKET (12 * BUCKETS_PER_POW2) /* size-1 fits in short */
601 #  else
602 #    define MAX_SHORT_BUCKET (13 * BUCKETS_PER_POW2)
603 #  endif
604 #else
605 #  define	RMAGIC_SZ	0
606 #endif
607 
608 #if !defined(PACK_MALLOC) && defined(BUCKETS_ROOT2)
609 #  undef BUCKETS_ROOT2
610 #endif
611 
612 #ifdef BUCKETS_ROOT2
613 #  define BUCKET_TABLE_SHIFT 2
614 #  define BUCKET_POW2_SHIFT 1
615 #  define BUCKETS_PER_POW2 2
616 #else
617 #  define BUCKET_TABLE_SHIFT MIN_BUC_POW2
618 #  define BUCKET_POW2_SHIFT 0
619 #  define BUCKETS_PER_POW2 1
620 #endif
621 
622 #if !defined(MEM_ALIGNBYTES) || ((MEM_ALIGNBYTES > 4) && !defined(STRICT_ALIGNMENT))
623 /* Figure out the alignment of void*. */
624 struct aligner {
625   char c;
626   void *p;
627 };
628 #  define ALIGN_SMALL ((int)((caddr_t)&(((struct aligner*)0)->p)))
629 #else
630 #  define ALIGN_SMALL MEM_ALIGNBYTES
631 #endif
632 
633 #define IF_ALIGN_8(yes,no)	((ALIGN_SMALL>4) ? (yes) : (no))
634 
635 #ifdef BUCKETS_ROOT2
636 #  define MAX_BUCKET_BY_TABLE 13
637 static u_short buck_size[MAX_BUCKET_BY_TABLE + 1] =
638   {
639       0, 0, 0, 0, 4, 4, 8, 12, 16, 24, 32, 48, 64, 80,
640   };
641 #  define BUCKET_SIZE_NO_SURPLUS(i) ((i) % 2 ? buck_size[i] : (1 << ((i) >> BUCKET_POW2_SHIFT)))
642 #  define BUCKET_SIZE_REAL(i) ((i) <= MAX_BUCKET_BY_TABLE		\
643 			       ? buck_size[i] 				\
644 			       : ((1 << ((i) >> BUCKET_POW2_SHIFT))	\
645 				  - MEM_OVERHEAD(i)			\
646 				  + POW2_OPTIMIZE_SURPLUS(i)))
647 #else
648 #  define BUCKET_SIZE_NO_SURPLUS(i) (1 << ((i) >> BUCKET_POW2_SHIFT))
649 #  define BUCKET_SIZE(i) (BUCKET_SIZE_NO_SURPLUS(i) + POW2_OPTIMIZE_SURPLUS(i))
650 #  define BUCKET_SIZE_REAL(i) (BUCKET_SIZE(i) - MEM_OVERHEAD(i))
651 #endif
652 
653 
654 #ifdef PACK_MALLOC
655 /* In this case there are several possible layout of arenas depending
656  * on the size.  Arenas are of sizes multiple to 2K, 2K-aligned, and
657  * have a size close to a power of 2.
658  *
659  * Arenas of the size >= 4K keep one chunk only.  Arenas of size 2K
660  * may keep one chunk or multiple chunks.  Here are the possible
661  * layouts of arenas:
662  *
663  *	# One chunk only, chunksize 2^k + SOMETHING - ALIGN, k >= 11
664  *
665  * INDEX MAGIC1 UNUSED CHUNK1
666  *
667  *	# Multichunk with sanity checking and chunksize 2^k-ALIGN, k>7
668  *
669  * INDEX MAGIC1 MAGIC2 MAGIC3 UNUSED CHUNK1 CHUNK2 CHUNK3 ...
670  *
671  *	# Multichunk with sanity checking and size 2^k-ALIGN, k=7
672  *
673  * INDEX MAGIC1 MAGIC2 MAGIC3 UNUSED CHUNK1 UNUSED CHUNK2 CHUNK3 ...
674  *
675  *	# Multichunk with sanity checking and size up to 80
676  *
677  * INDEX UNUSED MAGIC1 UNUSED MAGIC2 UNUSED ... CHUNK1 CHUNK2 CHUNK3 ...
678  *
679  *	# No sanity check (usually up to 48=byte-long buckets)
680  * INDEX UNUSED CHUNK1 CHUNK2 ...
681  *
682  * Above INDEX and MAGIC are one-byte-long.  Sizes of UNUSED are
683  * appropriate to keep algorithms simple and memory aligned.  INDEX
684  * encodes the size of the chunk, while MAGICn encodes state (used,
685  * free or non-managed-by-us-so-it-indicates-a-bug) of CHUNKn.  MAGIC
686  * is used for sanity checking purposes only.  SOMETHING is 0 or 4K
687  * (to make size of big CHUNK accomodate allocations for powers of two
688  * better).
689  *
690  * [There is no need to alignment between chunks, since C rules ensure
691  *  that structs which need 2^k alignment have sizeof which is
692  *  divisible by 2^k.  Thus as far as the last chunk is aligned at the
693  *  end of the arena, and 2K-alignment does not contradict things,
694  *  everything is going to be OK for sizes of chunks 2^n and 2^n +
695  *  2^k.  Say, 80-bit buckets will be 16-bit aligned, and as far as we
696  *  put allocations for requests in 65..80 range, all is fine.
697  *
698  *  Note, however, that standard malloc() puts more strict
699  *  requirements than the above C rules.  Moreover, our algorithms of
700  *  realloc() may break this idyll, but we suppose that realloc() does
701  *  need not change alignment.]
702  *
703  * Is very important to make calculation of the offset of MAGICm as
704  * quick as possible, since it is done on each malloc()/free().  In
705  * fact it is so quick that it has quite little effect on the speed of
706  * doing malloc()/free().  [By default] We forego such calculations
707  * for small chunks, but only to save extra 3% of memory, not because
708  * of speed considerations.
709  *
710  * Here is the algorithm [which is the same for all the allocations
711  * schemes above], see OV_MAGIC(block,bucket).  Let OFFSETm be the
712  * offset of the CHUNKm from the start of ARENA.  Then offset of
713  * MAGICm is (OFFSET1 >> SHIFT) + ADDOFFSET.  Here SHIFT and ADDOFFSET
714  * are numbers which depend on the size of the chunks only.
715  *
716  * Let as check some sanity conditions.  Numbers OFFSETm>>SHIFT are
717  * different for all the chunks in the arena if 2^SHIFT is not greater
718  * than size of the chunks in the arena.  MAGIC1 will not overwrite
719  * INDEX provided ADDOFFSET is >0 if OFFSET1 < 2^SHIFT.  MAGIClast
720  * will not overwrite CHUNK1 if OFFSET1 > (OFFSETlast >> SHIFT) +
721  * ADDOFFSET.
722  *
723  * Make SHIFT the maximal possible (there is no point in making it
724  * smaller).  Since OFFSETlast is 2K - CHUNKSIZE, above restrictions
725  * give restrictions on OFFSET1 and on ADDOFFSET.
726  *
727  * In particular, for chunks of size 2^k with k>=6 we can put
728  * ADDOFFSET to be from 0 to 2^k - 2^(11-k), and have
729  * OFFSET1==chunksize.  For chunks of size 80 OFFSET1 of 2K%80=48 is
730  * large enough to have ADDOFFSET between 1 and 16 (similarly for 96,
731  * when ADDOFFSET should be 1).  In particular, keeping MAGICs for
732  * these sizes gives no additional size penalty.
733  *
734  * However, for chunks of size 2^k with k<=5 this gives OFFSET1 >=
735  * ADDOFSET + 2^(11-k).  Keeping ADDOFFSET 0 allows for 2^(11-k)-2^(11-2k)
736  * chunks per arena.  This is smaller than 2^(11-k) - 1 which are
737  * needed if no MAGIC is kept.  [In fact, having a negative ADDOFFSET
738  * would allow for slightly more buckets per arena for k=2,3.]
739  *
740  * Similarly, for chunks of size 3/2*2^k with k<=5 MAGICs would span
741  * the area up to 2^(11-k)+ADDOFFSET.  For k=4 this give optimal
742  * ADDOFFSET as -7..0.  For k=3 ADDOFFSET can go up to 4 (with tiny
743  * savings for negative ADDOFFSET).  For k=5 ADDOFFSET can go -1..16
744  * (with no savings for negative values).
745  *
746  * In particular, keeping ADDOFFSET 0 for sizes of chunks up to 2^6
747  * leads to tiny pessimizations in case of sizes 4, 8, 12, 24, and
748  * leads to no contradictions except for size=80 (or 96.)
749  *
750  * However, it also makes sense to keep no magic for sizes 48 or less.
751  * This is what we do.  In this case one needs ADDOFFSET>=1 also for
752  * chunksizes 12, 24, and 48, unless one gets one less chunk per
753  * arena.
754  *
755  * The algo of OV_MAGIC(block,bucket) keeps ADDOFFSET 0 until
756  * chunksize of 64, then makes it 1.
757  *
758  * This allows for an additional optimization: the above scheme leads
759  * to giant overheads for sizes 128 or more (one whole chunk needs to
760  * be sacrifised to keep INDEX).  Instead we use chunks not of size
761  * 2^k, but of size 2^k-ALIGN.  If we pack these chunks at the end of
762  * the arena, then the beginnings are still in different 2^k-long
763  * sections of the arena if k>=7 for ALIGN==4, and k>=8 if ALIGN=8.
764  * Thus for k>7 the above algo of calculating the offset of the magic
765  * will still give different answers for different chunks.  And to
766  * avoid the overrun of MAGIC1 into INDEX, one needs ADDOFFSET of >=1.
767  * In the case k=7 we just move the first chunk an extra ALIGN
768  * backward inside the ARENA (this is done once per arena lifetime,
769  * thus is not a big overhead).  */
770 #  define MAX_PACKED_POW2 6
771 #  define MAX_PACKED (MAX_PACKED_POW2 * BUCKETS_PER_POW2 + BUCKET_POW2_SHIFT)
772 #  define MAX_POW2_ALGO ((1<<(MAX_PACKED_POW2 + 1)) - M_OVERHEAD)
773 #  define TWOK_MASK ((1<<LOG_OF_MIN_ARENA) - 1)
774 #  define TWOK_MASKED(x) (PTR2UV(x) & ~TWOK_MASK)
775 #  define TWOK_SHIFT(x) (PTR2UV(x) & TWOK_MASK)
776 #  define OV_INDEXp(block) (INT2PTR(u_char*,TWOK_MASKED(block)))
777 #  define OV_INDEX(block) (*OV_INDEXp(block))
778 #  define OV_MAGIC(block,bucket) (*(OV_INDEXp(block) +			\
779 				    (TWOK_SHIFT(block)>>		\
780 				     (bucket>>BUCKET_POW2_SHIFT)) +	\
781 				    (bucket >= MIN_NEEDS_SHIFT ? 1 : 0)))
782     /* A bucket can have a shift smaller than it size, we need to
783        shift its magic number so it will not overwrite index: */
784 #  ifdef BUCKETS_ROOT2
785 #    define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2 - 1) /* Shift 80 greater than chunk 64. */
786 #  else
787 #    define MIN_NEEDS_SHIFT (7*BUCKETS_PER_POW2) /* Shift 128 greater than chunk 32. */
788 #  endif
789 #  define CHUNK_SHIFT 0
790 
791 /* Number of active buckets of given ordinal. */
792 #ifdef IGNORE_SMALL_BAD_FREE
793 #define FIRST_BUCKET_WITH_CHECK (6 * BUCKETS_PER_POW2) /* 64 */
794 #  define N_BLKS(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK 		\
795 			 ? ((1<<LOG_OF_MIN_ARENA) - 1)/BUCKET_SIZE_NO_SURPLUS(bucket) \
796 			 : n_blks[bucket] )
797 #else
798 #  define N_BLKS(bucket) n_blks[bucket]
799 #endif
800 
801 static u_short n_blks[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
802   {
803 #  if BUCKETS_PER_POW2==1
804       0, 0,
805       (MIN_BUC_POW2==2 ? 384 : 0),
806       224, 120, 62, 31, 16, 8, 4, 2
807 #  else
808       0, 0, 0, 0,
809       (MIN_BUC_POW2==2 ? 384 : 0), (MIN_BUC_POW2==2 ? 384 : 0),	/* 4, 4 */
810       224, 149, 120, 80, 62, 41, 31, 25, 16, 16, 8, 8, 4, 4, 2, 2
811 #  endif
812   };
813 
814 /* Shift of the first bucket with the given ordinal inside 2K chunk. */
815 #ifdef IGNORE_SMALL_BAD_FREE
816 #  define BLK_SHIFT(bucket) ( (bucket) < FIRST_BUCKET_WITH_CHECK 	\
817 			      ? ((1<<LOG_OF_MIN_ARENA)			\
818 				 - BUCKET_SIZE_NO_SURPLUS(bucket) * N_BLKS(bucket)) \
819 			      : blk_shift[bucket])
820 #else
821 #  define BLK_SHIFT(bucket) blk_shift[bucket]
822 #endif
823 
824 static u_short blk_shift[LOG_OF_MIN_ARENA * BUCKETS_PER_POW2] =
825   {
826 #  if BUCKETS_PER_POW2==1
827       0, 0,
828       (MIN_BUC_POW2==2 ? 512 : 0),
829       256, 128, 64, 64,			/* 8 to 64 */
830       16*sizeof(union overhead),
831       8*sizeof(union overhead),
832       4*sizeof(union overhead),
833       2*sizeof(union overhead),
834 #  else
835       0, 0, 0, 0,
836       (MIN_BUC_POW2==2 ? 512 : 0), (MIN_BUC_POW2==2 ? 512 : 0),
837       256, 260, 128, 128, 64, 80, 64, 48, /* 8 to 96 */
838       16*sizeof(union overhead), 16*sizeof(union overhead),
839       8*sizeof(union overhead), 8*sizeof(union overhead),
840       4*sizeof(union overhead), 4*sizeof(union overhead),
841       2*sizeof(union overhead), 2*sizeof(union overhead),
842 #  endif
843   };
844 
845 #  define NEEDED_ALIGNMENT 0x800	/* 2k boundaries */
846 #  define WANTED_ALIGNMENT 0x800	/* 2k boundaries */
847 
848 #else  /* !PACK_MALLOC */
849 
850 #  define OV_MAGIC(block,bucket) (block)->ov_magic
851 #  define OV_INDEX(block) (block)->ov_index
852 #  define CHUNK_SHIFT 1
853 #  define MAX_PACKED -1
854 #  define NEEDED_ALIGNMENT MEM_ALIGNBYTES
855 #  define WANTED_ALIGNMENT 0x400	/* 1k boundaries */
856 
857 #endif /* !PACK_MALLOC */
858 
859 #define M_OVERHEAD (sizeof(union overhead) + RMAGIC_SZ) /* overhead at start+end */
860 
861 #ifdef PACK_MALLOC
862 #  define MEM_OVERHEAD(bucket) \
863   (bucket <= MAX_PACKED ? 0 : M_OVERHEAD)
864 #  ifdef SMALL_BUCKET_VIA_TABLE
865 #    define START_SHIFTS_BUCKET ((MAX_PACKED_POW2 + 1) * BUCKETS_PER_POW2)
866 #    define START_SHIFT MAX_PACKED_POW2
867 #    ifdef BUCKETS_ROOT2		/* Chunks of size 3*2^n. */
868 #      define SIZE_TABLE_MAX 80
869 #    else
870 #      define SIZE_TABLE_MAX 64
871 #    endif
872 static char bucket_of[] =
873   {
874 #    ifdef BUCKETS_ROOT2		/* Chunks of size 3*2^n. */
875       /* 0 to 15 in 4-byte increments. */
876       (sizeof(void*) > 4 ? 6 : 5),	/* 4/8, 5-th bucket for better reports */
877       6,				/* 8 */
878       IF_ALIGN_8(8,7), 8,		/* 16/12, 16 */
879       9, 9, 10, 10,			/* 24, 32 */
880       11, 11, 11, 11,			/* 48 */
881       12, 12, 12, 12,			/* 64 */
882       13, 13, 13, 13,			/* 80 */
883       13, 13, 13, 13			/* 80 */
884 #    else /* !BUCKETS_ROOT2 */
885       /* 0 to 15 in 4-byte increments. */
886       (sizeof(void*) > 4 ? 3 : 2),
887       3,
888       4, 4,
889       5, 5, 5, 5,
890       6, 6, 6, 6,
891       6, 6, 6, 6
892 #    endif /* !BUCKETS_ROOT2 */
893   };
894 #  else  /* !SMALL_BUCKET_VIA_TABLE */
895 #    define START_SHIFTS_BUCKET MIN_BUCKET
896 #    define START_SHIFT (MIN_BUC_POW2 - 1)
897 #  endif /* !SMALL_BUCKET_VIA_TABLE */
898 #else  /* !PACK_MALLOC */
899 #  define MEM_OVERHEAD(bucket) M_OVERHEAD
900 #  ifdef SMALL_BUCKET_VIA_TABLE
901 #    undef SMALL_BUCKET_VIA_TABLE
902 #  endif
903 #  define START_SHIFTS_BUCKET MIN_BUCKET
904 #  define START_SHIFT (MIN_BUC_POW2 - 1)
905 #endif /* !PACK_MALLOC */
906 
907 /*
908  * Big allocations are often of the size 2^n bytes. To make them a
909  * little bit better, make blocks of size 2^n+pagesize for big n.
910  */
911 
912 #ifdef TWO_POT_OPTIMIZE
913 
914 #  ifndef PERL_PAGESIZE
915 #    define PERL_PAGESIZE 4096
916 #  endif
917 #  ifndef FIRST_BIG_POW2
918 #    define FIRST_BIG_POW2 15	/* 32K, 16K is used too often. */
919 #  endif
920 #  define FIRST_BIG_BLOCK (1<<FIRST_BIG_POW2)
921 /* If this value or more, check against bigger blocks. */
922 #  define FIRST_BIG_BOUND (FIRST_BIG_BLOCK - M_OVERHEAD)
923 /* If less than this value, goes into 2^n-overhead-block. */
924 #  define LAST_SMALL_BOUND ((FIRST_BIG_BLOCK>>1) - M_OVERHEAD)
925 
926 #  define POW2_OPTIMIZE_ADJUST(nbytes)				\
927    ((nbytes >= FIRST_BIG_BOUND) ? nbytes -= PERL_PAGESIZE : 0)
928 #  define POW2_OPTIMIZE_SURPLUS(bucket)				\
929    ((bucket >= FIRST_BIG_POW2 * BUCKETS_PER_POW2) ? PERL_PAGESIZE : 0)
930 
931 #else  /* !TWO_POT_OPTIMIZE */
932 #  define POW2_OPTIMIZE_ADJUST(nbytes)
933 #  define POW2_OPTIMIZE_SURPLUS(bucket) 0
934 #endif /* !TWO_POT_OPTIMIZE */
935 
936 #if defined(HAS_64K_LIMIT) && defined(PERL_CORE)
937 #  define BARK_64K_LIMIT(what,nbytes,size)				\
938 	if (nbytes > 0xffff) {						\
939 		PerlIO_printf(PerlIO_stderr(),				\
940 			      "%s too large: %lx\n", what, size);	\
941 		my_exit(1);						\
942 	}
943 #else /* !HAS_64K_LIMIT || !PERL_CORE */
944 #  define BARK_64K_LIMIT(what,nbytes,size)
945 #endif /* !HAS_64K_LIMIT || !PERL_CORE */
946 
947 #ifndef MIN_SBRK
948 #  define MIN_SBRK 2048
949 #endif
950 
951 #ifndef FIRST_SBRK
952 #  define FIRST_SBRK (48*1024)
953 #endif
954 
955 /* Minimal sbrk in percents of what is already alloced. */
956 #ifndef MIN_SBRK_FRAC
957 #  define MIN_SBRK_FRAC 3
958 #endif
959 
960 #ifndef SBRK_ALLOW_FAILURES
961 #  define SBRK_ALLOW_FAILURES 3
962 #endif
963 
964 #ifndef SBRK_FAILURE_PRICE
965 #  define SBRK_FAILURE_PRICE 50
966 #endif
967 
968 static void	morecore	(register int bucket);
969 #  if defined(DEBUGGING)
970 static void	botch		(char *diag, char *s, char *file, int line);
971 #  endif
972 static void	add_to_chain	(void *p, MEM_SIZE size, MEM_SIZE chip);
973 static void*	get_from_chain	(MEM_SIZE size);
974 static void*	get_from_bigger_buckets(int bucket, MEM_SIZE size);
975 static union overhead *getpages	(MEM_SIZE needed, int *nblksp, int bucket);
976 static int	getpages_adjacent(MEM_SIZE require);
977 
978 #ifdef PERL_CORE
979 
980 #ifdef I_MACH_CTHREADS
981 #  undef  MUTEX_LOCK
982 #  define MUTEX_LOCK(m)   STMT_START { if (*m) mutex_lock(*m);   } STMT_END
983 #  undef  MUTEX_UNLOCK
984 #  define MUTEX_UNLOCK(m) STMT_START { if (*m) mutex_unlock(*m); } STMT_END
985 #endif
986 
987 #endif	/* defined PERL_CORE */
988 
989 #ifndef PTRSIZE
990 #  define PTRSIZE	sizeof(void*)
991 #endif
992 
993 #ifndef BITS_IN_PTR
994 #  define BITS_IN_PTR (8*PTRSIZE)
995 #endif
996 
997 /*
998  * nextf[i] is the pointer to the next free block of size 2^i.  The
999  * smallest allocatable block is 8 bytes.  The overhead information
1000  * precedes the data area returned to the user.
1001  */
1002 #define	NBUCKETS (BITS_IN_PTR*BUCKETS_PER_POW2 + 1)
1003 static	union overhead *nextf[NBUCKETS];
1004 
1005 #if defined(PURIFY) && !defined(USE_PERL_SBRK)
1006 #  define USE_PERL_SBRK
1007 #endif
1008 
1009 #ifdef USE_PERL_SBRK
1010 # define sbrk(a) Perl_sbrk(a)
1011 Malloc_t Perl_sbrk (int size);
1012 #else
1013 # ifndef HAS_SBRK_PROTO /* <unistd.h> usually takes care of this */
1014 extern	Malloc_t sbrk(int);
1015 # endif
1016 #endif
1017 
1018 #ifndef MIN_SBRK_FRAC1000	/* Backward compatibility */
1019 #  define MIN_SBRK_FRAC1000	(MIN_SBRK_FRAC * 10)
1020 #endif
1021 
1022 #ifndef START_EXTERN_C
1023 #  ifdef __cplusplus
1024 #    define START_EXTERN_C	extern "C" {
1025 #  else
1026 #    define START_EXTERN_C
1027 #  endif
1028 #endif
1029 
1030 #ifndef END_EXTERN_C
1031 #  ifdef __cplusplus
1032 #    define END_EXTERN_C		};
1033 #  else
1034 #    define END_EXTERN_C
1035 #  endif
1036 #endif
1037 
1038 #include "malloc_ctl.h"
1039 
1040 #ifndef NO_MALLOC_DYNAMIC_CFG
1041 #  define PERL_MALLOC_OPT_CHARS "FMfAPGdac"
1042 
1043 #  ifndef FILL_DEAD_DEFAULT
1044 #    define FILL_DEAD_DEFAULT	1
1045 #  endif
1046 #  ifndef FILL_ALIVE_DEFAULT
1047 #    define FILL_ALIVE_DEFAULT	1
1048 #  endif
1049 #  ifndef FILL_CHECK_DEFAULT
1050 #    define FILL_CHECK_DEFAULT	1
1051 #  endif
1052 
1053 static IV MallocCfg[MallocCfg_last] = {
1054   FIRST_SBRK,
1055   MIN_SBRK,
1056   MIN_SBRK_FRAC,
1057   SBRK_ALLOW_FAILURES,
1058   SBRK_FAILURE_PRICE,
1059   SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE,	/* sbrk_goodness */
1060   FILL_DEAD_DEFAULT,	/* FILL_DEAD */
1061   FILL_ALIVE_DEFAULT,	/* FILL_ALIVE */
1062   FILL_CHECK_DEFAULT,	/* FILL_CHECK */
1063   0,			/* MallocCfg_skip_cfg_env */
1064   0,			/* MallocCfg_cfg_env_read */
1065   0,			/* MallocCfg_emergency_buffer_size */
1066   0,			/* MallocCfg_emergency_buffer_prepared_size */
1067   0			/* MallocCfg_emergency_buffer_last_req */
1068 };
1069 IV *MallocCfg_ptr = MallocCfg;
1070 
1071 static char* MallocCfgP[MallocCfg_last] = {
1072   0,			/* MallocCfgP_emergency_buffer */
1073   0,			/* MallocCfgP_emergency_buffer_prepared */
1074 };
1075 char **MallocCfgP_ptr = MallocCfgP;
1076 
1077 #  undef MIN_SBRK
1078 #  undef FIRST_SBRK
1079 #  undef MIN_SBRK_FRAC1000
1080 #  undef SBRK_ALLOW_FAILURES
1081 #  undef SBRK_FAILURE_PRICE
1082 
1083 #  define MIN_SBRK		MallocCfg[MallocCfg_MIN_SBRK]
1084 #  define FIRST_SBRK		MallocCfg[MallocCfg_FIRST_SBRK]
1085 #  define MIN_SBRK_FRAC1000	MallocCfg[MallocCfg_MIN_SBRK_FRAC1000]
1086 #  define SBRK_ALLOW_FAILURES	MallocCfg[MallocCfg_SBRK_ALLOW_FAILURES]
1087 #  define SBRK_FAILURE_PRICE	MallocCfg[MallocCfg_SBRK_FAILURE_PRICE]
1088 
1089 #  define sbrk_goodness		MallocCfg[MallocCfg_sbrk_goodness]
1090 
1091 #  define emergency_buffer_size	MallocCfg[MallocCfg_emergency_buffer_size]
1092 #  define emergency_buffer_last_req	MallocCfg[MallocCfg_emergency_buffer_last_req]
1093 
1094 #  define FILL_DEAD		MallocCfg[MallocCfg_filldead]
1095 #  define FILL_ALIVE		MallocCfg[MallocCfg_fillalive]
1096 #  define FILL_CHECK_CFG	MallocCfg[MallocCfg_fillcheck]
1097 #  define FILL_CHECK		(FILL_DEAD && FILL_CHECK_CFG)
1098 
1099 #  define emergency_buffer	MallocCfgP[MallocCfgP_emergency_buffer]
1100 #  define emergency_buffer_prepared	MallocCfgP[MallocCfgP_emergency_buffer_prepared]
1101 
1102 #else	/* defined(NO_MALLOC_DYNAMIC_CFG) */
1103 
1104 #  define FILL_DEAD	1
1105 #  define FILL_ALIVE	1
1106 #  define FILL_CHECK	1
1107 static int sbrk_goodness = SBRK_ALLOW_FAILURES * SBRK_FAILURE_PRICE;
1108 
1109 #  define NO_PERL_MALLOC_ENV
1110 
1111 #endif
1112 
1113 #ifdef DEBUGGING_MSTATS
1114 /*
1115  * nmalloc[i] is the difference between the number of mallocs and frees
1116  * for a given block size.
1117  */
1118 static	u_int nmalloc[NBUCKETS];
1119 static  u_int sbrk_slack;
1120 static  u_int start_slack;
1121 #else	/* !( defined DEBUGGING_MSTATS ) */
1122 #  define sbrk_slack	0
1123 #endif
1124 
1125 static	u_int goodsbrk;
1126 
1127 #ifdef PERL_EMERGENCY_SBRK
1128 
1129 #  ifndef BIG_SIZE
1130 #    define BIG_SIZE (1<<16)		/* 64K */
1131 #  endif
1132 
1133 #  ifdef NO_MALLOC_DYNAMIC_CFG
1134 static MEM_SIZE emergency_buffer_size;
1135 	/* 0 if the last request for more memory succeeded.
1136 	   Otherwise the size of the failing request. */
1137 static MEM_SIZE emergency_buffer_last_req;
1138 static char *emergency_buffer;
1139 static char *emergency_buffer_prepared;
1140 #  endif
1141 
1142 #  ifndef emergency_sbrk_croak
1143 #    define emergency_sbrk_croak	croak2
1144 #  endif
1145 
1146 #  ifdef PERL_CORE
1147 static char *
1148 perl_get_emergency_buffer(IV *size)
1149 {
1150     dTHX;
1151     /* First offense, give a possibility to recover by dieing. */
1152     /* No malloc involved here: */
1153     GV **gvp = (GV**)hv_fetch(PL_defstash, "^M", 2, 0);
1154     SV *sv;
1155     char *pv;
1156     STRLEN n_a;
1157 
1158     if (!gvp) gvp = (GV**)hv_fetch(PL_defstash, "\015", 1, 0);
1159     if (!gvp || !(sv = GvSV(*gvp)) || !SvPOK(sv)
1160         || (SvLEN(sv) < (1<<LOG_OF_MIN_ARENA) - M_OVERHEAD))
1161         return NULL;		/* Now die die die... */
1162     /* Got it, now detach SvPV: */
1163     pv = SvPV(sv, n_a);
1164     /* Check alignment: */
1165     if ((PTR2UV(pv) - sizeof(union overhead)) & (NEEDED_ALIGNMENT - 1)) {
1166         PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
1167         return NULL;		/* die die die */
1168     }
1169 
1170     SvPOK_off(sv);
1171     SvPVX(sv) = Nullch;
1172     SvCUR(sv) = SvLEN(sv) = 0;
1173     *size = malloced_size(pv) + M_OVERHEAD;
1174     return pv - sizeof(union overhead);
1175 }
1176 #    define PERL_GET_EMERGENCY_BUFFER(p)	perl_get_emergency_buffer(p)
1177 #  else
1178 #    define PERL_GET_EMERGENCY_BUFFER(p)	NULL
1179 #  endif	/* defined PERL_CORE */
1180 
1181 #  ifndef NO_MALLOC_DYNAMIC_CFG
1182 static char *
1183 get_emergency_buffer(IV *size)
1184 {
1185     char *pv = emergency_buffer_prepared;
1186 
1187     *size = MallocCfg[MallocCfg_emergency_buffer_prepared_size];
1188     emergency_buffer_prepared = 0;
1189     MallocCfg[MallocCfg_emergency_buffer_prepared_size] = 0;
1190     return pv;
1191 }
1192 
1193 /* Returns 0 on success, -1 on bad alignment, -2 if not implemented */
1194 int
1195 set_emergency_buffer(char *b, IV size)
1196 {
1197     if (PTR2UV(b) & (NEEDED_ALIGNMENT - 1))
1198 	return -1;
1199     if (MallocCfg[MallocCfg_emergency_buffer_prepared_size])
1200 	add_to_chain((void*)emergency_buffer_prepared,
1201 		     MallocCfg[MallocCfg_emergency_buffer_prepared_size], 0);
1202     emergency_buffer_prepared = b;
1203     MallocCfg[MallocCfg_emergency_buffer_prepared_size] = size;
1204     return 0;
1205 }
1206 #    define GET_EMERGENCY_BUFFER(p)	get_emergency_buffer(p)
1207 #  else		/* NO_MALLOC_DYNAMIC_CFG */
1208 #    define GET_EMERGENCY_BUFFER(p)	NULL
1209 int
1210 set_emergency_buffer(char *b, IV size)
1211 {
1212     return -1;
1213 }
1214 #  endif
1215 
1216 static Malloc_t
1217 emergency_sbrk(MEM_SIZE size)
1218 {
1219     MEM_SIZE rsize = (((size - 1)>>LOG_OF_MIN_ARENA) + 1)<<LOG_OF_MIN_ARENA;
1220 
1221     if (size >= BIG_SIZE
1222 	&& (!emergency_buffer_last_req || (size < emergency_buffer_last_req))) {
1223 	/* Give the possibility to recover, but avoid an infinite cycle. */
1224 	MALLOC_UNLOCK;
1225 	emergency_buffer_last_req = size;
1226 	emergency_sbrk_croak("Out of memory during \"large\" request for %"UVuf" bytes, total sbrk() is %"UVuf" bytes", (UV)size, (UV)(goodsbrk + sbrk_slack));
1227     }
1228 
1229     if (emergency_buffer_size >= rsize) {
1230 	char *old = emergency_buffer;
1231 
1232 	emergency_buffer_size -= rsize;
1233 	emergency_buffer += rsize;
1234 	return old;
1235     } else {
1236 	/* First offense, give a possibility to recover by dieing. */
1237 	/* No malloc involved here: */
1238 	IV Size;
1239 	char *pv = GET_EMERGENCY_BUFFER(&Size);
1240 	int have = 0;
1241 
1242 	if (emergency_buffer_size) {
1243 	    add_to_chain(emergency_buffer, emergency_buffer_size, 0);
1244 	    emergency_buffer_size = 0;
1245 	    emergency_buffer = Nullch;
1246 	    have = 1;
1247 	}
1248 
1249 	if (!pv)
1250 	    pv = PERL_GET_EMERGENCY_BUFFER(&Size);
1251 	if (!pv) {
1252 	    if (have)
1253 		goto do_croak;
1254 	    return (char *)-1;		/* Now die die die... */
1255 	}
1256 
1257 	/* Check alignment: */
1258 	if (PTR2UV(pv) & (NEEDED_ALIGNMENT - 1)) {
1259 	    dTHX;
1260 
1261 	    PerlIO_puts(PerlIO_stderr(),"Bad alignment of $^M!\n");
1262 	    return (char *)-1;		/* die die die */
1263 	}
1264 
1265 	emergency_buffer = pv;
1266 	emergency_buffer_size = Size;
1267     }
1268   do_croak:
1269     MALLOC_UNLOCK;
1270     emergency_sbrk_croak("Out of memory during request for %"UVuf" bytes, total sbrk() is %"UVuf" bytes", (UV)size, (UV)(goodsbrk + sbrk_slack));
1271     /* NOTREACHED */
1272     return Nullch;
1273 }
1274 
1275 #else /*  !defined(PERL_EMERGENCY_SBRK) */
1276 #  define emergency_sbrk(size)	-1
1277 #endif	/* defined PERL_EMERGENCY_SBRK */
1278 
1279 static void
1280 write2(char *mess)
1281 {
1282   write(2, mess, strlen(mess));
1283 }
1284 
1285 #ifdef DEBUGGING
1286 #undef ASSERT
1287 #define	ASSERT(p,diag)   if (!(p)) botch(diag,STRINGIFY(p),__FILE__,__LINE__);  else
1288 static void
1289 botch(char *diag, char *s, char *file, int line)
1290 {
1291     if (!(PERL_MAYBE_ALIVE && PERL_GET_THX))
1292 	goto do_write;
1293     else {
1294 	dTHX;
1295 	if (PerlIO_printf(PerlIO_stderr(),
1296 			  "assertion botched (%s?): %s %s:%d\n",
1297 			  diag, s, file, line) != 0) {
1298 	 do_write:		/* Can be initializing interpreter */
1299 	    write2("assertion botched (");
1300 	    write2(diag);
1301 	    write2("?): ");
1302 	    write2(s);
1303 	    write2(" (");
1304 	    write2(file);
1305 	    write2(":");
1306 	    {
1307 	      char linebuf[10];
1308 	      char *s = linebuf + sizeof(linebuf) - 1;
1309 	      int n = line;
1310 	      *s = 0;
1311 	      do {
1312 		*--s = '0' + (n % 10);
1313 	      } while (n /= 10);
1314 	      write2(s);
1315 	    }
1316 	    write2(")\n");
1317 	}
1318 	PerlProc_abort();
1319     }
1320 }
1321 #else
1322 #define	ASSERT(p, diag)
1323 #endif
1324 
1325 #ifdef MALLOC_FILL
1326 /* Fill should be long enough to cover long */
1327 static void
1328 fill_pat_4bytes(unsigned char *s, size_t nbytes, const unsigned char *fill)
1329 {
1330     unsigned char *e = s + nbytes;
1331     long *lp;
1332     long lfill = *(long*)fill;
1333 
1334     if (PTR2UV(s) & (sizeof(long)-1)) {		/* Align the pattern */
1335 	int shift = sizeof(long) - (PTR2UV(s) & (sizeof(long)-1));
1336 	unsigned const char *f = fill + sizeof(long) - shift;
1337 	unsigned char *e1 = s + shift;
1338 
1339 	while (s < e1)
1340 	    *s++ = *f++;
1341     }
1342     lp = (long*)s;
1343     while ((unsigned char*)(lp + 1) <= e)
1344 	*lp++ = lfill;
1345     s = (unsigned char*)lp;
1346     while (s < e)
1347 	*s++ = *fill++;
1348 }
1349 /* Just malloc()ed */
1350 static const unsigned char fill_feedadad[] =
1351  {0xFE, 0xED, 0xAD, 0xAD, 0xFE, 0xED, 0xAD, 0xAD,
1352   0xFE, 0xED, 0xAD, 0xAD, 0xFE, 0xED, 0xAD, 0xAD};
1353 /* Just free()ed */
1354 static const unsigned char fill_deadbeef[] =
1355  {0xDE, 0xAD, 0xBE, 0xEF, 0xDE, 0xAD, 0xBE, 0xEF,
1356   0xDE, 0xAD, 0xBE, 0xEF, 0xDE, 0xAD, 0xBE, 0xEF};
1357 #  define FILL_DEADBEEF(s, n)	\
1358 	(void)(FILL_DEAD?  (fill_pat_4bytes((s), (n), fill_deadbeef), 0) : 0)
1359 #  define FILL_FEEDADAD(s, n)	\
1360 	(void)(FILL_ALIVE? (fill_pat_4bytes((s), (n), fill_feedadad), 0) : 0)
1361 #else
1362 #  define FILL_DEADBEEF(s, n)	((void)0)
1363 #  define FILL_FEEDADAD(s, n)	((void)0)
1364 #  undef MALLOC_FILL_CHECK
1365 #endif
1366 
1367 #ifdef MALLOC_FILL_CHECK
1368 static int
1369 cmp_pat_4bytes(unsigned char *s, size_t nbytes, const unsigned char *fill)
1370 {
1371     unsigned char *e = s + nbytes;
1372     long *lp;
1373     long lfill = *(long*)fill;
1374 
1375     if (PTR2UV(s) & (sizeof(long)-1)) {		/* Align the pattern */
1376 	int shift = sizeof(long) - (PTR2UV(s) & (sizeof(long)-1));
1377 	unsigned const char *f = fill + sizeof(long) - shift;
1378 	unsigned char *e1 = s + shift;
1379 
1380 	while (s < e1)
1381 	    if (*s++ != *f++)
1382 		return 1;
1383     }
1384     lp = (long*)s;
1385     while ((unsigned char*)(lp + 1) <= e)
1386 	if (*lp++ != lfill)
1387 	    return 1;
1388     s = (unsigned char*)lp;
1389     while (s < e)
1390 	if (*s++ != *fill++)
1391 	    return 1;
1392     return 0;
1393 }
1394 #  define FILLCHECK_DEADBEEF(s, n)					\
1395 	ASSERT(!FILL_CHECK || !cmp_pat_4bytes(s, n, fill_deadbeef),	\
1396 	       "free()ed/realloc()ed-away memory was overwritten")
1397 #else
1398 #  define FILLCHECK_DEADBEEF(s, n)	((void)0)
1399 #endif
1400 
1401 Malloc_t
1402 Perl_malloc(register size_t nbytes)
1403 {
1404   	register union overhead *p;
1405   	register int bucket;
1406   	register MEM_SIZE shiftr;
1407 
1408 #if defined(DEBUGGING) || defined(RCHECK)
1409 	MEM_SIZE size = nbytes;
1410 #endif
1411 
1412 	BARK_64K_LIMIT("Allocation",nbytes,nbytes);
1413 #ifdef DEBUGGING
1414 	if ((long)nbytes < 0)
1415 	    croak("%s", "panic: malloc");
1416 #endif
1417 
1418 	/*
1419 	 * Convert amount of memory requested into
1420 	 * closest block size stored in hash buckets
1421 	 * which satisfies request.  Account for
1422 	 * space used per block for accounting.
1423 	 */
1424 #ifdef PACK_MALLOC
1425 #  ifdef SMALL_BUCKET_VIA_TABLE
1426 	if (nbytes == 0)
1427 	    bucket = MIN_BUCKET;
1428 	else if (nbytes <= SIZE_TABLE_MAX) {
1429 	    bucket = bucket_of[(nbytes - 1) >> BUCKET_TABLE_SHIFT];
1430 	} else
1431 #  else
1432 	if (nbytes == 0)
1433 	    nbytes = 1;
1434 	if (nbytes <= MAX_POW2_ALGO) goto do_shifts;
1435 	else
1436 #  endif
1437 #endif
1438 	{
1439 	    POW2_OPTIMIZE_ADJUST(nbytes);
1440 	    nbytes += M_OVERHEAD;
1441 	    nbytes = (nbytes + 3) &~ 3;
1442 #if defined(PACK_MALLOC) && !defined(SMALL_BUCKET_VIA_TABLE)
1443 	  do_shifts:
1444 #endif
1445 	    shiftr = (nbytes - 1) >> START_SHIFT;
1446 	    bucket = START_SHIFTS_BUCKET;
1447 	    /* apart from this loop, this is O(1) */
1448 	    while (shiftr >>= 1)
1449   		bucket += BUCKETS_PER_POW2;
1450 	}
1451 	MALLOC_LOCK;
1452 	/*
1453 	 * If nothing in hash bucket right now,
1454 	 * request more memory from the system.
1455 	 */
1456   	if (nextf[bucket] == NULL)
1457   		morecore(bucket);
1458   	if ((p = nextf[bucket]) == NULL) {
1459 		MALLOC_UNLOCK;
1460 #ifdef PERL_CORE
1461 		{
1462 		    dTHX;
1463 		    if (!PL_nomemok) {
1464 #if defined(PLAIN_MALLOC) && defined(NO_FANCY_MALLOC)
1465 		        PerlIO_puts(PerlIO_stderr(),"Out of memory!\n");
1466 #else
1467 			char buff[80];
1468 			char *eb = buff + sizeof(buff) - 1;
1469 			char *s = eb;
1470 			size_t n = nbytes;
1471 
1472 			PerlIO_puts(PerlIO_stderr(),"Out of memory during request for ");
1473 #if defined(DEBUGGING) || defined(RCHECK)
1474 			n = size;
1475 #endif
1476 			*s = 0;
1477 			do {
1478 			    *--s = '0' + (n % 10);
1479 			} while (n /= 10);
1480 			PerlIO_puts(PerlIO_stderr(),s);
1481 			PerlIO_puts(PerlIO_stderr()," bytes, total sbrk() is ");
1482 			s = eb;
1483 			n = goodsbrk + sbrk_slack;
1484 			do {
1485 			    *--s = '0' + (n % 10);
1486 			} while (n /= 10);
1487 			PerlIO_puts(PerlIO_stderr(),s);
1488 			PerlIO_puts(PerlIO_stderr()," bytes!\n");
1489 #endif /* defined(PLAIN_MALLOC) && defined(NO_FANCY_MALLOC) */
1490 			my_exit(1);
1491 		    }
1492 		}
1493 #endif
1494   		return (NULL);
1495 	}
1496 
1497 	/* remove from linked list */
1498 #ifdef DEBUGGING
1499 	if ( (PTR2UV(p) & (MEM_ALIGNBYTES - 1))
1500 						/* Can't get this low */
1501 	     || (p && PTR2UV(p) < (1<<LOG_OF_MIN_ARENA)) ) {
1502 	    dTHX;
1503 	    PerlIO_printf(PerlIO_stderr(),
1504 			  "Unaligned pointer in the free chain 0x%"UVxf"\n",
1505 			  PTR2UV(p));
1506 	}
1507 	if ( (PTR2UV(p->ov_next) & (MEM_ALIGNBYTES - 1))
1508 	     || (p->ov_next && PTR2UV(p->ov_next) < (1<<LOG_OF_MIN_ARENA)) ) {
1509 	    dTHX;
1510 	    PerlIO_printf(PerlIO_stderr(),
1511 			  "Unaligned `next' pointer in the free "
1512 			  "chain 0x%"UVxf" at 0x%"UVxf"\n",
1513 			  PTR2UV(p->ov_next), PTR2UV(p));
1514 	}
1515 #endif
1516   	nextf[bucket] = p->ov_next;
1517 
1518 	MALLOC_UNLOCK;
1519 
1520 	DEBUG_m(PerlIO_printf(Perl_debug_log,
1521 			      "0x%"UVxf": (%05lu) malloc %ld bytes\n",
1522 			      PTR2UV((Malloc_t)(p + CHUNK_SHIFT)), (unsigned long)(PL_an++),
1523 			      (long)size));
1524 
1525 	FILLCHECK_DEADBEEF((unsigned char*)(p + CHUNK_SHIFT),
1526 			   BUCKET_SIZE_REAL(bucket) + RMAGIC_SZ);
1527 
1528 #ifdef IGNORE_SMALL_BAD_FREE
1529 	if (bucket >= FIRST_BUCKET_WITH_CHECK)
1530 #endif
1531 	    OV_MAGIC(p, bucket) = MAGIC;
1532 #ifndef PACK_MALLOC
1533 	OV_INDEX(p) = bucket;
1534 #endif
1535 #ifdef RCHECK
1536 	/*
1537 	 * Record allocated size of block and
1538 	 * bound space with magic numbers.
1539 	 */
1540 	p->ov_rmagic = RMAGIC;
1541 	if (bucket <= MAX_SHORT_BUCKET) {
1542 	    int i;
1543 
1544 	    nbytes = size + M_OVERHEAD;
1545 	    p->ov_size = nbytes - 1;
1546 	    if ((i = nbytes & (RMAGIC_SZ-1))) {
1547 		i = RMAGIC_SZ - i;
1548 		while (i--) /* nbytes - RMAGIC_SZ is end of alloced area */
1549 		    ((caddr_t)p + nbytes - RMAGIC_SZ)[i] = RMAGIC_C;
1550 	    }
1551 	    /* Same at RMAGIC_SZ-aligned RMAGIC */
1552 	    nbytes = (nbytes + RMAGIC_SZ - 1) & ~(RMAGIC_SZ - 1);
1553 	    ((u_int *)((caddr_t)p + nbytes))[-1] = RMAGIC;
1554 	}
1555 	FILL_FEEDADAD((unsigned char *)(p + CHUNK_SHIFT), size);
1556 #endif
1557   	return ((Malloc_t)(p + CHUNK_SHIFT));
1558 }
1559 
1560 static char *last_sbrk_top;
1561 static char *last_op;			/* This arena can be easily extended. */
1562 static MEM_SIZE sbrked_remains;
1563 
1564 #ifdef DEBUGGING_MSTATS
1565 static int sbrks;
1566 #endif
1567 
1568 struct chunk_chain_s {
1569     struct chunk_chain_s *next;
1570     MEM_SIZE size;
1571 };
1572 static struct chunk_chain_s *chunk_chain;
1573 static int n_chunks;
1574 static char max_bucket;
1575 
1576 /* Cutoff a piece of one of the chunks in the chain.  Prefer smaller chunk. */
1577 static void *
1578 get_from_chain(MEM_SIZE size)
1579 {
1580     struct chunk_chain_s *elt = chunk_chain, **oldp = &chunk_chain;
1581     struct chunk_chain_s **oldgoodp = NULL;
1582     long min_remain = LONG_MAX;
1583 
1584     while (elt) {
1585 	if (elt->size >= size) {
1586 	    long remains = elt->size - size;
1587 	    if (remains >= 0 && remains < min_remain) {
1588 		oldgoodp = oldp;
1589 		min_remain = remains;
1590 	    }
1591 	    if (remains == 0) {
1592 		break;
1593 	    }
1594 	}
1595 	oldp = &( elt->next );
1596 	elt = elt->next;
1597     }
1598     if (!oldgoodp) return NULL;
1599     if (min_remain) {
1600 	void *ret = *oldgoodp;
1601 	struct chunk_chain_s *next = (*oldgoodp)->next;
1602 
1603 	*oldgoodp = (struct chunk_chain_s *)((char*)ret + size);
1604 	(*oldgoodp)->size = min_remain;
1605 	(*oldgoodp)->next = next;
1606 	return ret;
1607     } else {
1608 	void *ret = *oldgoodp;
1609 	*oldgoodp = (*oldgoodp)->next;
1610 	n_chunks--;
1611 	return ret;
1612     }
1613 }
1614 
1615 static void
1616 add_to_chain(void *p, MEM_SIZE size, MEM_SIZE chip)
1617 {
1618     struct chunk_chain_s *next = chunk_chain;
1619     char *cp = (char*)p;
1620 
1621     cp += chip;
1622     chunk_chain = (struct chunk_chain_s *)cp;
1623     chunk_chain->size = size - chip;
1624     chunk_chain->next = next;
1625     n_chunks++;
1626 }
1627 
1628 static void *
1629 get_from_bigger_buckets(int bucket, MEM_SIZE size)
1630 {
1631     int price = 1;
1632     static int bucketprice[NBUCKETS];
1633     while (bucket <= max_bucket) {
1634 	/* We postpone stealing from bigger buckets until we want it
1635 	   often enough. */
1636 	if (nextf[bucket] && bucketprice[bucket]++ >= price) {
1637 	    /* Steal it! */
1638 	    void *ret = (void*)(nextf[bucket] - 1 + CHUNK_SHIFT);
1639 	    bucketprice[bucket] = 0;
1640 	    if (((char*)nextf[bucket]) - M_OVERHEAD == last_op) {
1641 		last_op = NULL;		/* Disable optimization */
1642 	    }
1643 	    nextf[bucket] = nextf[bucket]->ov_next;
1644 #ifdef DEBUGGING_MSTATS
1645 	    nmalloc[bucket]--;
1646 	    start_slack -= M_OVERHEAD;
1647 #endif
1648 	    add_to_chain(ret, (BUCKET_SIZE_NO_SURPLUS(bucket) +
1649 			       POW2_OPTIMIZE_SURPLUS(bucket)),
1650 			 size);
1651 	    return ret;
1652 	}
1653 	bucket++;
1654     }
1655     return NULL;
1656 }
1657 
1658 static union overhead *
1659 getpages(MEM_SIZE needed, int *nblksp, int bucket)
1660 {
1661     /* Need to do (possibly expensive) system call. Try to
1662        optimize it for rare calling. */
1663     MEM_SIZE require = needed - sbrked_remains;
1664     char *cp;
1665     union overhead *ovp;
1666     MEM_SIZE slack = 0;
1667 
1668     if (sbrk_goodness > 0) {
1669 	if (!last_sbrk_top && require < FIRST_SBRK)
1670 	    require = FIRST_SBRK;
1671 	else if (require < MIN_SBRK) require = MIN_SBRK;
1672 
1673 	if (require < goodsbrk * MIN_SBRK_FRAC1000 / 1000)
1674 	    require = goodsbrk * MIN_SBRK_FRAC1000 / 1000;
1675 	require = ((require - 1 + MIN_SBRK) / MIN_SBRK) * MIN_SBRK;
1676     } else {
1677 	require = needed;
1678 	last_sbrk_top = 0;
1679 	sbrked_remains = 0;
1680     }
1681 
1682     DEBUG_m(PerlIO_printf(Perl_debug_log,
1683 			  "sbrk(%ld) for %ld-byte-long arena\n",
1684 			  (long)require, (long) needed));
1685     cp = (char *)sbrk(require);
1686 #ifdef DEBUGGING_MSTATS
1687     sbrks++;
1688 #endif
1689     if (cp == last_sbrk_top) {
1690 	/* Common case, anything is fine. */
1691 	sbrk_goodness++;
1692 	ovp = (union overhead *) (cp - sbrked_remains);
1693 	last_op = cp - sbrked_remains;
1694 	sbrked_remains = require - (needed - sbrked_remains);
1695     } else if (cp == (char *)-1) { /* no more room! */
1696 	ovp = (union overhead *)emergency_sbrk(needed);
1697 	if (ovp == (union overhead *)-1)
1698 	    return 0;
1699 	if (((char*)ovp) > last_op) {	/* Cannot happen with current emergency_sbrk() */
1700 	    last_op = 0;
1701 	}
1702 	return ovp;
1703     } else {			/* Non-continuous or first sbrk(). */
1704 	long add = sbrked_remains;
1705 	char *newcp;
1706 
1707 	if (sbrked_remains) {	/* Put rest into chain, we
1708 				   cannot use it right now. */
1709 	    add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1710 			 sbrked_remains, 0);
1711 	}
1712 
1713 	/* Second, check alignment. */
1714 	slack = 0;
1715 
1716 #if !defined(atarist) && !defined(__MINT__) /* on the atari we dont have to worry about this */
1717 #  ifndef I286 	/* The sbrk(0) call on the I286 always returns the next segment */
1718 	/* WANTED_ALIGNMENT may be more than NEEDED_ALIGNMENT, but this may
1719 	   improve performance of memory access. */
1720 	if (PTR2UV(cp) & (WANTED_ALIGNMENT - 1)) { /* Not aligned. */
1721 	    slack = WANTED_ALIGNMENT - (PTR2UV(cp) & (WANTED_ALIGNMENT - 1));
1722 	    add += slack;
1723 	}
1724 #  endif
1725 #endif /* !atarist && !MINT */
1726 
1727 	if (add) {
1728 	    DEBUG_m(PerlIO_printf(Perl_debug_log,
1729 				  "sbrk(%ld) to fix non-continuous/off-page sbrk:\n\t%ld for alignement,\t%ld were assumed to come from the tail of the previous sbrk\n",
1730 				  (long)add, (long) slack,
1731 				  (long) sbrked_remains));
1732 	    newcp = (char *)sbrk(add);
1733 #if defined(DEBUGGING_MSTATS)
1734 	    sbrks++;
1735 	    sbrk_slack += add;
1736 #endif
1737 	    if (newcp != cp + require) {
1738 		/* Too bad: even rounding sbrk() is not continuous.*/
1739 		DEBUG_m(PerlIO_printf(Perl_debug_log,
1740 				      "failed to fix bad sbrk()\n"));
1741 #ifdef PACK_MALLOC
1742 		if (slack) {
1743 		    MALLOC_UNLOCK;
1744 		    fatalcroak("panic: Off-page sbrk\n");
1745 		}
1746 #endif
1747 		if (sbrked_remains) {
1748 		    /* Try again. */
1749 #if defined(DEBUGGING_MSTATS)
1750 		    sbrk_slack += require;
1751 #endif
1752 		    require = needed;
1753 		    DEBUG_m(PerlIO_printf(Perl_debug_log,
1754 					  "straight sbrk(%ld)\n",
1755 					  (long)require));
1756 		    cp = (char *)sbrk(require);
1757 #ifdef DEBUGGING_MSTATS
1758 		    sbrks++;
1759 #endif
1760 		    if (cp == (char *)-1)
1761 			return 0;
1762 		}
1763 		sbrk_goodness = -1;	/* Disable optimization!
1764 				   Continue with not-aligned... */
1765 	    } else {
1766 		cp += slack;
1767 		require += sbrked_remains;
1768 	    }
1769 	}
1770 
1771 	if (last_sbrk_top) {
1772 	    sbrk_goodness -= SBRK_FAILURE_PRICE;
1773 	}
1774 
1775 	ovp = (union overhead *) cp;
1776 	/*
1777 	 * Round up to minimum allocation size boundary
1778 	 * and deduct from block count to reflect.
1779 	 */
1780 
1781 #  if NEEDED_ALIGNMENT > MEM_ALIGNBYTES
1782 	if (PTR2UV(ovp) & (NEEDED_ALIGNMENT - 1))
1783 	    fatalcroak("Misalignment of sbrk()\n");
1784 	else
1785 #  endif
1786 #ifndef I286	/* Again, this should always be ok on an 80286 */
1787 	if (PTR2UV(ovp) & (MEM_ALIGNBYTES - 1)) {
1788 	    DEBUG_m(PerlIO_printf(Perl_debug_log,
1789 				  "fixing sbrk(): %d bytes off machine alignement\n",
1790 				  (int)(PTR2UV(ovp) & (MEM_ALIGNBYTES - 1))));
1791 	    ovp = INT2PTR(union overhead *,(PTR2UV(ovp) + MEM_ALIGNBYTES) &
1792 				     (MEM_ALIGNBYTES - 1));
1793 	    (*nblksp)--;
1794 # if defined(DEBUGGING_MSTATS)
1795 	    /* This is only approx. if TWO_POT_OPTIMIZE: */
1796 	    sbrk_slack += (1 << (bucket >> BUCKET_POW2_SHIFT));
1797 # endif
1798 	}
1799 #endif
1800 	;				/* Finish `else' */
1801 	sbrked_remains = require - needed;
1802 	last_op = cp;
1803     }
1804 #if !defined(PLAIN_MALLOC) && !defined(NO_FANCY_MALLOC)
1805     emergency_buffer_last_req = 0;
1806 #endif
1807     last_sbrk_top = cp + require;
1808 #ifdef DEBUGGING_MSTATS
1809     goodsbrk += require;
1810 #endif
1811     return ovp;
1812 }
1813 
1814 static int
1815 getpages_adjacent(MEM_SIZE require)
1816 {
1817     if (require <= sbrked_remains) {
1818 	sbrked_remains -= require;
1819     } else {
1820 	char *cp;
1821 
1822 	require -= sbrked_remains;
1823 	/* We do not try to optimize sbrks here, we go for place. */
1824 	cp = (char*) sbrk(require);
1825 #ifdef DEBUGGING_MSTATS
1826 	sbrks++;
1827 	goodsbrk += require;
1828 #endif
1829 	if (cp == last_sbrk_top) {
1830 	    sbrked_remains = 0;
1831 	    last_sbrk_top = cp + require;
1832 	} else {
1833 	    if (cp == (char*)-1) {	/* Out of memory */
1834 #ifdef DEBUGGING_MSTATS
1835 		goodsbrk -= require;
1836 #endif
1837 		return 0;
1838 	    }
1839 	    /* Report the failure: */
1840 	    if (sbrked_remains)
1841 		add_to_chain((void*)(last_sbrk_top - sbrked_remains),
1842 			     sbrked_remains, 0);
1843 	    add_to_chain((void*)cp, require, 0);
1844 	    sbrk_goodness -= SBRK_FAILURE_PRICE;
1845 	    sbrked_remains = 0;
1846 	    last_sbrk_top = 0;
1847 	    last_op = 0;
1848 	    return 0;
1849 	}
1850     }
1851 
1852     return 1;
1853 }
1854 
1855 /*
1856  * Allocate more memory to the indicated bucket.
1857  */
1858 static void
1859 morecore(register int bucket)
1860 {
1861   	register union overhead *ovp;
1862   	register int rnu;       /* 2^rnu bytes will be requested */
1863   	int nblks;		/* become nblks blocks of the desired size */
1864 	register MEM_SIZE siz, needed;
1865 	static int were_called = 0;
1866 
1867   	if (nextf[bucket])
1868   		return;
1869 #ifndef NO_PERL_MALLOC_ENV
1870 	if (!were_called) {
1871 	    /* It's the our first time.  Initialize ourselves */
1872 	    were_called = 1;	/* Avoid a loop */
1873 	    if (!MallocCfg[MallocCfg_skip_cfg_env]) {
1874 		char *s = getenv("PERL_MALLOC_OPT"), *t = s, *off;
1875 		const char *opts = PERL_MALLOC_OPT_CHARS;
1876 		int changed = 0;
1877 
1878 		while ( t && t[0] && t[1] == '='
1879 			&& ((off = strchr(opts, *t))) ) {
1880 		    IV val = 0;
1881 
1882 		    t += 2;
1883 		    while (*t <= '9' && *t >= '0')
1884 			val = 10*val + *t++ - '0';
1885 		    if (!*t || *t == ';') {
1886 			if (MallocCfg[off - opts] != val)
1887 			    changed = 1;
1888 			MallocCfg[off - opts] = val;
1889 			if (*t)
1890 			    t++;
1891 		    }
1892 		}
1893 		if (t && *t) {
1894 		    write2("Unrecognized part of PERL_MALLOC_OPT: `");
1895 		    write2(t);
1896 		    write2("'\n");
1897 		}
1898 		if (changed)
1899 		    MallocCfg[MallocCfg_cfg_env_read] = 1;
1900 	    }
1901 	}
1902 #endif
1903 	if (bucket == sizeof(MEM_SIZE)*8*BUCKETS_PER_POW2) {
1904 	    MALLOC_UNLOCK;
1905 	    croak("%s", "Out of memory during ridiculously large request");
1906 	}
1907 	if (bucket > max_bucket)
1908 	    max_bucket = bucket;
1909 
1910   	rnu = ( (bucket <= (LOG_OF_MIN_ARENA << BUCKET_POW2_SHIFT))
1911 		? LOG_OF_MIN_ARENA
1912 		: (bucket >> BUCKET_POW2_SHIFT) );
1913 	/* This may be overwritten later: */
1914   	nblks = 1 << (rnu - (bucket >> BUCKET_POW2_SHIFT)); /* how many blocks to get */
1915 	needed = ((MEM_SIZE)1 << rnu) + POW2_OPTIMIZE_SURPLUS(bucket);
1916 	if (nextf[rnu << BUCKET_POW2_SHIFT]) { /* 2048b bucket. */
1917 	    ovp = nextf[rnu << BUCKET_POW2_SHIFT] - 1 + CHUNK_SHIFT;
1918 	    nextf[rnu << BUCKET_POW2_SHIFT]
1919 		= nextf[rnu << BUCKET_POW2_SHIFT]->ov_next;
1920 #ifdef DEBUGGING_MSTATS
1921 	    nmalloc[rnu << BUCKET_POW2_SHIFT]--;
1922 	    start_slack -= M_OVERHEAD;
1923 #endif
1924 	    DEBUG_m(PerlIO_printf(Perl_debug_log,
1925 				  "stealing %ld bytes from %ld arena\n",
1926 				  (long) needed, (long) rnu << BUCKET_POW2_SHIFT));
1927 	} else if (chunk_chain
1928 		   && (ovp = (union overhead*) get_from_chain(needed))) {
1929 	    DEBUG_m(PerlIO_printf(Perl_debug_log,
1930 				  "stealing %ld bytes from chain\n",
1931 				  (long) needed));
1932 	} else if ( (ovp = (union overhead*)
1933 		     get_from_bigger_buckets((rnu << BUCKET_POW2_SHIFT) + 1,
1934 					     needed)) ) {
1935 	    DEBUG_m(PerlIO_printf(Perl_debug_log,
1936 				  "stealing %ld bytes from bigger buckets\n",
1937 				  (long) needed));
1938 	} else if (needed <= sbrked_remains) {
1939 	    ovp = (union overhead *)(last_sbrk_top - sbrked_remains);
1940 	    sbrked_remains -= needed;
1941 	    last_op = (char*)ovp;
1942 	} else
1943 	    ovp = getpages(needed, &nblks, bucket);
1944 
1945 	if (!ovp)
1946 	    return;
1947 	FILL_DEADBEEF((unsigned char*)ovp, needed);
1948 
1949 	/*
1950 	 * Add new memory allocated to that on
1951 	 * free list for this hash bucket.
1952 	 */
1953   	siz = BUCKET_SIZE_NO_SURPLUS(bucket); /* No surplus if nblks > 1 */
1954 #ifdef PACK_MALLOC
1955 	*(u_char*)ovp = bucket;	/* Fill index. */
1956 	if (bucket <= MAX_PACKED) {
1957 	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
1958 	    nblks = N_BLKS(bucket);
1959 #  ifdef DEBUGGING_MSTATS
1960 	    start_slack += BLK_SHIFT(bucket);
1961 #  endif
1962 	} else if (bucket < LOG_OF_MIN_ARENA * BUCKETS_PER_POW2) {
1963 	    ovp = (union overhead *) ((char*)ovp + BLK_SHIFT(bucket));
1964 	    siz -= sizeof(union overhead);
1965 	} else ovp++;		/* One chunk per block. */
1966 #endif /* PACK_MALLOC */
1967   	nextf[bucket] = ovp;
1968 #ifdef DEBUGGING_MSTATS
1969 	nmalloc[bucket] += nblks;
1970 	if (bucket > MAX_PACKED) {
1971 	    start_slack += M_OVERHEAD * nblks;
1972 	}
1973 #endif
1974 
1975   	while (--nblks > 0) {
1976 		ovp->ov_next = (union overhead *)((caddr_t)ovp + siz);
1977 		ovp = (union overhead *)((caddr_t)ovp + siz);
1978   	}
1979 	/* Not all sbrks return zeroed memory.*/
1980 	ovp->ov_next = (union overhead *)NULL;
1981 #ifdef PACK_MALLOC
1982 	if (bucket == 7*BUCKETS_PER_POW2) { /* Special case, explanation is above. */
1983 	    union overhead *n_op = nextf[7*BUCKETS_PER_POW2]->ov_next;
1984 	    nextf[7*BUCKETS_PER_POW2] =
1985 		(union overhead *)((caddr_t)nextf[7*BUCKETS_PER_POW2]
1986 				   - sizeof(union overhead));
1987 	    nextf[7*BUCKETS_PER_POW2]->ov_next = n_op;
1988 	}
1989 #endif /* !PACK_MALLOC */
1990 }
1991 
1992 Free_t
1993 Perl_mfree(void *mp)
1994 {
1995   	register MEM_SIZE size;
1996 	register union overhead *ovp;
1997 	char *cp = (char*)mp;
1998 #ifdef PACK_MALLOC
1999 	u_char bucket;
2000 #endif
2001 
2002 	DEBUG_m(PerlIO_printf(Perl_debug_log,
2003 			      "0x%"UVxf": (%05lu) free\n",
2004 			      PTR2UV(cp), (unsigned long)(PL_an++)));
2005 
2006 	if (cp == NULL)
2007 		return;
2008 #ifdef DEBUGGING
2009 	if (PTR2UV(cp) & (MEM_ALIGNBYTES - 1))
2010 	    croak("%s", "wrong alignment in free()");
2011 #endif
2012 	ovp = (union overhead *)((caddr_t)cp
2013 				- sizeof (union overhead) * CHUNK_SHIFT);
2014 #ifdef PACK_MALLOC
2015 	bucket = OV_INDEX(ovp);
2016 #endif
2017 #ifdef IGNORE_SMALL_BAD_FREE
2018 	if ((bucket >= FIRST_BUCKET_WITH_CHECK)
2019 	    && (OV_MAGIC(ovp, bucket) != MAGIC))
2020 #else
2021 	if (OV_MAGIC(ovp, bucket) != MAGIC)
2022 #endif
2023 	    {
2024 		static int bad_free_warn = -1;
2025 		if (bad_free_warn == -1) {
2026 		    dTHX;
2027 		    char *pbf = PerlEnv_getenv("PERL_BADFREE");
2028 		    bad_free_warn = (pbf) ? atoi(pbf) : 1;
2029 		}
2030 		if (!bad_free_warn)
2031 		    return;
2032 #ifdef RCHECK
2033 #ifdef PERL_CORE
2034 		{
2035 		    dTHX;
2036 		    if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC))
2037 			Perl_warner(aTHX_ packWARN(WARN_MALLOC), "%s free() ignored (RMAGIC, PERL_CORE)",
2038 				    ovp->ov_rmagic == RMAGIC - 1 ?
2039 				    "Duplicate" : "Bad");
2040 		}
2041 #else
2042 		warn("%s free() ignored (RMAGIC)",
2043 		    ovp->ov_rmagic == RMAGIC - 1 ? "Duplicate" : "Bad");
2044 #endif
2045 #else
2046 #ifdef PERL_CORE
2047 		{
2048 		    dTHX;
2049 		    if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC))
2050 			Perl_warner(aTHX_ packWARN(WARN_MALLOC), "%s", "Bad free() ignored (PERL_CORE)");
2051 		}
2052 #else
2053 		warn("%s", "Bad free() ignored");
2054 #endif
2055 #endif
2056 		return;				/* sanity */
2057 	    }
2058 #ifdef RCHECK
2059   	ASSERT(ovp->ov_rmagic == RMAGIC, "chunk's head overwrite");
2060 	if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
2061 	    int i;
2062 	    MEM_SIZE nbytes = ovp->ov_size + 1;
2063 
2064 	    if ((i = nbytes & (RMAGIC_SZ-1))) {
2065 		i = RMAGIC_SZ - i;
2066 		while (i--) {	/* nbytes - RMAGIC_SZ is end of alloced area */
2067 		    ASSERT(((caddr_t)ovp + nbytes - RMAGIC_SZ)[i] == RMAGIC_C,
2068 			   "chunk's tail overwrite");
2069 		}
2070 	    }
2071 	    /* Same at RMAGIC_SZ-aligned RMAGIC */
2072 	    nbytes = (nbytes + (RMAGIC_SZ-1)) & ~(RMAGIC_SZ-1);
2073 	    ASSERT(((u_int *)((caddr_t)ovp + nbytes))[-1] == RMAGIC,
2074 		   "chunk's tail overwrite");
2075 	    FILLCHECK_DEADBEEF((unsigned char*)((caddr_t)ovp + nbytes),
2076 			       BUCKET_SIZE(OV_INDEX(ovp)) - nbytes);
2077 	}
2078 	FILL_DEADBEEF((unsigned char*)(ovp+CHUNK_SHIFT),
2079 		      BUCKET_SIZE_REAL(OV_INDEX(ovp)) + RMAGIC_SZ);
2080 	ovp->ov_rmagic = RMAGIC - 1;
2081 #endif
2082   	ASSERT(OV_INDEX(ovp) < NBUCKETS, "chunk's head overwrite");
2083   	size = OV_INDEX(ovp);
2084 
2085 	MALLOC_LOCK;
2086 	ovp->ov_next = nextf[size];
2087   	nextf[size] = ovp;
2088 	MALLOC_UNLOCK;
2089 }
2090 
2091 /* There is no need to do any locking in realloc (with an exception of
2092    trying to grow in place if we are at the end of the chain).
2093    If somebody calls us from a different thread with the same address,
2094    we are sole anyway.  */
2095 
2096 Malloc_t
2097 Perl_realloc(void *mp, size_t nbytes)
2098 {
2099   	register MEM_SIZE onb;
2100 	union overhead *ovp;
2101   	char *res;
2102 	int prev_bucket;
2103 	register int bucket;
2104 	int incr;		/* 1 if does not fit, -1 if "easily" fits in a
2105 				   smaller bucket, otherwise 0.  */
2106 	char *cp = (char*)mp;
2107 
2108 #if defined(DEBUGGING) || !defined(PERL_CORE)
2109 	MEM_SIZE size = nbytes;
2110 
2111 	if ((long)nbytes < 0)
2112 	    croak("%s", "panic: realloc");
2113 #endif
2114 
2115 	BARK_64K_LIMIT("Reallocation",nbytes,size);
2116 	if (!cp)
2117 		return Perl_malloc(nbytes);
2118 
2119 	ovp = (union overhead *)((caddr_t)cp
2120 				- sizeof (union overhead) * CHUNK_SHIFT);
2121 	bucket = OV_INDEX(ovp);
2122 
2123 #ifdef IGNORE_SMALL_BAD_FREE
2124 	if ((bucket >= FIRST_BUCKET_WITH_CHECK)
2125 	    && (OV_MAGIC(ovp, bucket) != MAGIC))
2126 #else
2127 	if (OV_MAGIC(ovp, bucket) != MAGIC)
2128 #endif
2129 	    {
2130 		static int bad_free_warn = -1;
2131 		if (bad_free_warn == -1) {
2132 		    dTHX;
2133 		    char *pbf = PerlEnv_getenv("PERL_BADFREE");
2134 		    bad_free_warn = (pbf) ? atoi(pbf) : 1;
2135 		}
2136 		if (!bad_free_warn)
2137 		    return Nullch;
2138 #ifdef RCHECK
2139 #ifdef PERL_CORE
2140 		{
2141 		    dTHX;
2142 		    if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC))
2143 			Perl_warner(aTHX_ packWARN(WARN_MALLOC), "%srealloc() %signored",
2144 				    (ovp->ov_rmagic == RMAGIC - 1 ? "" : "Bad "),
2145 				    ovp->ov_rmagic == RMAGIC - 1
2146 				    ? "of freed memory " : "");
2147 		}
2148 #else
2149 		warn2("%srealloc() %signored",
2150 		      (ovp->ov_rmagic == RMAGIC - 1 ? "" : "Bad "),
2151 		      ovp->ov_rmagic == RMAGIC - 1 ? "of freed memory " : "");
2152 #endif
2153 #else
2154 #ifdef PERL_CORE
2155 		{
2156 		    dTHX;
2157 		    if (!PERL_IS_ALIVE || !PL_curcop || ckWARN_d(WARN_MALLOC))
2158 			Perl_warner(aTHX_ packWARN(WARN_MALLOC), "%s",
2159 				    "Bad realloc() ignored");
2160 		}
2161 #else
2162 		warn("%s", "Bad realloc() ignored");
2163 #endif
2164 #endif
2165 		return Nullch;			/* sanity */
2166 	    }
2167 
2168 	onb = BUCKET_SIZE_REAL(bucket);
2169 	/*
2170 	 *  avoid the copy if same size block.
2171 	 *  We are not agressive with boundary cases. Note that it might
2172 	 *  (for a small number of cases) give false negative if
2173 	 *  both new size and old one are in the bucket for
2174 	 *  FIRST_BIG_POW2, but the new one is near the lower end.
2175 	 *
2176 	 *  We do not try to go to 1.5 times smaller bucket so far.
2177 	 */
2178 	if (nbytes > onb) incr = 1;
2179 	else {
2180 #ifdef DO_NOT_TRY_HARDER_WHEN_SHRINKING
2181 	    if ( /* This is a little bit pessimal if PACK_MALLOC: */
2182 		nbytes > ( (onb >> 1) - M_OVERHEAD )
2183 #  ifdef TWO_POT_OPTIMIZE
2184 		|| (bucket == FIRST_BIG_POW2 && nbytes >= LAST_SMALL_BOUND )
2185 #  endif
2186 		)
2187 #else  /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
2188 		prev_bucket = ( (bucket > MAX_PACKED + 1)
2189 				? bucket - BUCKETS_PER_POW2
2190 				: bucket - 1);
2191 	     if (nbytes > BUCKET_SIZE_REAL(prev_bucket))
2192 #endif /* !DO_NOT_TRY_HARDER_WHEN_SHRINKING */
2193 		 incr = 0;
2194 	     else incr = -1;
2195 	}
2196 #ifdef STRESS_REALLOC
2197 	goto hard_way;
2198 #endif
2199 	if (incr == 0) {
2200 	  inplace_label:
2201 #ifdef RCHECK
2202 		/*
2203 		 * Record new allocated size of block and
2204 		 * bound space with magic numbers.
2205 		 */
2206 		if (OV_INDEX(ovp) <= MAX_SHORT_BUCKET) {
2207 		       int i, nb = ovp->ov_size + 1;
2208 
2209 		       if ((i = nb & (RMAGIC_SZ-1))) {
2210 			   i = RMAGIC_SZ - i;
2211 			   while (i--) { /* nb - RMAGIC_SZ is end of alloced area */
2212 			       ASSERT(((caddr_t)ovp + nb - RMAGIC_SZ)[i] == RMAGIC_C, "chunk's tail overwrite");
2213 			   }
2214 		       }
2215 		       /* Same at RMAGIC_SZ-aligned RMAGIC */
2216 		       nb = (nb + (RMAGIC_SZ-1)) & ~(RMAGIC_SZ-1);
2217 		       ASSERT(((u_int *)((caddr_t)ovp + nb))[-1] == RMAGIC,
2218 			      "chunk's tail overwrite");
2219 		       FILLCHECK_DEADBEEF((unsigned char*)((caddr_t)ovp + nb),
2220 					  BUCKET_SIZE(OV_INDEX(ovp)) - nb);
2221 		       if (nbytes > ovp->ov_size + 1 - M_OVERHEAD)
2222 			   FILL_FEEDADAD((unsigned char*)cp + ovp->ov_size + 1 - M_OVERHEAD,
2223 				     nbytes - (ovp->ov_size + 1 - M_OVERHEAD));
2224 		       else
2225 			   FILL_DEADBEEF((unsigned char*)cp + nbytes,
2226 					 nb - M_OVERHEAD + RMAGIC_SZ - nbytes);
2227 			/*
2228 			 * Convert amount of memory requested into
2229 			 * closest block size stored in hash buckets
2230 			 * which satisfies request.  Account for
2231 			 * space used per block for accounting.
2232 			 */
2233 			nbytes += M_OVERHEAD;
2234 			ovp->ov_size = nbytes - 1;
2235 			if ((i = nbytes & (RMAGIC_SZ-1))) {
2236 			    i = RMAGIC_SZ - i;
2237 			    while (i--)	/* nbytes - RMAGIC_SZ is end of alloced area */
2238 				((caddr_t)ovp + nbytes - RMAGIC_SZ)[i]
2239 				    = RMAGIC_C;
2240 			}
2241 			/* Same at RMAGIC_SZ-aligned RMAGIC */
2242 			nbytes = (nbytes + (RMAGIC_SZ-1)) & ~(RMAGIC_SZ - 1);
2243 			((u_int *)((caddr_t)ovp + nbytes))[-1] = RMAGIC;
2244 		}
2245 #endif
2246 		res = cp;
2247 		DEBUG_m(PerlIO_printf(Perl_debug_log,
2248 			      "0x%"UVxf": (%05lu) realloc %ld bytes inplace\n",
2249 			      PTR2UV(res),(unsigned long)(PL_an++),
2250 			      (long)size));
2251 	} else if (incr == 1 && (cp - M_OVERHEAD == last_op)
2252 		   && (onb > (1 << LOG_OF_MIN_ARENA))) {
2253 	    MEM_SIZE require, newarena = nbytes, pow;
2254 	    int shiftr;
2255 
2256 	    POW2_OPTIMIZE_ADJUST(newarena);
2257 	    newarena = newarena + M_OVERHEAD;
2258 	    /* newarena = (newarena + 3) &~ 3; */
2259 	    shiftr = (newarena - 1) >> LOG_OF_MIN_ARENA;
2260 	    pow = LOG_OF_MIN_ARENA + 1;
2261 	    /* apart from this loop, this is O(1) */
2262 	    while (shiftr >>= 1)
2263   		pow++;
2264 	    newarena = (1 << pow) + POW2_OPTIMIZE_SURPLUS(pow * BUCKETS_PER_POW2);
2265 	    require = newarena - onb - M_OVERHEAD;
2266 
2267 	    MALLOC_LOCK;
2268 	    if (cp - M_OVERHEAD == last_op /* We *still* are the last chunk */
2269 		&& getpages_adjacent(require)) {
2270 #ifdef DEBUGGING_MSTATS
2271 		nmalloc[bucket]--;
2272 		nmalloc[pow * BUCKETS_PER_POW2]++;
2273 #endif
2274 		*(cp - M_OVERHEAD) = pow * BUCKETS_PER_POW2; /* Fill index. */
2275 		MALLOC_UNLOCK;
2276 		goto inplace_label;
2277 	    } else {
2278 		MALLOC_UNLOCK;
2279 		goto hard_way;
2280 	    }
2281 	} else {
2282 	  hard_way:
2283 	    DEBUG_m(PerlIO_printf(Perl_debug_log,
2284 			      "0x%"UVxf": (%05lu) realloc %ld bytes the hard way\n",
2285 			      PTR2UV(cp),(unsigned long)(PL_an++),
2286 			      (long)size));
2287 	    if ((res = (char*)Perl_malloc(nbytes)) == NULL)
2288 		return (NULL);
2289 	    if (cp != res)			/* common optimization */
2290 		Copy(cp, res, (MEM_SIZE)(nbytes<onb?nbytes:onb), char);
2291 	    Perl_mfree(cp);
2292 	}
2293   	return ((Malloc_t)res);
2294 }
2295 
2296 Malloc_t
2297 Perl_calloc(register size_t elements, register size_t size)
2298 {
2299     long sz = elements * size;
2300     Malloc_t p = Perl_malloc(sz);
2301 
2302     if (p) {
2303 	memset((void*)p, 0, sz);
2304     }
2305     return p;
2306 }
2307 
2308 char *
2309 Perl_strdup(const char *s)
2310 {
2311     MEM_SIZE l = strlen(s);
2312     char *s1 = (char *)Perl_malloc(l+1);
2313 
2314     Copy(s, s1, (MEM_SIZE)(l+1), char);
2315     return s1;
2316 }
2317 
2318 #ifdef PERL_CORE
2319 int
2320 Perl_putenv(char *a)
2321 {
2322     /* Sometimes system's putenv conflicts with my_setenv() - this is system
2323        malloc vs Perl's free(). */
2324   dTHX;
2325   char *var;
2326   char *val = a;
2327   MEM_SIZE l;
2328   char buf[80];
2329 
2330   while (*val && *val != '=')
2331       val++;
2332   if (!*val)
2333       return -1;
2334   l = val - a;
2335   if (l < sizeof(buf))
2336       var = buf;
2337   else
2338       var = Perl_malloc(l + 1);
2339   Copy(a, var, l, char);
2340   var[l + 1] = 0;
2341   my_setenv(var, val+1);
2342   if (var != buf)
2343       Perl_mfree(var);
2344   return 0;
2345 }
2346 #  endif
2347 
2348 MEM_SIZE
2349 Perl_malloced_size(void *p)
2350 {
2351     union overhead *ovp = (union overhead *)
2352 	((caddr_t)p - sizeof (union overhead) * CHUNK_SHIFT);
2353     int bucket = OV_INDEX(ovp);
2354 #ifdef RCHECK
2355     /* The caller wants to have a complete control over the chunk,
2356        disable the memory checking inside the chunk.  */
2357     if (bucket <= MAX_SHORT_BUCKET) {
2358 	MEM_SIZE size = BUCKET_SIZE_REAL(bucket);
2359 	ovp->ov_size = size + M_OVERHEAD - 1;
2360 	*((u_int *)((caddr_t)ovp + size + M_OVERHEAD - RMAGIC_SZ)) = RMAGIC;
2361     }
2362 #endif
2363     return BUCKET_SIZE_REAL(bucket);
2364 }
2365 
2366 #  ifdef BUCKETS_ROOT2
2367 #    define MIN_EVEN_REPORT 6
2368 #  else
2369 #    define MIN_EVEN_REPORT MIN_BUCKET
2370 #  endif
2371 
2372 int
2373 Perl_get_mstats(pTHX_ perl_mstats_t *buf, int buflen, int level)
2374 {
2375 #ifdef DEBUGGING_MSTATS
2376   	register int i, j;
2377   	register union overhead *p;
2378 	struct chunk_chain_s* nextchain;
2379 
2380   	buf->topbucket = buf->topbucket_ev = buf->topbucket_odd
2381 	    = buf->totfree = buf->total = buf->total_chain = 0;
2382 
2383 	buf->minbucket = MIN_BUCKET;
2384 	MALLOC_LOCK;
2385   	for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
2386   		for (j = 0, p = nextf[i]; p; p = p->ov_next, j++)
2387   			;
2388 		if (i < buflen) {
2389 		    buf->nfree[i] = j;
2390 		    buf->ntotal[i] = nmalloc[i];
2391 		}
2392   		buf->totfree += j * BUCKET_SIZE_REAL(i);
2393   		buf->total += nmalloc[i] * BUCKET_SIZE_REAL(i);
2394 		if (nmalloc[i]) {
2395 		    i % 2 ? (buf->topbucket_odd = i) : (buf->topbucket_ev = i);
2396 		    buf->topbucket = i;
2397 		}
2398   	}
2399 	nextchain = chunk_chain;
2400 	while (nextchain) {
2401 	    buf->total_chain += nextchain->size;
2402 	    nextchain = nextchain->next;
2403 	}
2404 	buf->total_sbrk = goodsbrk + sbrk_slack;
2405 	buf->sbrks = sbrks;
2406 	buf->sbrk_good = sbrk_goodness;
2407 	buf->sbrk_slack = sbrk_slack;
2408 	buf->start_slack = start_slack;
2409 	buf->sbrked_remains = sbrked_remains;
2410 	MALLOC_UNLOCK;
2411 	buf->nbuckets = NBUCKETS;
2412 	if (level) {
2413 	    for (i = MIN_BUCKET ; i < NBUCKETS; i++) {
2414 		if (i >= buflen)
2415 		    break;
2416 		buf->bucket_mem_size[i] = BUCKET_SIZE_NO_SURPLUS(i);
2417 		buf->bucket_available_size[i] = BUCKET_SIZE_REAL(i);
2418 	    }
2419 	}
2420 #endif	/* defined DEBUGGING_MSTATS */
2421 	return 0;		/* XXX unused */
2422 }
2423 /*
2424  * mstats - print out statistics about malloc
2425  *
2426  * Prints two lines of numbers, one showing the length of the free list
2427  * for each size category, the second showing the number of mallocs -
2428  * frees for each size category.
2429  */
2430 void
2431 Perl_dump_mstats(pTHX_ char *s)
2432 {
2433 #ifdef DEBUGGING_MSTATS
2434   	register int i;
2435 	perl_mstats_t buffer;
2436 	UV nf[NBUCKETS];
2437 	UV nt[NBUCKETS];
2438 
2439 	buffer.nfree  = nf;
2440 	buffer.ntotal = nt;
2441 	get_mstats(&buffer, NBUCKETS, 0);
2442 
2443   	if (s)
2444 	    PerlIO_printf(Perl_error_log,
2445 			  "Memory allocation statistics %s (buckets %"IVdf"(%"IVdf")..%"IVdf"(%"IVdf")\n",
2446 			  s,
2447 			  (IV)BUCKET_SIZE_REAL(MIN_BUCKET),
2448 			  (IV)BUCKET_SIZE_NO_SURPLUS(MIN_BUCKET),
2449 			  (IV)BUCKET_SIZE_REAL(buffer.topbucket),
2450 			  (IV)BUCKET_SIZE_NO_SURPLUS(buffer.topbucket));
2451   	PerlIO_printf(Perl_error_log, "%8"IVdf" free:", buffer.totfree);
2452   	for (i = MIN_EVEN_REPORT; i <= buffer.topbucket; i += BUCKETS_PER_POW2) {
2453   		PerlIO_printf(Perl_error_log,
2454 			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
2455 			       ? " %5"UVuf
2456 			       : ((i < 12*BUCKETS_PER_POW2) ? " %3"UVuf : " %"UVuf)),
2457 			      buffer.nfree[i]);
2458   	}
2459 #ifdef BUCKETS_ROOT2
2460 	PerlIO_printf(Perl_error_log, "\n\t   ");
2461   	for (i = MIN_BUCKET + 1; i <= buffer.topbucket_odd; i += BUCKETS_PER_POW2) {
2462   		PerlIO_printf(Perl_error_log,
2463 			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
2464 			       ? " %5"UVuf
2465 			       : ((i < 12*BUCKETS_PER_POW2) ? " %3"UVuf : " %"UVuf)),
2466 			      buffer.nfree[i]);
2467   	}
2468 #endif
2469   	PerlIO_printf(Perl_error_log, "\n%8"IVdf" used:", buffer.total - buffer.totfree);
2470   	for (i = MIN_EVEN_REPORT; i <= buffer.topbucket; i += BUCKETS_PER_POW2) {
2471   		PerlIO_printf(Perl_error_log,
2472 			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
2473 			       ? " %5"IVdf
2474 			       : ((i < 12*BUCKETS_PER_POW2) ? " %3"IVdf : " %"IVdf)),
2475 			      buffer.ntotal[i] - buffer.nfree[i]);
2476   	}
2477 #ifdef BUCKETS_ROOT2
2478 	PerlIO_printf(Perl_error_log, "\n\t   ");
2479   	for (i = MIN_BUCKET + 1; i <= buffer.topbucket_odd; i += BUCKETS_PER_POW2) {
2480   		PerlIO_printf(Perl_error_log,
2481 			      ((i < 8*BUCKETS_PER_POW2 || i == 10*BUCKETS_PER_POW2)
2482 			       ? " %5"IVdf
2483 			       : ((i < 12*BUCKETS_PER_POW2) ? " %3"IVdf : " %"IVdf)),
2484 			      buffer.ntotal[i] - buffer.nfree[i]);
2485   	}
2486 #endif
2487 	PerlIO_printf(Perl_error_log, "\nTotal sbrk(): %"IVdf"/%"IVdf":%"IVdf". Odd ends: pad+heads+chain+tail: %"IVdf"+%"IVdf"+%"IVdf"+%"IVdf".\n",
2488 		      buffer.total_sbrk, buffer.sbrks, buffer.sbrk_good,
2489 		      buffer.sbrk_slack, buffer.start_slack,
2490 		      buffer.total_chain, buffer.sbrked_remains);
2491 #endif /* DEBUGGING_MSTATS */
2492 }
2493 #endif /* lint */
2494 
2495 #ifdef USE_PERL_SBRK
2496 
2497 #   if defined(__MACHTEN_PPC__) || defined(NeXT) || defined(__NeXT__) || defined(PURIFY)
2498 #      define PERL_SBRK_VIA_MALLOC
2499 #   endif
2500 
2501 #   ifdef PERL_SBRK_VIA_MALLOC
2502 
2503 /* it may seem schizophrenic to use perl's malloc and let it call system */
2504 /* malloc, the reason for that is only the 3.2 version of the OS that had */
2505 /* frequent core dumps within nxzonefreenolock. This sbrk routine put an */
2506 /* end to the cores */
2507 
2508 #      ifndef SYSTEM_ALLOC
2509 #         define SYSTEM_ALLOC(a) malloc(a)
2510 #      endif
2511 #      ifndef SYSTEM_ALLOC_ALIGNMENT
2512 #         define SYSTEM_ALLOC_ALIGNMENT MEM_ALIGNBYTES
2513 #      endif
2514 
2515 #   endif  /* PERL_SBRK_VIA_MALLOC */
2516 
2517 static IV Perl_sbrk_oldchunk;
2518 static long Perl_sbrk_oldsize;
2519 
2520 #   define PERLSBRK_32_K (1<<15)
2521 #   define PERLSBRK_64_K (1<<16)
2522 
2523 Malloc_t
2524 Perl_sbrk(int size)
2525 {
2526     IV got;
2527     int small, reqsize;
2528 
2529     if (!size) return 0;
2530 #ifdef PERL_CORE
2531     reqsize = size; /* just for the DEBUG_m statement */
2532 #endif
2533 #ifdef PACK_MALLOC
2534     size = (size + 0x7ff) & ~0x7ff;
2535 #endif
2536     if (size <= Perl_sbrk_oldsize) {
2537 	got = Perl_sbrk_oldchunk;
2538 	Perl_sbrk_oldchunk += size;
2539 	Perl_sbrk_oldsize -= size;
2540     } else {
2541       if (size >= PERLSBRK_32_K) {
2542 	small = 0;
2543       } else {
2544 	size = PERLSBRK_64_K;
2545 	small = 1;
2546       }
2547 #  if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT
2548       size += NEEDED_ALIGNMENT - SYSTEM_ALLOC_ALIGNMENT;
2549 #  endif
2550       got = (IV)SYSTEM_ALLOC(size);
2551 #  if NEEDED_ALIGNMENT > SYSTEM_ALLOC_ALIGNMENT
2552       got = (got + NEEDED_ALIGNMENT - 1) & ~(NEEDED_ALIGNMENT - 1);
2553 #  endif
2554       if (small) {
2555 	/* Chunk is small, register the rest for future allocs. */
2556 	Perl_sbrk_oldchunk = got + reqsize;
2557 	Perl_sbrk_oldsize = size - reqsize;
2558       }
2559     }
2560 
2561     DEBUG_m(PerlIO_printf(Perl_debug_log, "sbrk malloc size %ld (reqsize %ld), left size %ld, give addr 0x%"UVxf"\n",
2562 		    size, reqsize, Perl_sbrk_oldsize, PTR2UV(got)));
2563 
2564     return (void *)got;
2565 }
2566 
2567 #endif /* ! defined USE_PERL_SBRK */
2568