/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1996-1997, 2000-2001 by Sun Microsystems, Inc.
 * All rights reserved.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/


#pragma ident	"%Z%%M% %I% %E% SMI"	/* SVr4.0 1.30 */

/*
 * Memory management: malloc(), realloc(), free(), memalign().
 *
 * The following #-parameters may be redefined:
 *	GETCORE: a function to get more core memory.
 *		GETCORE(0) is assumed to return the next available
 *		address. Default is 'sbrk'.
 *	ERRCORE: the error code as returned by GETCORE.
 *		Default is ((char *)(-1)).
 *	CORESIZE: a desired unit (measured in bytes) to be used
 *		with GETCORE. Default is (1024*ALIGN).
 *
 * This algorithm is based on a best fit strategy with lists of
 * free elts maintained in a self-adjusting binary tree. Each list
 * contains all elts of the same size. The tree is ordered by size.
 * For results on self-adjusting trees, see the paper:
 *	Self-Adjusting Binary Trees,
 *	DD Sleator & RE Tarjan, JACM 1985.
 *
 * The header of a block contains the size of the data part in bytes.
 * Since the size of a block is 0%4, the low two bits of the header
 * are free and used as follows:
 *
 *	BIT0:	1 for busy (block is in use), 0 for free.
 *	BIT1:	if the block is busy, this bit is 1 if the
 *		preceding block in contiguous memory is free.
 *		Otherwise, it is always 0.
 */
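
/*
 * Illustrative example of the header encoding described above (not part
 * of the original sources; assumes BIT0 == 0x1 and BIT1 == 0x2, i.e. the
 * low two bits): a busy 24-byte block whose contiguous predecessor is
 * free carries the header word 24 | BIT0 | BIT1 == 0x1b.  CLRBITS01()
 * recovers the size 24, and ISBIT0()/ISBIT1() recover the two flags
 * (these macros are provided by "mallint.h", included below).
 */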

#include "mallint.h"

static mutex_t __watch_malloc_lock = DEFAULTMUTEX;

static TREE *Root;		/* root of the free tree */
static TREE *Bottom;		/* the last free chunk in the arena */
static char *Baddr;		/* current high address of the arena */

static void t_delete(TREE *);
static void t_splay(TREE *);
static void realfree(void *);
static void *malloc_unlocked(size_t);
static void free_unlocked(void *);
static TREE *morecore(size_t);

static void protect(TREE *);
static void unprotect(TREE *);

#define	FREEPAT	0
#define	LIVEPAT	1

/*
 * Patterns to be copied into freed blocks and allocated blocks.
 * 0xdeadbeef and 0xbaddcafe are invalid pointer values in all programs.
 */
static uint64_t patterns[2] = {
	0xdeadbeefdeadbeef,	/* pattern in a freed block */
	0xbaddcafebaddcafe	/* pattern in an allocated block */
};

static void
copy_pattern(int pat, TREE *tp)
{
	uint64_t pattern = patterns[pat];
	size_t sz = SIZE(tp) / sizeof (uint64_t);
	/* LINTED improper alignment */
	uint64_t *datap = (uint64_t *)DATA(tp);

	while (sz--)
		*datap++ = pattern;
}

/*
 * Keep lists of small blocks, LIFO order.
 */
static TREE *List[MINSIZE/WORDSIZE-1];
static TREE *Last[MINSIZE/WORDSIZE-1];

/* number of blocks to get at one time */
#define	NPS	(WORDSIZE*8)

static void *
smalloc(size_t size)
{
	TREE *tp;
	size_t i;

	ASSERT(size % WORDSIZE == 0);
	/* want to return a unique pointer on malloc(0) */
	if (size == 0)
		size = WORDSIZE;

	/* list to use */
	i = size / WORDSIZE - 1;

	if (List[i] == NULL) {
		TREE *np;
		int n;
		ASSERT((size + WORDSIZE) * NPS >= MINSIZE);

		/* get NPS of these block types */
		if ((np = malloc_unlocked((size + WORDSIZE)*NPS)) == NULL)
			return (NULL);

		/* make them into a linked list */
		for (n = 0, List[i] = np; n < NPS; ++n) {
			tp = np;
			SIZE(tp) = size;
			copy_pattern(FREEPAT, tp);
			if (n == NPS - 1) {
				Last[i] = tp;
				np = NULL;
			} else {
				/* LINTED improper alignment */
				np = NEXT(tp);
			}
			AFTER(tp) = np;
			protect(tp);
		}
	}

	/* allocate from the head of the queue */
	tp = List[i];
	unprotect(tp);
	if ((List[i] = AFTER(tp)) == NULL)
		Last[i] = NULL;
	copy_pattern(LIVEPAT, tp);
	SETBIT0(SIZE(tp));
	protect(tp);
	return (DATA(tp));
}
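
/*
 * Worked example for smalloc() above (sizes are illustrative and depend
 * on the WORDSIZE/MINSIZE values defined in "mallint.h"): with
 * WORDSIZE == 8, a rounded-up request of 8 bytes maps to
 * List[8 / 8 - 1] == List[0].  The first miss on that list carves NPS
 * blocks of 8 + WORDSIZE bytes each out of a single malloc_unlocked()
 * chunk and threads them together through AFTER().
 */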

void *
malloc(size_t size)
{
	void *ret;
	_mutex_lock(&__watch_malloc_lock);
	ret = malloc_unlocked(size);
	_mutex_unlock(&__watch_malloc_lock);
	return (ret);
}

static void *
malloc_unlocked(size_t size)
{
	size_t n;
	TREE *tp, *sp, *tmp;

	COUNT(nmalloc);
	ASSERT(WORDSIZE == ALIGN);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}
	/* make sure that size is 0 mod ALIGN */
	ROUND(size);

	/* small blocks */
	if (size < MINSIZE)
		return (smalloc(size));

	/* search for an elt of the right size */
	sp = NULL;
	n = 0;
	if (Root) {
		tp = Root;
		for (;;) {
			unprotect(tp);
			if (SIZE(tp) >= size) {	/* branch left */
				if (n == 0 || n >= SIZE(tp)) {
					sp = tp;
					n = SIZE(tp);
				}
				if ((tmp = LEFT(tp)) != NULL) {
					protect(tp);
					tp = tmp;
				} else {
					protect(tp);
					break;
				}
			} else {		/* branch right */
				if ((tmp = RIGHT(tp)) != NULL) {
					protect(tp);
					tp = tmp;
				} else {
					protect(tp);
					break;
				}
			}
		}

		if (sp) {
			unprotect(sp);
			t_delete(sp);
		} else if (tp != Root) {
			/* make the searched-to element the root */
			unprotect(tp);
			t_splay(tp);
			protect(tp);
			Root = tp;
		}
	}

	/* if no fit was found in the tree */
	if (sp == NULL) {
		if (Bottom) {
			unprotect(Bottom);
			if (size <= SIZE(Bottom)) {
				sp = Bottom;
				CLRBITS01(SIZE(sp));
			} else {
				protect(Bottom);
				if ((sp = morecore(size)) == NULL)
					return (NULL);
			}
		} else {
			if ((sp = morecore(size)) == NULL)
				return (NULL);
		}
	}

	/* tell the forward neighbor that we're busy */
	/* LINTED improper alignment */
	tmp = NEXT(sp);
	unprotect(tmp);
	CLRBIT1(SIZE(tmp));
	ASSERT(ISBIT0(SIZE(tmp)));
	protect(tmp);

leftover:
	/* if the leftover is enough for a new free piece */
	if ((n = (SIZE(sp) - size)) >= MINSIZE + WORDSIZE) {
		n -= WORDSIZE;
		SIZE(sp) = size;
		/* LINTED improper alignment */
		tp = NEXT(sp);
		SIZE(tp) = n | BIT0;
		realfree(DATA(tp));
	} else if (BOTTOM(sp))
		Bottom = NULL;

	/* return the allocated space */
	copy_pattern(LIVEPAT, sp);
	SIZE(sp) |= BIT0;
	protect(sp);
	return (DATA(sp));
}
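
/*
 * Worked example for the leftover split in malloc_unlocked() above
 * (illustrative sizes; WORDSIZE and MINSIZE come from "mallint.h"):
 * if a 48-byte request is satisfied from a 96-byte free element and
 * 96 - 48 >= MINSIZE + WORDSIZE, the element is trimmed to 48 bytes and
 * the remaining 96 - 48 - WORDSIZE bytes become a new free block (one
 * word is consumed by the new block's header), which is handed to
 * realfree().
 */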

/*
 * realloc().
 * If the block size is increasing, we try forward merging first.
 * This is not best-fit but it avoids some data recopying.
 */
void *
realloc(void *old, size_t size)
{
	TREE *tp, *np;
	size_t ts;
	char *new;

	COUNT(nrealloc);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}

	/* pointer to the block */
	_mutex_lock(&__watch_malloc_lock);
	if (old == NULL) {
		new = malloc_unlocked(size);
		_mutex_unlock(&__watch_malloc_lock);
		return (new);
	}

	/* make sure that size is 0 mod ALIGN */
	ROUND(size);

	/* LINTED improper alignment */
	tp = BLOCK(old);
	unprotect(tp);
	ts = SIZE(tp);

	/* if the block was freed, data has been destroyed. */
	if (!ISBIT0(ts)) {
		/* XXX: complain here! */
		protect(tp);
		_mutex_unlock(&__watch_malloc_lock);
		errno = EINVAL;
		return (NULL);
	}

	CLRBITS01(SIZE(tp));
	if (size == SIZE(tp)) {	/* nothing to do */
		SIZE(tp) = ts;
		protect(tp);
		_mutex_unlock(&__watch_malloc_lock);
		return (old);
	}

	/* special cases involving small blocks */
	if (size < MINSIZE || SIZE(tp) < MINSIZE) {
		if (size == 0) {
			SETOLD01(SIZE(tp), ts);
			free_unlocked(old);
			_mutex_unlock(&__watch_malloc_lock);
			return (NULL);
		}
		goto call_malloc;
	}

	/* block is increasing in size, try merging the next block */
	if (size > SIZE(tp)) {
		/* LINTED improper alignment */
		np = NEXT(tp);
		unprotect(np);
		if (ISBIT0(SIZE(np)))
			protect(np);
		else {
			TREE *tmp;
			ASSERT(SIZE(np) >= MINSIZE);
			ASSERT(!ISBIT1(SIZE(np)));
			SIZE(tp) += SIZE(np) + WORDSIZE;
			if (np != Bottom)
				t_delete(np);
			else
				Bottom = NULL;
			/* LINTED improper alignment */
			tmp = NEXT(np);
			unprotect(tmp);
			CLRBIT1(SIZE(tmp));
			protect(tmp);
		}

		/* not enough & at TRUE end of memory, try extending core */
		if (size > SIZE(tp) && BOTTOM(tp) && GETCORE(0) == Baddr) {
			Bottom = tp;
			protect(Bottom);
			if ((tp = morecore(size)) == NULL) {
				tp = Bottom;
				Bottom = NULL;
				unprotect(tp);
			}
		}
	}

	/* got enough space to use */
	if (size <= SIZE(tp)) {
		size_t n;
chop_big:
		if ((n = (SIZE(tp) - size)) >= MINSIZE + WORDSIZE) {
			n -= WORDSIZE;
			SIZE(tp) = size;
			/* LINTED improper alignment */
			np = NEXT(tp);
			SIZE(np) = n | BIT0;
			realfree(DATA(np));
		} else if (BOTTOM(tp))
			Bottom = NULL;

		/* the previous block may be free */
		SETOLD01(SIZE(tp), ts);
		protect(tp);
		_mutex_unlock(&__watch_malloc_lock);
		return (old);
	}

call_malloc:	/* call malloc to get a new block */
	SETOLD01(SIZE(tp), ts);
	if ((new = malloc_unlocked(size)) != NULL) {
		CLRBITS01(ts);
		if (ts > size)
			ts = size;
		(void) memcpy(new, old, ts);
		free_unlocked(old);
		_mutex_unlock(&__watch_malloc_lock);
		return (new);
	}

	/*
	 * Attempt special case recovery allocations since malloc() failed:
	 *
	 * 1. size <= SIZE(tp) < MINSIZE
	 *	Simply return the existing block
	 * 2. SIZE(tp) < size < MINSIZE
	 *	malloc() may have failed to allocate the chunk of
	 *	small blocks. Try asking for MINSIZE bytes.
	 * 3. size < MINSIZE <= SIZE(tp)
	 *	malloc() may have failed as with 2.  Change to
	 *	MINSIZE allocation which is taken from the beginning
	 *	of the current block.
	 * 4. MINSIZE <= SIZE(tp) < size
	 *	If the previous block is free and the combination of
	 *	these two blocks has at least size bytes, then merge
	 *	the two blocks copying the existing contents backwards.
	 */
	CLRBITS01(SIZE(tp));
	if (SIZE(tp) < MINSIZE) {
		if (size < SIZE(tp))			/* case 1. */ {
			SETOLD01(SIZE(tp), ts);
			protect(tp);
			_mutex_unlock(&__watch_malloc_lock);
			return (old);
		} else if (size < MINSIZE)		/* case 2. */ {
			size = MINSIZE;
			goto call_malloc;
		}
	} else if (size < MINSIZE)			/* case 3. */ {
		size = MINSIZE;
		goto chop_big;
	} else if (ISBIT1(ts)) {
		np = LAST(tp);
		unprotect(np);
		if ((SIZE(np) + SIZE(tp) + WORDSIZE) >= size) {
			ASSERT(!ISBIT0(SIZE(np)));
			t_delete(np);
			SIZE(np) += SIZE(tp) + WORDSIZE;
			/*
			 * Since the copy may overlap, use memmove().
			 */
			(void) memmove(DATA(np), old, SIZE(tp));
			old = DATA(np);
			tp = np;
			CLRBIT1(ts);
			goto chop_big;
		}
		protect(np);
	}
	SETOLD01(SIZE(tp), ts);
	protect(tp);
	_mutex_unlock(&__watch_malloc_lock);
	/* malloc() sets errno */
	return (NULL);
}
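
/*
 * Worked example for recovery case 4 above (illustrative sizes): if the
 * preceding block is free with SIZE(np) == 32, the current block has
 * SIZE(tp) == 40 and the caller asked for 64 bytes, then
 * 32 + 40 + WORDSIZE >= 64, so the two blocks are merged and memmove()
 * slides the old contents down to DATA(np) before the merged block is
 * trimmed at chop_big.
 */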

/*
 * realfree().
 * Coalescing of adjacent free blocks is done first.
 * Then, the new free block is leaf-inserted into the free tree
 * without splaying. This strategy does not guarantee the amortized
 * O(n log n) behaviour for the insert/delete/find set of operations
 * on the tree. In practice, however, free is much less frequent
 * than malloc/realloc and the tree searches performed by these
 * functions adequately keep the tree in balance.
 */
static void
realfree(void *old)
{
	TREE *tp, *sp, *np, *tmp;
	size_t ts, size;

	COUNT(nfree);

	/* pointer to the block */
	/* LINTED improper alignment */
	tp = BLOCK(old);
	unprotect(tp);
	ts = SIZE(tp);
	if (!ISBIT0(ts)) {	/* block is not busy; previously freed? */
		protect(tp);	/* force a watchpoint trap */
		CLRBIT0(SIZE(tp));
		return;
	}
	CLRBITS01(SIZE(tp));
	copy_pattern(FREEPAT, tp);

	/* small block, return it to the tail of its queue */
	if (SIZE(tp) < MINSIZE) {
		ASSERT(SIZE(tp) / WORDSIZE >= 1);
		ts = SIZE(tp) / WORDSIZE - 1;
		AFTER(tp) = NULL;
		protect(tp);
		if (List[ts] == NULL) {
			List[ts] = tp;
			Last[ts] = tp;
		} else {
			sp = Last[ts];
			unprotect(sp);
			AFTER(sp) = tp;
			protect(sp);
			Last[ts] = tp;
		}
		return;
	}

	/* see if coalescing with next block is warranted */
	/* LINTED improper alignment */
	np = NEXT(tp);
	unprotect(np);
	if (ISBIT0(SIZE(np)))
		protect(np);
	else {
		if (np != Bottom)
			t_delete(np);
		SIZE(tp) += SIZE(np) + WORDSIZE;
	}

	/* the same with the preceding block */
	if (ISBIT1(ts)) {
		np = LAST(tp);
		unprotect(np);
		ASSERT(!ISBIT0(SIZE(np)));
		ASSERT(np != Bottom);
		t_delete(np);
		SIZE(np) += SIZE(tp) + WORDSIZE;
		tp = np;
	}

	/* initialize tree info */
	PARENT(tp) = LEFT(tp) = RIGHT(tp) = LINKFOR(tp) = NULL;

	/* set bottom block, or insert in the free tree */
	if (BOTTOM(tp))
		Bottom = tp;
	else {
		/* search for the place to insert */
		if (Root) {
			size = SIZE(tp);
			np = Root;
			for (;;) {
				unprotect(np);
				if (SIZE(np) > size) {
					if ((tmp = LEFT(np)) != NULL) {
						protect(np);
						np = tmp;
					} else {
						LEFT(np) = tp;
						PARENT(tp) = np;
						protect(np);
						break;
					}
				} else if (SIZE(np) < size) {
					if ((tmp = RIGHT(np)) != NULL) {
						protect(np);
						np = tmp;
					} else {
						RIGHT(np) = tp;
						PARENT(tp) = np;
						protect(np);
						break;
					}
				} else {
					if ((sp = PARENT(np)) != NULL) {
						unprotect(sp);
						if (np == LEFT(sp))
							LEFT(sp) = tp;
						else
							RIGHT(sp) = tp;
						PARENT(tp) = sp;
						protect(sp);
					} else
						Root = tp;

					/* insert to head of list */
					if ((sp = LEFT(np)) != NULL) {
						unprotect(sp);
						PARENT(sp) = tp;
						protect(sp);
					}
					LEFT(tp) = sp;

					if ((sp = RIGHT(np)) != NULL) {
						unprotect(sp);
						PARENT(sp) = tp;
						protect(sp);
					}
					RIGHT(tp) = sp;

					/* doubly link list */
					LINKFOR(tp) = np;
					LINKBAK(np) = tp;
					SETNOTREE(np);
					protect(np);

					break;
				}
			}
		} else {
			Root = tp;
		}
	}

	/*
	 * Tell next block that this one is free.
	 * The first WORD of the next block contains self's address.
	 */
	/* LINTED improper alignment */
	tmp = NEXT(tp);
	unprotect(tmp);
	/* LINTED improper alignment */
	*(SELFP(tp)) = tp;
	SETBIT1(SIZE(tmp));
	ASSERT(ISBIT0(SIZE(tmp)));
	protect(tmp);
	protect(tp);
}
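
/*
 * Note on the bookkeeping at the end of realfree() above (descriptive
 * only): the freed block's address is recorded in the first word of the
 * following block (via SELFP()) and BIT1 is set in that block's header,
 * so a later free of the following block can locate this one with
 * LAST() and coalesce backwards without scanning the arena.
 */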

/*
 * Get more core. Gaps in memory are noted as busy blocks.
 */
static TREE *
morecore(size_t size)
{
	TREE *tp;
	size_t n, offset, requestsize;
	char *addr;

	/* compute new amount of memory to get */
	tp = Bottom;
	n = size + 2 * WORDSIZE;
	addr = GETCORE(0);

	if (addr == ERRCORE)
		/* errno set by GETCORE sbrk */
		return (NULL);

	/* need to pad size out so that addr is aligned */
	if ((((size_t)addr) % ALIGN) != 0)
		offset = ALIGN - (size_t)addr % ALIGN;
	else
		offset = 0;

	if (tp)
		unprotect(tp);

	/* if not segmented memory, what we need may be smaller */
	if (addr == Baddr) {
		n -= WORDSIZE;
		if (tp != NULL)
			n -= SIZE(tp);
	}

	/* get a multiple of CORESIZE */
	n = ((n - 1) / CORESIZE + 1) * CORESIZE;
	requestsize = n + offset;

	/* check if the request could overflow in GETCORE */
	if (requestsize > MAX_MALLOC - (size_t)addr) {
		if (tp)
			protect(tp);
		errno = ENOMEM;
		return (NULL);
	}

	if (requestsize > MAX_GETCORE) {
		intptr_t delta;
		/*
		 * the value required is too big for GETCORE() to deal with
		 * in one go, so use GETCORE() at most 2 times instead.
		 * Argument to GETCORE() must be multiple of ALIGN.
		 * If not, GETCORE(-MAX_GETCORE) will not return brk point
		 * to previous value, but will be ALIGN more.
		 * This would leave a small hole.
		 */
		delta = MAX_GETCORE;
		while (delta > 0) {
			if (GETCORE(delta) == ERRCORE) {
				if (tp)
					protect(tp);
				if (addr != GETCORE(0))
					(void) GETCORE(-MAX_GETCORE);
				return (NULL);
			}
			requestsize -= MAX_GETCORE;
			delta = requestsize;
		}
	} else if (GETCORE(requestsize) == ERRCORE) {
		if (tp)
			protect(tp);
		return (NULL);
	}

	/* contiguous memory */
	if (addr == Baddr) {
		ASSERT(offset == 0);
		if (tp) {
			addr = ((char *)tp);
			n += SIZE(tp) + 2 * WORDSIZE;
		} else {
			addr = Baddr - WORDSIZE;
			n += WORDSIZE;
		}
	} else {
		addr += offset;
	}

	/* new bottom address */
	Baddr = addr + n;

	/* new bottom block */
	/* LINTED improper alignment */
	tp = ((TREE *)addr);
	SIZE(tp) = n - 2 * WORDSIZE;
	ASSERT((SIZE(tp) % ALIGN) == 0);

	/* reserve the last word to head any noncontiguous memory */
	/* LINTED improper alignment */
	SETBIT0(SIZE(NEXT(tp)));

	/* non-contiguous memory, free old bottom block */
	if (Bottom && Bottom != tp) {
		SETBIT0(SIZE(Bottom));
		realfree(DATA(Bottom));
	}

	return (tp);
}
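
/*
 * Worked example for the rounding in morecore() above (illustrative;
 * CORESIZE defaults to 1024*ALIGN per the comment at the top of the
 * file): a raw need of n == 5000 bytes with CORESIZE == 8192 becomes
 * ((5000 - 1) / 8192 + 1) * 8192 == 8192, i.e. n is rounded up to the
 * next multiple of CORESIZE before GETCORE() is asked for more memory.
 */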

/*
 * Utility function to avoid protecting a tree node twice.
 * Return true if tp is in the NULL-terminated array of tree nodes.
 */
static int
in_list(TREE *tp, TREE **npp)
{
	TREE *sp;

	while ((sp = *npp++) != NULL)
		if (tp == sp)
			return (1);
	return (0);
}

/*
 * Tree rotation functions (BU: bottom-up, TD: top-down).
 * All functions are entered with the arguments unprotected.
 * They must return in the same condition, with all other elements
 * that have been unprotected during the operation re-protected.
 */
static void
LEFT1(TREE *x, TREE *y)
{
	TREE *node[3];
	TREE **npp = node;
	TREE *tp;

	if ((RIGHT(x) = LEFT(y)) != NULL) {
		unprotect(*npp++ = RIGHT(x));
		PARENT(RIGHT(x)) = x;
	}
	if ((PARENT(y) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			LEFT(PARENT(y)) = y;
		else
			RIGHT(PARENT(y)) = y;
	}
	LEFT(y) = x;
	PARENT(x) = y;

	*npp = NULL;
	npp = node;
	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && !in_list(tp, npp))
			protect(tp);
}

static void
RIGHT1(TREE *x, TREE *y)
{
	TREE *node[3];
	TREE **npp = node;
	TREE *tp;

	if ((LEFT(x) = RIGHT(y)) != NULL) {
		unprotect(*npp++ = LEFT(x));
		PARENT(LEFT(x)) = x;
	}
	if ((PARENT(y) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			LEFT(PARENT(y)) = y;
		else
			RIGHT(PARENT(y)) = y;
	}
	RIGHT(y) = x;
	PARENT(x) = y;

	*npp = NULL;
	npp = node;
	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && !in_list(tp, npp))
			protect(tp);
}

static void
BULEFT2(TREE *x, TREE *y, TREE *z)
{
	TREE *node[4];
	TREE **npp = node;
	TREE *tp;

	if ((RIGHT(x) = LEFT(y)) != NULL) {
		unprotect(*npp++ = RIGHT(x));
		PARENT(RIGHT(x)) = x;
	}
	if ((RIGHT(y) = LEFT(z)) != NULL) {
		unprotect(*npp++ = RIGHT(y));
		PARENT(RIGHT(y)) = y;
	}
	if ((PARENT(z) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			LEFT(PARENT(z)) = z;
		else
			RIGHT(PARENT(z)) = z;
	}
	LEFT(z) = y;
	PARENT(y) = z;
	LEFT(y) = x;
	PARENT(x) = y;

	*npp = NULL;
	npp = node;
	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && tp != z && !in_list(tp, npp))
			protect(tp);
}

static void
BURIGHT2(TREE *x, TREE *y, TREE *z)
{
	TREE *node[4];
	TREE **npp = node;
	TREE *tp;

	if ((LEFT(x) = RIGHT(y)) != NULL) {
		unprotect(*npp++ = LEFT(x));
		PARENT(LEFT(x)) = x;
	}
	if ((LEFT(y) = RIGHT(z)) != NULL) {
		unprotect(*npp++ = LEFT(y));
		PARENT(LEFT(y)) = y;
	}
	if ((PARENT(z) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			LEFT(PARENT(z)) = z;
		else
			RIGHT(PARENT(z)) = z;
	}
	RIGHT(z) = y;
	PARENT(y) = z;
	RIGHT(y) = x;
	PARENT(x) = y;

	*npp = NULL;
	npp = node;
	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && tp != z && !in_list(tp, npp))
			protect(tp);
}

static void
TDLEFT2(TREE *x, TREE *y, TREE *z)
{
	TREE *node[3];
	TREE **npp = node;
	TREE *tp;

	if ((RIGHT(y) = LEFT(z)) != NULL) {
		unprotect(*npp++ = RIGHT(y));
		PARENT(RIGHT(y)) = y;
	}
	if ((PARENT(z) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			LEFT(PARENT(z)) = z;
		else
			RIGHT(PARENT(z)) = z;
	}
	PARENT(x) = z;
	LEFT(z) = x;

	*npp = NULL;
	npp = node;
	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && tp != z && !in_list(tp, npp))
			protect(tp);
}

#if 0	/* Not used, for now */
static void
TDRIGHT2(TREE *x, TREE *y, TREE *z)
{
	TREE *node[3];
	TREE **npp = node;
	TREE *tp;

	if ((LEFT(y) = RIGHT(z)) != NULL) {
		unprotect(*npp++ = LEFT(y));
		PARENT(LEFT(y)) = y;
	}
	if ((PARENT(z) = PARENT(x)) != NULL) {
		unprotect(*npp++ = PARENT(x));
		if (LEFT(PARENT(x)) == x)
			LEFT(PARENT(z)) = z;
		else
			RIGHT(PARENT(z)) = z;
	}
	PARENT(x) = z;
	RIGHT(z) = x;

	*npp = NULL;
	npp = node;
	while ((tp = *npp++) != NULL)
		if (tp != x && tp != y && tp != z && !in_list(tp, npp))
			protect(tp);
}
#endif

/*
 * Delete a tree element
 */
static void
t_delete(TREE *op)
{
	TREE *tp, *sp, *gp;

	/* if this is a non-tree node */
	if (ISNOTREE(op)) {
		tp = LINKBAK(op);
		unprotect(tp);
		if ((sp = LINKFOR(op)) != NULL) {
			unprotect(sp);
			LINKBAK(sp) = tp;
			protect(sp);
		}
		LINKFOR(tp) = sp;
		protect(tp);
		return;
	}

	/* make op the root of the tree */
	if (PARENT(op))
		t_splay(op);

	/* if this is the start of a list */
	if ((tp = LINKFOR(op)) != NULL) {
		unprotect(tp);
		PARENT(tp) = NULL;
		if ((sp = LEFT(op)) != NULL) {
			unprotect(sp);
			PARENT(sp) = tp;
			protect(sp);
		}
		LEFT(tp) = sp;

		if ((sp = RIGHT(op)) != NULL) {
			unprotect(sp);
			PARENT(sp) = tp;
			protect(sp);
		}
		RIGHT(tp) = sp;

		Root = tp;
		protect(tp);
		return;
	}

	/* if op has a non-null left subtree */
	if ((tp = LEFT(op)) != NULL) {
		unprotect(tp);
		PARENT(tp) = NULL;
		if (RIGHT(op)) {
			/* make the right-end of the left subtree its root */
			while ((sp = RIGHT(tp)) != NULL) {
				unprotect(sp);
				if ((gp = RIGHT(sp)) != NULL) {
					unprotect(gp);
					TDLEFT2(tp, sp, gp);
					protect(sp);
					protect(tp);
					tp = gp;
				} else {
					LEFT1(tp, sp);
					protect(tp);
					tp = sp;
				}
			}

			/* hook the right subtree of op to the above elt */
			RIGHT(tp) = sp = RIGHT(op);
			unprotect(sp);
			PARENT(sp) = tp;
			protect(sp);
		}
		protect(tp);
	} else if ((tp = RIGHT(op)) != NULL) {	/* no left subtree */
		unprotect(tp);
		PARENT(tp) = NULL;
		protect(tp);
	}

	Root = tp;
}

/*
 * Bottom up splaying (simple version).
 * The basic idea is to roughly cut in half the
 * path from Root to tp and make tp the new root.
 */
static void
t_splay(TREE *tp)
{
	TREE *pp, *gp;

	/* iterate until tp is the root */
	while ((pp = PARENT(tp)) != NULL) {
		unprotect(pp);
		/* grandparent of tp */
		gp = PARENT(pp);
		if (gp)
			unprotect(gp);

		/* tp is a left child */
		if (LEFT(pp) == tp) {
			if (gp && LEFT(gp) == pp) {
				BURIGHT2(gp, pp, tp);
				protect(gp);
			} else {
				if (gp)
					protect(gp);
				RIGHT1(pp, tp);
			}
		} else {
			ASSERT(RIGHT(pp) == tp);
			if (gp && RIGHT(gp) == pp) {
				BULEFT2(gp, pp, tp);
				protect(gp);
			} else {
				if (gp)
					protect(gp);
				LEFT1(pp, tp);
			}
		}
		protect(pp);
		unprotect(tp);	/* just in case */
	}
}
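
/*
 * Note on t_splay() above (descriptive only): the two-level rotations
 * BURIGHT2()/BULEFT2() handle the "zig-zig" steps where tp and its
 * parent are children on the same side, while the single rotations
 * RIGHT1()/LEFT1() cover the remaining zig-zag and root-child steps;
 * this is what roughly halves the Root-to-tp path as described above.
 */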

void
free(void *old)
{
	_mutex_lock(&__watch_malloc_lock);
	free_unlocked(old);
	_mutex_unlock(&__watch_malloc_lock);
}


static void
free_unlocked(void *old)
{
	if (old != NULL)
		realfree(old);
}


/*
 * memalign(align,nbytes)
 *
 * Description:
 *	Returns a block of specified size on a specified alignment boundary.
 *
 * Algorithm:
 *	Malloc enough to ensure that a block can be aligned correctly.
 *	Find the alignment point and return the fragments
 *	before and after the block.
 *
 * Errors:
 *	Returns NULL and sets errno as follows:
 *	[EINVAL]
 *		if nbytes = 0,
 *		or if alignment is misaligned,
 *		or if the heap has been detectably corrupted.
 *	[ENOMEM]
 *		if the requested memory could not be allocated.
 */

#pragma weak memalign = _memalign

#define	misaligned(p)	((unsigned)(p) & 3)
		/* 4-byte "word" alignment is considered ok in LP64 */
#define	nextblk(p, size)	((TREE *)((char *)(p) + (size)))

void *
_memalign(size_t align, size_t nbytes)
{
	size_t	reqsize;	/* Num of bytes to get from malloc() */
	TREE	*p;		/* Ptr returned from malloc() */
	TREE	*blk;		/* For addressing fragment blocks */
	size_t	blksize;	/* Current (shrinking) block size */
	TREE	*alignedp;	/* Ptr to properly aligned boundary */
	TREE	*aligned_blk;	/* The block to be returned */
	size_t	frag_size;	/* size of fragments fore and aft */
	size_t	x;

	/*
	 * check for valid size and alignment parameters
	 * MAX_ALIGN check prevents overflow in later calculation.
	 */
	if (nbytes == 0 || misaligned(align) || align == 0 ||
	    align > MAX_ALIGN) {
		errno = EINVAL;
		return (NULL);
	}

	/*
	 * Malloc enough memory to guarantee that the result can be
	 * aligned correctly. The worst case is when malloc returns
	 * a block so close to the next alignment boundary that a
	 * fragment of minimum size cannot be created.  In order to
	 * make sure we can handle this, we need to force the
	 * alignment to be at least as large as the minimum frag size
	 * (MINSIZE + WORDSIZE).
	 */

	/* check for size that could overflow ROUND calculation */
	if (nbytes > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}
	ROUND(nbytes);
	if (nbytes < MINSIZE)
		nbytes = MINSIZE;
	ROUND(align);
	while (align < MINSIZE + WORDSIZE)
		align <<= 1;
	reqsize = nbytes + align + (MINSIZE + WORDSIZE);
	/* check for overflow */
	if (reqsize < nbytes) {
		errno = ENOMEM;
		return (NULL);
	}
	p = (TREE *) malloc(reqsize);
	if (p == (TREE *) NULL) {
		/* malloc sets errno */
		return (NULL);
	}
	_mutex_lock(&__watch_malloc_lock);

	/*
	 * get size of the entire block (overhead and all)
	 */
	/* LINTED improper alignment */
	blk = BLOCK(p);			/* back up to get length word */
	unprotect(blk);
	blksize = SIZE(blk);
	CLRBITS01(blksize);

	/*
	 * locate the proper alignment boundary within the block.
	 */
	x = (size_t)p;
	if (x % align != 0)
		x += align - (x % align);
	alignedp = (TREE *)x;
	/* LINTED improper alignment */
	aligned_blk = BLOCK(alignedp);

	/*
	 * Check out the space to the left of the alignment
	 * boundary, and split off a fragment if necessary.
	 */
	frag_size = (size_t)aligned_blk - (size_t)blk;
	if (frag_size != 0) {
		/*
		 * Create a fragment to the left of the aligned block.
		 */
		if (frag_size < MINSIZE + WORDSIZE) {
			/*
			 * Not enough space. So make the split
			 * at the other end of the alignment unit.
			 * We know this yields enough space, because
			 * we forced align >= MINSIZE + WORDSIZE above.
			 */
			frag_size += align;
			/* LINTED improper alignment */
			aligned_blk = nextblk(aligned_blk, align);
		}
		blksize -= frag_size;
		SIZE(aligned_blk) = blksize | BIT0;
		frag_size -= WORDSIZE;
		SIZE(blk) = frag_size | BIT0 | ISBIT1(SIZE(blk));
		free_unlocked(DATA(blk));
		/*
		 * free_unlocked(DATA(blk)) has the side-effect of calling
		 * protect() on the block following blk, that is, aligned_blk.
		 * We recover from this by unprotect()ing it here.
		 */
		unprotect(aligned_blk);
	}

	/*
	 * Is there a (sufficiently large) fragment to the
	 * right of the aligned block?
	 */
	frag_size = blksize - nbytes;
	if (frag_size >= MINSIZE + WORDSIZE) {
		/*
		 * split and free a fragment on the right
		 */
		blksize = SIZE(aligned_blk);
		SIZE(aligned_blk) = nbytes;
		/* LINTED improper alignment */
		blk = NEXT(aligned_blk);
		SETOLD01(SIZE(aligned_blk), blksize);
		frag_size -= WORDSIZE;
		SIZE(blk) = frag_size | BIT0;
		free_unlocked(DATA(blk));
	}
	copy_pattern(LIVEPAT, aligned_blk);
	protect(aligned_blk);
	_mutex_unlock(&__watch_malloc_lock);
	return (DATA(aligned_blk));
}
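
#if 0	/* usage sketch only, not compiled */
/*
 * Example use of the memalign() implemented above (sizes illustrative):
 * request 100 bytes aligned on a 64-byte boundary.  The result carries a
 * normal block header, so it can be passed to free().
 */
static void
memalign_example(void)
{
	void *p = memalign(64, 100);

	if (p != NULL) {
		/* ((uintptr_t)p % 64) == 0 at this point */
		free(p);
	}
}
#endif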

#pragma weak valloc = _valloc

void *
_valloc(size_t size)
{
	static unsigned pagesize;
	if (!pagesize)
		pagesize = _sysconf(_SC_PAGESIZE);
	return (memalign(pagesize, size));
}

/*
 * libc does not define a weak calloc as _calloc
 */
void *
calloc(size_t num, size_t size)
{
	void *mp;
	size_t total;

	total = num * size;

	/* check for overflow */
	if (num != 0 && total / num != size) {
		errno = ENOMEM;
		return (NULL);
	}
	if ((mp = malloc(total)) != NULL)
		(void) memset(mp, 0, total);
	return (mp);
}

#pragma weak cfree = _cfree

/* ARGSUSED1 */
void
_cfree(void *p, size_t num, size_t size)
{
	free(p);
}

/*
 * mallopt -- Do nothing
 */

#pragma weak mallopt = _mallopt

/* ARGSUSED */
int
_mallopt(int cmd, int value)
{
	return (0);
}

/*
 * mallinfo -- Do nothing
 */

#pragma weak mallinfo = _mallinfo

struct mallinfo
_mallinfo()
{
	static struct mallinfo __mallinfo;

	return (__mallinfo);
}


typedef struct {
	long cmd;
	prwatch_t prwatch;
} ctl_t;

static pid_t my_pid = 0;	/* to check for whether we fork()d */
static int dont_watch = 0;
static int do_stop = 0;
static int ctlfd = -1;
struct stat ctlstatb;
static int wflags = WA_WRITE;

static void
init_watch()
{
	char str[80];
	char *s;

	my_pid = getpid();

	dont_watch = 1;

	if ((s = getenv("MALLOC_DEBUG")) == NULL)
		return;

	s = strncpy(str, s, sizeof (str));
	str[sizeof (str) - 1] = '\0';	/* strncpy() need not NUL-terminate */
	while (s != NULL) {
		char *e = strchr(s, ',');
		if (e)
			*e++ = '\0';
		if (strcmp(s, "STOP") == 0)
			do_stop = 1;
		else if (strcmp(s, "WATCH") == 0)
			dont_watch = 0;
		else if (strcmp(s, "RW") == 0) {
			dont_watch = 0;
			wflags = WA_READ|WA_WRITE;
		}
		s = e;
	}

	if (dont_watch)
		return;

	if ((ctlfd = open("/proc/self/ctl", O_WRONLY)) < 0 ||
	    fstat(ctlfd, &ctlstatb) != 0) {
		if (ctlfd >= 0)
			(void) close(ctlfd);
		ctlfd = -1;
		dont_watch = 1;
		return;
	}
	/* close-on-exec */
	(void) fcntl(ctlfd, F_SETFD, 1);

	if (do_stop) {
		int pfd;
		pstatus_t pstatus;
		struct {

static void
init_watch()
{
	char str[80];
	char *s;

	my_pid = getpid();

	dont_watch = 1;

	if ((s = getenv("MALLOC_DEBUG")) == NULL)
		return;

	s = strncpy(str, s, sizeof (str));
	str[sizeof (str) - 1] = '\0';	/* strncpy() need not terminate */
	while (s != NULL) {
		char *e = strchr(s, ',');
		if (e)
			*e++ = '\0';
		if (strcmp(s, "STOP") == 0)
			do_stop = 1;
		else if (strcmp(s, "WATCH") == 0)
			dont_watch = 0;
		else if (strcmp(s, "RW") == 0) {
			dont_watch = 0;
			wflags = WA_READ|WA_WRITE;
		}
		s = e;
	}

	if (dont_watch)
		return;

	if ((ctlfd = open("/proc/self/ctl", O_WRONLY)) < 0 ||
	    fstat(ctlfd, &ctlstatb) != 0) {
		if (ctlfd >= 0)
			(void) close(ctlfd);
		ctlfd = -1;
		dont_watch = 1;
		return;
	}
	/* close-on-exec */
	(void) fcntl(ctlfd, F_SETFD, 1);

	if (do_stop) {
		int pfd;
		pstatus_t pstatus;
		struct {
			long cmd;
			fltset_t fltset;
		} ctl;

		/*
		 * Cooperate with any /proc controller that may already
		 * have set other stop-on-fault flags: merge FLTWATCH
		 * into the existing fault set rather than replacing it.
		 */
		premptyset(&ctl.fltset);
		if ((pfd = open("/proc/self/status", O_RDONLY)) >= 0) {
			if (read(pfd, &pstatus, sizeof (pstatus))
			    == sizeof (pstatus))
				ctl.fltset = pstatus.pr_flttrace;
			(void) close(pfd);
		}
		praddset(&ctl.fltset, FLTWATCH);
		ctl.cmd = PCSFAULT;
		(void) write(ctlfd, &ctl, sizeof (ctl));
	}
}
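
/*
 * For reference, a self-contained sketch of the /proc watchpoint request
 * that protect() and unprotect() below issue for heap blocks, applied
 * here to an ordinary static buffer.  The function and buffer names are
 * hypothetical and the block is not compiled into this library.
 */
#if 0
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#include <procfs.h>

static void
watch_buffer_demo(void)
{
	static char buf[64];		/* the area to be watched */
	struct {
		long cmd;
		prwatch_t prwatch;
	} ctl;
	int fd = open("/proc/self/ctl", O_WRONLY);

	if (fd < 0)
		return;

	/* establish a watched area over buf; writes to it will trap */
	ctl.cmd = PCWATCH;
	ctl.prwatch.pr_vaddr = (uintptr_t)buf;
	ctl.prwatch.pr_size = sizeof (buf);
	ctl.prwatch.pr_wflags = WA_WRITE;
	(void) write(fd, &ctl, sizeof (ctl));

	/* ... a store into buf here would incur a FLTWATCH fault ... */

	/* clear the watched area: same address, pr_wflags == 0 */
	ctl.prwatch.pr_wflags = 0;
	(void) write(fd, &ctl, sizeof (ctl));
	(void) close(fd);
}
#endif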

static int
nowatch()
{
	struct stat statb;

	if (dont_watch)
		return (1);
	if (ctlfd < 0)	/* first time */
		init_watch();
	else if (fstat(ctlfd, &statb) != 0 ||
	    statb.st_dev != ctlstatb.st_dev ||
	    statb.st_ino != ctlstatb.st_ino) {
		/*
		 * Someone closed our file descriptor.
		 * Just open another one.
		 */
		if ((ctlfd = open("/proc/self/ctl", O_WRONLY)) < 0 ||
		    fstat(ctlfd, &ctlstatb) != 0) {
			if (ctlfd >= 0)
				(void) close(ctlfd);
			ctlfd = -1;
			dont_watch = 1;
			return (1);
		}
		/* close-on-exec */
		(void) fcntl(ctlfd, F_SETFD, 1);
	}
	if (my_pid != getpid()) {
		/*
		 * We fork()d since the last call to the allocator.
		 * Watchpoints are not inherited across fork(), so
		 * give up watching in the child.
		 * XXX: how to recover from this ???
		 */
		dont_watch = 1;
		(void) close(ctlfd);
		ctlfd = -1;
	}
	return (dont_watch);
}

static void
protect(TREE *tp)
{
	ctl_t ctl;
	size_t size, sz;

	if (nowatch())
		return;
	if (tp == NULL || DATA(tp) == Baddr)
		return;

	sz = size = SIZE(tp);
	CLRBITS01(size);
	if (size == 0)
		return;
	if (ISBIT0(sz))		/* block is busy, protect only the head */
		size = 0;
	ctl.cmd = PCWATCH;
	ctl.prwatch.pr_vaddr = (uintptr_t)tp;
	ctl.prwatch.pr_size = size + WORDSIZE;
	ctl.prwatch.pr_wflags = wflags;
	(void) write(ctlfd, &ctl, sizeof (ctl));
}

static void
unprotect(TREE *tp)
{
	ctl_t ctl;

	if (nowatch())
		return;
	if (tp == NULL || DATA(tp) == Baddr)
		return;

	ctl.cmd = PCWATCH;
	ctl.prwatch.pr_vaddr = (uintptr_t)tp;
	ctl.prwatch.pr_size = WORDSIZE;		/* size is arbitrary */
	ctl.prwatch.pr_wflags = 0;		/* clear the watched area */
	(void) write(ctlfd, &ctl, sizeof (ctl));
}
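
/*
 * A worked example of the watched-area sizes chosen by protect() above;
 * the block size is illustrative and WORDSIZE is the per-block header
 * size used throughout this file:
 *
 *	free block with SIZE(tp) == 64:
 *		pr_size == 64 + WORDSIZE, so the header and the entire
 *		(dead) body are watched; any write by the application
 *		(or read, under MALLOC_DEBUG=RW) traps immediately.
 *	busy block (BIT0 set in SIZE(tp)):
 *		size is forced to 0 and pr_size == WORDSIZE, so only
 *		the header is watched and the application can use the
 *		data area it was handed without tripping a watchpoint.
 */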