/*	$NetBSD: kern_proc.c,v 1.56 2003/01/22 12:52:16 yamt Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_proc.c,v 1.56 2003/01/22 12:52:16 yamt Exp $");

#include "opt_kstack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <ufs/ufs/quota.h>
#include <sys/uio.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/mbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/ras.h>
#include <sys/sa.h>
#include <sys/savar.h>

/*
 * Structure associated with user caching.
 */
struct uidinfo {
	LIST_ENTRY(uidinfo) ui_hash;
	uid_t	ui_uid;
	long	ui_proccnt;
};
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
u_long uihash;		/* size of hash table - 1 */

/*
 * Other process lists
 */
struct pidhashhead *pidhashtbl;
u_long pidhash;
struct pgrphashhead *pgrphashtbl;
u_long pgrphash;

struct proclist allproc;
struct proclist zombproc;	/* resources have been freed */

/*
 * Process list locking:
 *
 * We have two types of locks on the proclists: read locks and write
 * locks.  Read locks can be used in interrupt context, so while we
 * hold the write lock, we must also block clock interrupts to
 * lock out any scheduling changes that may happen in interrupt
 * context.
 *
 * The proclist lock locks the following structures:
 *
 *	allproc
 *	zombproc
 *	pidhashtbl
 */
struct lock proclist_lock;
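
/*
 * A sketch of the intended usage (illustrative only; the hypothetical
 * my_proc_scan() is not part of this file).  Readers bracket their
 * traversal with proclist_lock_read()/proclist_unlock_read(); writers
 * must keep the splclock() value returned by proclist_lock_write() and
 * hand it back to proclist_unlock_write():
 *
 *	void
 *	my_proc_scan(void)
 *	{
 *		struct proc *p;
 *
 *		proclist_lock_read();
 *		LIST_FOREACH(p, &allproc, p_list)
 *			... examine p; do not sleep here ...
 *		proclist_unlock_read();
 *	}
 *
 *	int s;
 *
 *	s = proclist_lock_write();
 *	... add or remove list entries ...
 *	proclist_unlock_write(s);
 */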

/*
 * Locking of this proclist is special; it's accessed in a
 * critical section of process exit, and thus locking it can't
 * modify interrupt state.  We use a simple spin lock for this
 * proclist.  Processes on this proclist are also on zombproc;
 * we use the p_hash member to link up to deadproc.
 */
struct simplelock deadproc_slock;
struct proclist deadproc;	/* dead, but not yet undead */

struct pool proc_pool;
struct pool lwp_pool;
struct pool lwp_uc_pool;
struct pool pcred_pool;
struct pool plimit_pool;
struct pool pstats_pool;
struct pool pgrp_pool;
struct pool rusage_pool;
struct pool ras_pool;
struct pool sadata_pool;
struct pool saupcall_pool;
struct pool ptimer_pool;

/*
 * The process list descriptors, used during pid allocation and
 * by sysctl.  No locking on this data structure is needed since
 * it is completely static.
 */
const struct proclist_desc proclists[] = {
	{ &allproc },
	{ &zombproc },
	{ NULL },
};

static void orphanpg __P((struct pgrp *));
#ifdef DEBUG
void pgrpdump __P((void));
#endif

/*
 * Initialize global process hashing structures.
 */
void
procinit()
{
	const struct proclist_desc *pd;

	for (pd = proclists; pd->pd_list != NULL; pd++)
		LIST_INIT(pd->pd_list);

	spinlockinit(&proclist_lock, "proclk", 0);

	LIST_INIT(&deadproc);
	simple_lock_init(&deadproc_slock);

	LIST_INIT(&alllwp);
	LIST_INIT(&deadlwp);
	LIST_INIT(&zomblwp);

	pidhashtbl =
	    hashinit(maxproc / 4, HASH_LIST, M_PROC, M_WAITOK, &pidhash);
	pgrphashtbl =
	    hashinit(maxproc / 4, HASH_LIST, M_PROC, M_WAITOK, &pgrphash);
	uihashtbl =
	    hashinit(maxproc / 16, HASH_LIST, M_PROC, M_WAITOK, &uihash);

	pool_init(&proc_pool, sizeof(struct proc), 0, 0, 0, "procpl",
	    &pool_allocator_nointr);
	pool_init(&lwp_pool, sizeof(struct lwp), 0, 0, 0, "lwppl",
	    &pool_allocator_nointr);
	pool_init(&lwp_uc_pool, sizeof(ucontext_t), 0, 0, 0, "lwpucpl",
	    &pool_allocator_nointr);
	pool_init(&pgrp_pool, sizeof(struct pgrp), 0, 0, 0, "pgrppl",
	    &pool_allocator_nointr);
	pool_init(&pcred_pool, sizeof(struct pcred), 0, 0, 0, "pcredpl",
	    &pool_allocator_nointr);
	pool_init(&plimit_pool, sizeof(struct plimit), 0, 0, 0, "plimitpl",
	    &pool_allocator_nointr);
	pool_init(&pstats_pool, sizeof(struct pstats), 0, 0, 0, "pstatspl",
	    &pool_allocator_nointr);
	pool_init(&rusage_pool, sizeof(struct rusage), 0, 0, 0, "rusgepl",
	    &pool_allocator_nointr);
	pool_init(&ras_pool, sizeof(struct ras), 0, 0, 0, "raspl",
	    &pool_allocator_nointr);
	pool_init(&sadata_pool, sizeof(struct sadata), 0, 0, 0, "sadatapl",
	    &pool_allocator_nointr);
	pool_init(&saupcall_pool, sizeof(struct sadata_upcall), 0, 0, 0,
	    "saupcpl", &pool_allocator_nointr);
	pool_init(&ptimer_pool, sizeof(struct ptimer), 0, 0, 0, "ptimerpl",
	    &pool_allocator_nointr);
}

/*
 * Acquire a read lock on the proclist.
 */
void
proclist_lock_read()
{
	int error;

	error = spinlockmgr(&proclist_lock, LK_SHARED, NULL);
#ifdef DIAGNOSTIC
	if (__predict_false(error != 0))
		panic("proclist_lock_read: failed to acquire lock");
#endif
}

/*
 * Release a read lock on the proclist.
 */
void
proclist_unlock_read()
{

	(void) spinlockmgr(&proclist_lock, LK_RELEASE, NULL);
}

/*
 * Acquire a write lock on the proclist.
 */
int
proclist_lock_write()
{
	int s, error;

	s = splclock();
	error = spinlockmgr(&proclist_lock, LK_EXCLUSIVE, NULL);
#ifdef DIAGNOSTIC
	if (__predict_false(error != 0))
		panic("proclist_lock: failed to acquire lock");
#endif
	return (s);
}

/*
 * Release a write lock on the proclist.
 */
void
proclist_unlock_write(s)
	int s;
{

	(void) spinlockmgr(&proclist_lock, LK_RELEASE, NULL);
	splx(s);
}

/*
 * Change the count associated with the number of processes
 * a given user is using.
 */
int
chgproccnt(uid, diff)
	uid_t	uid;
	int	diff;
{
	struct uidinfo *uip;
	struct uihashhead *uipp;

	uipp = UIHASH(uid);

	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	if (uip) {
		uip->ui_proccnt += diff;
		if (uip->ui_proccnt > 0)
			return (uip->ui_proccnt);
		if (uip->ui_proccnt < 0)
			panic("chgproccnt: procs < 0");
		LIST_REMOVE(uip, ui_hash);
		FREE(uip, M_PROC);
		return (0);
	}
	if (diff <= 0) {
		if (diff == 0)
			return (0);
		panic("chgproccnt: lost user");
	}
	MALLOC(uip, struct uidinfo *, sizeof(*uip), M_PROC, M_WAITOK);
	LIST_INSERT_HEAD(uipp, uip, ui_hash);
	uip->ui_uid = uid;
	uip->ui_proccnt = diff;
	return (diff);
}
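
/*
 * Usage sketch (illustrative only; the real caller lives elsewhere,
 * e.g. the fork path).  The returned count lets a caller enforce the
 * per-user process limit before committing to a new process, backing
 * the count out again on failure:
 *
 *	if (chgproccnt(uid, 1) > p->p_rlimit[RLIMIT_NPROC].rlim_cur) {
 *		(void)chgproccnt(uid, -1);
 *		return (EAGAIN);
 *	}
 */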

/*
 * Is p an inferior of q?
 */
int
inferior(p, q)
	struct proc *p;
	struct proc *q;
{

	for (; p != q; p = p->p_pptr)
		if (p->p_pid == 0)
			return (0);
	return (1);
}

/*
 * Locate a process by number
 */
struct proc *
pfind(pid)
	pid_t pid;
{
	struct proc *p;

	proclist_lock_read();
	LIST_FOREACH(p, PIDHASH(pid), p_hash)
		if (p->p_pid == pid)
			break;
	proclist_unlock_read();
	return (p);
}

/*
 * Locate a process group by number
 */
struct pgrp *
pgfind(pgid)
	pid_t pgid;
{
	struct pgrp *pgrp;

	LIST_FOREACH(pgrp, PGRPHASH(pgid), pg_hash)
		if (pgrp->pg_id == pgid)
			return (pgrp);
	return (NULL);
}

/*
 * Move p to a new or existing process group (and session)
 */
int
enterpgrp(p, pgid, mksess)
	struct proc *p;
	pid_t pgid;
	int mksess;
{
	struct pgrp *pgrp = pgfind(pgid);

#ifdef DIAGNOSTIC
	if (__predict_false(pgrp != NULL && mksess))	/* firewalls */
		panic("enterpgrp: setsid into non-empty pgrp");
	if (__predict_false(SESS_LEADER(p)))
		panic("enterpgrp: session leader attempted setpgrp");
#endif
	if (pgrp == NULL) {
		pid_t savepid = p->p_pid;
		struct proc *np;
		/*
		 * new process group
		 */
#ifdef DIAGNOSTIC
		if (__predict_false(p->p_pid != pgid))
			panic("enterpgrp: new pgrp and pid != pgid");
#endif
		pgrp = pool_get(&pgrp_pool, PR_WAITOK);
		if ((np = pfind(savepid)) == NULL || np != p) {
			pool_put(&pgrp_pool, pgrp);
			return (ESRCH);
		}
		if (mksess) {
			struct session *sess;

			/*
			 * new session
			 */
			MALLOC(sess, struct session *, sizeof(struct session),
			    M_SESSION, M_WAITOK);
			if ((np = pfind(savepid)) == NULL || np != p) {
				FREE(sess, M_SESSION);
				pool_put(&pgrp_pool, pgrp);
				return (ESRCH);
			}
			sess->s_sid = p->p_pid;
			sess->s_leader = p;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			memcpy(sess->s_login, p->p_session->s_login,
			    sizeof(sess->s_login));
			p->p_flag &= ~P_CONTROLT;
			pgrp->pg_session = sess;
#ifdef DIAGNOSTIC
			if (__predict_false(p != curproc))
				panic("enterpgrp: mksession and p != curproc");
#endif
		} else {
			SESSHOLD(p->p_session);
			pgrp->pg_session = p->p_session;
		}
		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
		LIST_INSERT_HEAD(PGRPHASH(pgid), pgrp, pg_hash);
		pgrp->pg_jobc = 0;
	} else if (pgrp == p->p_pgrp)
		return (0);

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, p->p_pgrp, 0);

	LIST_REMOVE(p, p_pglist);
	if (LIST_EMPTY(&p->p_pgrp->pg_members))
		pgdelete(p->p_pgrp);
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
	return (0);
}
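
/*
 * Usage sketch (illustrative only): roughly how setsid(2)- and
 * setpgid(2)-style callers are expected to use enterpgrp().  A setsid
 * caller names itself as the new group and session leader; a setpgid
 * caller passes mksess == 0 (targp here is a hypothetical target
 * process looked up by the caller):
 *
 *	error = enterpgrp(p, p->p_pid, 1);	... setsid: new session ...
 *	error = enterpgrp(targp, pgid, 0);	... setpgid: join/create pgrp ...
 */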

/*
 * remove process from process group
 */
int
leavepgrp(p)
	struct proc *p;
{

	LIST_REMOVE(p, p_pglist);
	if (LIST_EMPTY(&p->p_pgrp->pg_members))
		pgdelete(p->p_pgrp);
	p->p_pgrp = NULL;
	return (0);
}

/*
 * delete a process group
 */
void
pgdelete(pgrp)
	struct pgrp *pgrp;
{

	/* Remove reference (if any) from tty to this process group */
	if (pgrp->pg_session->s_ttyp != NULL &&
	    pgrp->pg_session->s_ttyp->t_pgrp == pgrp)
		pgrp->pg_session->s_ttyp->t_pgrp = NULL;
	LIST_REMOVE(pgrp, pg_hash);
	SESSRELE(pgrp->pg_session);
	pool_put(&pgrp_pool, pgrp);
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 */
void
fixjobc(p, pgrp, entering)
	struct proc *p;
	struct pgrp *pgrp;
	int entering;
{
	struct pgrp *hispgrp;
	struct session *mysession = pgrp->pg_session;

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	if ((hispgrp = p->p_pptr->p_pgrp) != pgrp &&
	    hispgrp->pg_session == mysession) {
		if (entering)
			pgrp->pg_jobc++;
		else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
	 */
	LIST_FOREACH(p, &p->p_children, p_sibling) {
		if ((hispgrp = p->p_pgrp) != pgrp &&
		    hispgrp->pg_session == mysession &&
		    P_ZOMBIE(p) == 0) {
			if (entering)
				hispgrp->pg_jobc++;
			else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
		}
	}
}

/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang up all processes in that group.
 */
static void
orphanpg(pg)
	struct pgrp *pg;
{
	struct proc *p;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				psignal(p, SIGHUP);
				psignal(p, SIGCONT);
			}
			return;
		}
	}
}

/* Mark process as suid/sgid and reset some values to defaults. */
void
p_sugid(p)
	struct proc *p;
{
	struct plimit *newlim;

	p->p_flag |= P_SUGID;
	/* reset what needs to be reset in plimit */
	if (p->p_limit->pl_corename != defcorename) {
		if (p->p_limit->p_refcnt > 1 &&
		    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
			newlim = limcopy(p->p_limit);
			limfree(p->p_limit);
			p->p_limit = newlim;
		}
		free(p->p_limit->pl_corename, M_TEMP);
		p->p_limit->pl_corename = defcorename;
	}
}

#ifdef DEBUG
void
pgrpdump()
{
	struct pgrp *pgrp;
	struct proc *p;
	int i;

	for (i = 0; i <= pgrphash; i++) {
		if ((pgrp = LIST_FIRST(&pgrphashtbl[i])) != NULL) {
			printf("\tindx %d\n", i);
			for (; pgrp != NULL; pgrp = LIST_NEXT(pgrp, pg_hash)) {
				printf("\tpgrp %p, pgid %d, sess %p, "
				    "sesscnt %d, mem %p\n",
				    pgrp, pgrp->pg_id, pgrp->pg_session,
				    pgrp->pg_session->s_count,
				    LIST_FIRST(&pgrp->pg_members));
				LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
					printf("\t\tpid %d addr %p pgrp %p\n",
					    p->p_pid, p, p->p_pgrp);
				}
			}
		}
	}
}
#endif /* DEBUG */

#ifdef KSTACK_CHECK_MAGIC
#include <sys/user.h>

#define	KSTACK_MAGIC	0xdeadbeaf

/* XXX should this be kept on a per-process basis? */
int kstackleftmin = KSTACK_SIZE;
/* warn if remaining stack is less than this */
int kstackleftthres = KSTACK_SIZE / 8;
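
/*
 * Usage sketch (illustrative only; the calls are made elsewhere in the
 * kernel, not in this file).  The two routines below are meant to be
 * used as a pair: whoever creates a kernel stack seeds it once, and the
 * stack is then audited at natural checkpoints, e.g. on context switch:
 *
 *	kstack_setup_magic(l);	... once, when l's stack is set up ...
 *	...
 *	kstack_check_magic(l);	... later, e.g. at each mi_switch() ...
 */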

void
kstack_setup_magic(const struct lwp *l)
{
	u_int32_t *ip;
	u_int32_t const *end;

	KASSERT(l != NULL);
	KASSERT(l != &lwp0);

	/*
	 * Fill the whole stack with the magic number
	 * so that later modification of it can be detected.
	 */
	ip = (u_int32_t *)KSTACK_LOWEST_ADDR(l);
	end = (u_int32_t *)((caddr_t)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
	for (; ip < end; ip++) {
		*ip = KSTACK_MAGIC;
	}
}

void
kstack_check_magic(const struct lwp *l)
{
	u_int32_t const *ip, *end;
	int stackleft;

	KASSERT(l != NULL);

	/* don't check lwp0 */ /*XXX*/
	if (l == &lwp0)
		return;

#ifdef __MACHINE_STACK_GROWS_UP
	/* stack grows upwards (eg. hppa) */
	ip = (u_int32_t *)((caddr_t)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
	end = (u_int32_t *)KSTACK_LOWEST_ADDR(l);
	for (ip--; ip >= end; ip--)
		if (*ip != KSTACK_MAGIC)
			break;

	stackleft = (caddr_t)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE - (caddr_t)ip;
#else /* __MACHINE_STACK_GROWS_UP */
	/* stack grows downwards (eg. i386) */
	ip = (u_int32_t *)KSTACK_LOWEST_ADDR(l);
	end = (u_int32_t *)((caddr_t)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
	for (; ip < end; ip++)
		if (*ip != KSTACK_MAGIC)
			break;

	stackleft = (caddr_t)ip - KSTACK_LOWEST_ADDR(l);
#endif /* __MACHINE_STACK_GROWS_UP */

	if (kstackleftmin > stackleft) {
		kstackleftmin = stackleft;
		if (stackleft < kstackleftthres)
			printf("warning: kernel stack left %d bytes"
			    " (pid %u:lid %u)\n", stackleft,
			    (u_int)l->l_proc->p_pid, (u_int)l->l_lid);
	}

	if (stackleft <= 0) {
		panic("magic on the top of kernel stack changed for "
		    "pid %u, lid %u: maybe kernel stack overflow",
		    (u_int)l->l_proc->p_pid, (u_int)l->l_lid);
	}
}
#endif /* KSTACK_CHECK_MAGIC */