/*	$NetBSD: kern_proc.c,v 1.152 2009/05/23 18:28:06 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_proc.c,v 1.152 2009/05/23 18:28:06 ad Exp $");

#include "opt_kstack.h"
#include "opt_maxuprc.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/acct.h>
#include <sys/wait.h>
#include <sys/file.h>
#include <ufs/ufs/quota.h>
#include <sys/uio.h>
#include <sys/pool.h>
#include <sys/pset.h>
#include <sys/mbuf.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <sys/signalvar.h>
#include <sys/ras.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/filedesc.h>
#include <sys/syscall_stats.h>
#include <sys/kauth.h>
#include <sys/sleepq.h>
#include <sys/atomic.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

/*
 * Other process lists
 */

struct proclist allproc;
struct proclist zombproc;	/* resources have been freed */

kmutex_t *proc_lock;

/*
 * pid to proc lookup is done by indexing the pid_table array.
 * Since pid numbers are only allocated when an empty slot
 * has been found, there is no need to search any lists ever.
 * (an orphaned pgrp will lock the slot, a session will lock
 * the pgrp with the same number.)
 * If the table is too small it is reallocated with twice the
 * previous size and the entries 'unzipped' into the two halves.
 * A linked list of free entries is passed through the pt_proc
 * field of 'free' items - set odd to be an invalid ptr.
 */

struct pid_table {
	struct proc	*pt_proc;
	struct pgrp	*pt_pgrp;
};
#if 1	/* strongly typed cast - should be a noop */
static inline uint p2u(struct proc *p) { return (uint)(uintptr_t)p; }
#else
#define	p2u(p)	((uint)p)
#endif
#define	P_VALID(p)	(!(p2u(p) & 1))
#define	P_NEXT(p)	(p2u(p) >> 1)
#define	P_FREE(pid)	((struct proc *)(uintptr_t)((pid) << 1 | 1))

#define	INITIAL_PID_TABLE_SIZE	(1 << 5)
static struct pid_table *pid_table;
static uint pid_tbl_mask = INITIAL_PID_TABLE_SIZE - 1;
static uint pid_alloc_lim;	/* max we allocate before growing table */
static uint pid_alloc_cnt;	/* number of allocated pids */

/* links through free slots - never empty! */
static uint next_free_pt, last_free_pt;
static pid_t pid_max = PID_MAX;	/* largest value we allocate */

/* Components of the first process -- never freed. */

extern struct emul emul_netbsd;	/* defined in kern_exec.c */

struct session session0 = {
	.s_count = 1,
	.s_sid = 0,
};
struct pgrp pgrp0 = {
	.pg_members = LIST_HEAD_INITIALIZER(&pgrp0.pg_members),
	.pg_session = &session0,
};
filedesc_t filedesc0;
struct cwdinfo cwdi0 = {
	.cwdi_cmask = CMASK,		/* see cmask below */
	.cwdi_refcnt = 1,
};
struct plimit limit0;
struct pstats pstat0;
struct vmspace vmspace0;
struct sigacts sigacts0;
struct turnstile turnstile0;
struct proc proc0 = {
	.p_lwps = LIST_HEAD_INITIALIZER(&proc0.p_lwps),
	.p_sigwaiters = LIST_HEAD_INITIALIZER(&proc0.p_sigwaiters),
	.p_nlwps = 1,
	.p_nrlwps = 1,
	.p_nlwpid = 1,		/* must match lwp0.l_lid */
	.p_pgrp = &pgrp0,
	.p_comm = "system",
	/*
	 * Set P_NOCLDWAIT so that kernel threads are reparented to init(8)
	 * when they exit.
	 * init(8) can easily wait them out for us.
	 */
	.p_flag = PK_SYSTEM | PK_NOCLDWAIT,
	.p_stat = SACTIVE,
	.p_nice = NZERO,
	.p_emul = &emul_netbsd,
	.p_cwdi = &cwdi0,
	.p_limit = &limit0,
	.p_fd = &filedesc0,
	.p_vmspace = &vmspace0,
	.p_stats = &pstat0,
	.p_sigacts = &sigacts0,
};
struct lwp lwp0 __aligned(MIN_LWP_ALIGNMENT) = {
#ifdef LWP0_CPU_INFO
	.l_cpu = LWP0_CPU_INFO,
#endif
	.l_proc = &proc0,
	.l_lid = 1,
	.l_flag = LW_INMEM | LW_SYSTEM,
	.l_stat = LSONPROC,
	.l_ts = &turnstile0,
	.l_syncobj = &sched_syncobj,
	.l_refcnt = 1,
	.l_priority = PRI_USER + NPRI_USER - 1,
	.l_inheritedprio = -1,
	.l_class = SCHED_OTHER,
	.l_psid = PS_NONE,
	.l_pi_lenders = SLIST_HEAD_INITIALIZER(&lwp0.l_pi_lenders),
	.l_name = __UNCONST("swapper"),
	.l_fd = &filedesc0,
};
kauth_cred_t cred0;

extern struct user *proc0paddr;

int nofile = NOFILE;
int maxuprc = MAXUPRC;
int cmask = CMASK;

MALLOC_DEFINE(M_EMULDATA, "emuldata", "Per-process emulation data");
MALLOC_DEFINE(M_SUBPROC, "subproc", "Proc sub-structures");

/*
 * The process list descriptors, used during pid allocation and
 * by sysctl.  No locking on this data structure is needed since
 * it is completely static.
 */
const struct proclist_desc proclists[] = {
	{ &allproc },
	{ &zombproc },
	{ NULL },
};

static struct pgrp *pg_remove(pid_t);
static void	pg_delete(pid_t);
static void	orphanpg(struct pgrp *);

static specificdata_domain_t proc_specificdata_domain;

static pool_cache_t proc_cache;

/*
 * Initialize global process hashing structures.
 */
void
procinit(void)
{
	const struct proclist_desc *pd;
	u_int i;
#define	LINK_EMPTY ((PID_MAX + INITIAL_PID_TABLE_SIZE) & ~(INITIAL_PID_TABLE_SIZE - 1))

	for (pd = proclists; pd->pd_list != NULL; pd++)
		LIST_INIT(pd->pd_list);

	proc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	pid_table = kmem_alloc(INITIAL_PID_TABLE_SIZE
	    * sizeof(struct pid_table), KM_SLEEP);

	/* Set free list running through table...
	   Preset 'use count' above PID_MAX so we allocate pid 1 next. */
	for (i = 0; i <= pid_tbl_mask; i++) {
		pid_table[i].pt_proc = P_FREE(LINK_EMPTY + i + 1);
		pid_table[i].pt_pgrp = 0;
	}
	/* slot 0 is just grabbed */
	next_free_pt = 1;
	/* Need to fix last entry. */
	last_free_pt = pid_tbl_mask;
	pid_table[last_free_pt].pt_proc = P_FREE(LINK_EMPTY);
	/* point at which we grow table - to avoid reusing pids too often */
	pid_alloc_lim = pid_tbl_mask - 1;
#undef LINK_EMPTY

	proc_specificdata_domain = specificdata_domain_create();
	KASSERT(proc_specificdata_domain != NULL);

	proc_cache = pool_cache_init(sizeof(struct proc), 0, 0, 0,
	    "procpl", NULL, IPL_NONE, NULL, NULL, NULL);
}

/*
 * Initialize process 0.
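 * proc0 and its supporting structures (pgrp0, session0, lwp0, limit0, ...)
 * are the statically allocated objects declared above and are never freed.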
 */
void
proc0_init(void)
{
	struct proc *p;
	struct pgrp *pg;
	struct session *sess;
	struct lwp *l;
	rlim_t lim;
	int i;

	p = &proc0;
	pg = &pgrp0;
	sess = &session0;
	l = &lwp0;

	KASSERT(l->l_lid == p->p_nlwpid);

	mutex_init(&p->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
	mutex_init(&p->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&l->l_swaplock, MUTEX_DEFAULT, IPL_NONE);
	p->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	rw_init(&p->p_reflock);
	cv_init(&p->p_waitcv, "wait");
	cv_init(&p->p_lwpcv, "lwpwait");

	LIST_INSERT_HEAD(&p->p_lwps, l, l_sibling);

	pid_table[0].pt_proc = p;
	LIST_INSERT_HEAD(&allproc, p, p_list);
	LIST_INSERT_HEAD(&alllwp, l, l_list);

	pid_table[0].pt_pgrp = pg;
	LIST_INSERT_HEAD(&pg->pg_members, p, p_pglist);

#ifdef __HAVE_SYSCALL_INTERN
	(*p->p_emul->e_syscall_intern)(p);
#endif

	callout_init(&l->l_timeout_ch, CALLOUT_MPSAFE);
	callout_setfunc(&l->l_timeout_ch, sleepq_timeout, l);
	cv_init(&l->l_sigcv, "sigwait");

	/* Create credentials. */
	cred0 = kauth_cred_alloc();
	p->p_cred = cred0;
	kauth_cred_hold(cred0);
	l->l_cred = cred0;

	/* Create the CWD info. */
	rw_init(&cwdi0.cwdi_lock);

	/* Create the limits structures. */
	mutex_init(&limit0.pl_lock, MUTEX_DEFAULT, IPL_NONE);
	for (i = 0; i < __arraycount(limit0.pl_rlimit); i++)
		limit0.pl_rlimit[i].rlim_cur =
		    limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;

	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
	limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur =
	    maxfiles < nofile ? maxfiles : nofile;

	limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
	limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur =
	    maxproc < maxuprc ? maxproc : maxuprc;

	lim = ptoa(uvmexp.free);
	limit0.pl_rlimit[RLIMIT_RSS].rlim_max = lim;
	limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_max = lim;
	limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
	limit0.pl_corename = defcorename;
	limit0.pl_refcnt = 1;
	limit0.pl_sv_limit = NULL;

	/* Configure virtual memory system, set vm rlimits. */
	uvm_init_limits(p);

	/* Initialize file descriptor table for proc0. */
	fd_init(&filedesc0);

	/*
	 * Initialize proc0's vmspace, which uses the kernel pmap.
	 * All kernel processes (which never have user space mappings)
	 * share proc0's vmspace, and thus, the kernel pmap.
	 */
	uvmspace_init(&vmspace0, pmap_kernel(), round_page(VM_MIN_ADDRESS),
	    trunc_page(VM_MAX_ADDRESS));

	l->l_addr = proc0paddr;				/* XXX */

	/* Initialize signal state for proc0. XXX IPL_SCHED */
	mutex_init(&p->p_sigacts->sa_mutex, MUTEX_DEFAULT, IPL_SCHED);
	siginit(p);

	proc_initspecific(p);
	lwp_initspecific(l);

	SYSCALL_TIME_LWP_INIT(l);
}

/*
 * Session reference counting.
 */

void
proc_sesshold(struct session *ss)
{

	KASSERT(mutex_owned(proc_lock));
	ss->s_count++;
}

void
proc_sessrele(struct session *ss)
{

	KASSERT(mutex_owned(proc_lock));
	/*
	 * We keep the pgrp with the same id as the session in order to
	 * stop a process being given the same pid.  Since the pgrp holds
	 * a reference to the session, it must be a 'zombie' pgrp by now.
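	 * (pg_remove() below asserts that the group has no members left.)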
	 */
	if (--ss->s_count == 0) {
		struct pgrp *pg;

		pg = pg_remove(ss->s_sid);
		mutex_exit(proc_lock);

		kmem_free(pg, sizeof(struct pgrp));
		kmem_free(ss, sizeof(struct session));
	} else {
		mutex_exit(proc_lock);
	}
}

/*
 * Check that the specified process group is in the session of the
 * specified process.
 * Treats negative ids as process ids.
 * Used to validate TIOCSPGRP requests.
 */
int
pgid_in_session(struct proc *p, pid_t pg_id)
{
	struct pgrp *pgrp;
	struct session *session;
	int error;

	mutex_enter(proc_lock);
	if (pg_id < 0) {
		struct proc *p1 = p_find(-pg_id, PFIND_LOCKED | PFIND_UNLOCK_FAIL);
		if (p1 == NULL)
			return EINVAL;
		pgrp = p1->p_pgrp;
	} else {
		pgrp = pg_find(pg_id, PFIND_LOCKED | PFIND_UNLOCK_FAIL);
		if (pgrp == NULL)
			return EINVAL;
	}
	session = pgrp->pg_session;
	if (session != p->p_pgrp->pg_session)
		error = EPERM;
	else
		error = 0;
	mutex_exit(proc_lock);

	return error;
}

/*
 * p_inferior: is p an inferior of q?
 */
static inline bool
p_inferior(struct proc *p, struct proc *q)
{

	KASSERT(mutex_owned(proc_lock));

	for (; p != q; p = p->p_pptr)
		if (p->p_pid == 0)
			return false;
	return true;
}

/*
 * Locate a process by number
 */
struct proc *
p_find(pid_t pid, uint flags)
{
	struct proc *p;
	char stat;

	if (!(flags & PFIND_LOCKED))
		mutex_enter(proc_lock);

	p = pid_table[pid & pid_tbl_mask].pt_proc;

	/* Only allow live processes to be found by pid. */
	/* XXXSMP p_stat */
	if (P_VALID(p) && p->p_pid == pid && ((stat = p->p_stat) == SACTIVE ||
	    stat == SSTOP || ((flags & PFIND_ZOMBIE) &&
	    (stat == SZOMB || stat == SDEAD || stat == SDYING)))) {
		if (flags & PFIND_UNLOCK_OK)
			mutex_exit(proc_lock);
		return p;
	}
	if (flags & PFIND_UNLOCK_FAIL)
		mutex_exit(proc_lock);
	return NULL;
}


/*
 * Locate a process group by number
 */
struct pgrp *
pg_find(pid_t pgid, uint flags)
{
	struct pgrp *pg;

	if (!(flags & PFIND_LOCKED))
		mutex_enter(proc_lock);
	pg = pid_table[pgid & pid_tbl_mask].pt_pgrp;
	/*
	 * Can't look up a pgrp that only exists because the session
	 * hasn't died yet (traditional)
	 */
	if (pg == NULL || pg->pg_id != pgid || LIST_EMPTY(&pg->pg_members)) {
		if (flags & PFIND_UNLOCK_FAIL)
			mutex_exit(proc_lock);
		return NULL;
	}

	if (flags & PFIND_UNLOCK_OK)
		mutex_exit(proc_lock);
	return pg;
}

static void
expand_pid_table(void)
{
	size_t pt_size, tsz;
	struct pid_table *n_pt, *new_pt;
	struct proc *proc;
	struct pgrp *pgrp;
	pid_t pid;
	u_int i;

	pt_size = pid_tbl_mask + 1;
	tsz = pt_size * 2 * sizeof(struct pid_table);
	new_pt = kmem_alloc(tsz, KM_SLEEP);

	mutex_enter(proc_lock);
	if (pt_size != pid_tbl_mask + 1) {
		/* Another process beat us to it... */
		mutex_exit(proc_lock);
		kmem_free(new_pt, tsz);
		return;
	}

	/*
	 * Copy entries from old table into new one.
	 * If 'pid' is 'odd' we need to place in the upper half,
	 * even pid's to the lower half.
	 * Free items stay in the low half so we don't have to
	 * fixup the reference to them.
	 * We stuff free items on the front of the freelist
	 * because we can't write to unmodified entries.
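	 * For example, when an 8-slot table doubles, pid 3 (old slot 3)
	 * stays in new slot 3 while pid 11 (also old slot 3) moves to new
	 * slot 11; the extra mask bit now distinguishes them.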
	 * Processing the table backwards maintains a semblance
	 * of issuing pid numbers that increase with time.
	 */
	i = pt_size - 1;
	n_pt = new_pt + i;
	for (; ; i--, n_pt--) {
		proc = pid_table[i].pt_proc;
		pgrp = pid_table[i].pt_pgrp;
		if (!P_VALID(proc)) {
			/* Up 'use count' so that link is valid */
			pid = (P_NEXT(proc) + pt_size) & ~pt_size;
			proc = P_FREE(pid);
			if (pgrp)
				pid = pgrp->pg_id;
		} else
			pid = proc->p_pid;

		/* Save entry in appropriate half of table */
		n_pt[pid & pt_size].pt_proc = proc;
		n_pt[pid & pt_size].pt_pgrp = pgrp;

		/* Put other piece on start of free list */
		pid = (pid ^ pt_size) & ~pid_tbl_mask;
		n_pt[pid & pt_size].pt_proc =
		    P_FREE((pid & ~pt_size) | next_free_pt);
		n_pt[pid & pt_size].pt_pgrp = 0;
		next_free_pt = i | (pid & pt_size);
		if (i == 0)
			break;
	}

	/* Save old table size and switch tables */
	tsz = pt_size * sizeof(struct pid_table);
	n_pt = pid_table;
	pid_table = new_pt;
	pid_tbl_mask = pt_size * 2 - 1;

	/*
	 * pid_max starts as PID_MAX (= 30000), once we have 16384
	 * allocated pids we need it to be larger!
	 */
	if (pid_tbl_mask > PID_MAX) {
		pid_max = pid_tbl_mask * 2 + 1;
		pid_alloc_lim |= pid_alloc_lim << 1;
	} else
		pid_alloc_lim <<= 1;	/* doubles number of free slots... */

	mutex_exit(proc_lock);
	kmem_free(n_pt, tsz);
}

struct proc *
proc_alloc(void)
{
	struct proc *p;
	int nxt;
	pid_t pid;
	struct pid_table *pt;

	p = pool_cache_get(proc_cache, PR_WAITOK);
	p->p_stat = SIDL;			/* protect against others */

	proc_initspecific(p);
	/* allocate next free pid */

	for (;;expand_pid_table()) {
		if (__predict_false(pid_alloc_cnt >= pid_alloc_lim))
			/* ensure pids cycle through 2000+ values */
			continue;
		mutex_enter(proc_lock);
		pt = &pid_table[next_free_pt];
#ifdef DIAGNOSTIC
		if (__predict_false(P_VALID(pt->pt_proc) || pt->pt_pgrp))
			panic("proc_alloc: slot busy");
#endif
		nxt = P_NEXT(pt->pt_proc);
		if (nxt & pid_tbl_mask)
			break;
		/* Table full - expand (NB last entry not used....) */
		mutex_exit(proc_lock);
	}

	/* pid is 'saved use count' + 'size' + entry */
	pid = (nxt & ~pid_tbl_mask) + pid_tbl_mask + 1 + next_free_pt;
	if ((uint)pid > (uint)pid_max)
		pid &= pid_tbl_mask;
	p->p_pid = pid;
	next_free_pt = nxt & pid_tbl_mask;

	/* Grab table slot */
	pt->pt_proc = p;
	pid_alloc_cnt++;

	mutex_exit(proc_lock);

	return p;
}

/*
 * Free a process id - called from proc_free (in kern_exit.c)
 *
 * Called with the proc_lock held.
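 * The pid's table slot goes back onto the free list only if no process
 * group with the same id still occupies it; in that case pg_remove()
 * releases the slot once the group finally disappears.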
 */
void
proc_free_pid(struct proc *p)
{
	pid_t pid = p->p_pid;
	struct pid_table *pt;

	KASSERT(mutex_owned(proc_lock));

	pt = &pid_table[pid & pid_tbl_mask];
#ifdef DIAGNOSTIC
	if (__predict_false(pt->pt_proc != p))
		panic("proc_free: pid_table mismatch, pid %x, proc %p",
		    pid, p);
#endif
	/* save pid use count in slot */
	pt->pt_proc = P_FREE(pid & ~pid_tbl_mask);

	if (pt->pt_pgrp == NULL) {
		/* link last freed entry onto ours */
		pid &= pid_tbl_mask;
		pt = &pid_table[last_free_pt];
		pt->pt_proc = P_FREE(P_NEXT(pt->pt_proc) | pid);
		last_free_pt = pid;
		pid_alloc_cnt--;
	}

	atomic_dec_uint(&nprocs);
}

void
proc_free_mem(struct proc *p)
{

	pool_cache_put(proc_cache, p);
}

/*
 * proc_enterpgrp: move p to a new or existing process group (and session).
 *
 * If we are creating a new pgrp, the pgid should equal
 * the calling process' pid.
 * It is only valid to enter a process group that is in the session
 * of the process.
 * Also mksess should only be set if we are creating a process group.
 *
 * Only called from sys_setsid and sys_setpgid.
 */
int
proc_enterpgrp(struct proc *curp, pid_t pid, pid_t pgid, bool mksess)
{
	struct pgrp *new_pgrp, *pgrp;
	struct session *sess;
	struct proc *p;
	int rval;
	pid_t pg_id = NO_PGID;

	sess = mksess ? kmem_alloc(sizeof(*sess), KM_SLEEP) : NULL;

	/* Allocate data areas we might need before doing any validity checks */
	mutex_enter(proc_lock);		/* Because pid_table might change */
	if (pid_table[pgid & pid_tbl_mask].pt_pgrp == 0) {
		mutex_exit(proc_lock);
		new_pgrp = kmem_alloc(sizeof(*new_pgrp), KM_SLEEP);
		mutex_enter(proc_lock);
	} else
		new_pgrp = NULL;
	rval = EPERM;	/* most common error (to save typing) */

	/* Check pgrp exists or can be created */
	pgrp = pid_table[pgid & pid_tbl_mask].pt_pgrp;
	if (pgrp != NULL && pgrp->pg_id != pgid)
		goto done;

	/* Can only set another process under restricted circumstances. */
	if (pid != curp->p_pid) {
		/* must exist and be one of our children... */
		if ((p = p_find(pid, PFIND_LOCKED)) == NULL ||
		    !p_inferior(p, curp)) {
			rval = ESRCH;
			goto done;
		}
		/* ... in the same session... */
		if (sess != NULL || p->p_session != curp->p_session)
			goto done;
		/* ... existing pgid must be in same session ... */
		if (pgrp != NULL && pgrp->pg_session != p->p_session)
			goto done;
		/* ... and not done an exec. */
		if (p->p_flag & PK_EXEC) {
			rval = EACCES;
			goto done;
		}
	} else {
		/* ... setsid() cannot re-enter a pgrp */
		if (mksess && (curp->p_pgid == curp->p_pid ||
		    pg_find(curp->p_pid, PFIND_LOCKED)))
			goto done;
		p = curp;
	}

	/* Changing the process group/session of a session
	   leader is definitely off limits. */
	if (SESS_LEADER(p)) {
		if (sess == NULL && p->p_pgrp == pgrp)
			/* unless it's a definite noop */
			rval = 0;
		goto done;
	}

	/* Can only create a process group with id of process */
	if (pgrp == NULL && pgid != pid)
		goto done;

	/* Can only create a session if creating pgrp */
	if (sess != NULL && pgrp != NULL)
		goto done;

	/* Check we allocated memory for a pgrp... */
	if (pgrp == NULL && new_pgrp == NULL)
		goto done;

	/* Don't attach to 'zombie' pgrp */
	if (pgrp != NULL && LIST_EMPTY(&pgrp->pg_members))
		goto done;

	/* Expect to succeed now */
	rval = 0;

	if (pgrp == p->p_pgrp)
		/* nothing to do */
		goto done;

	/* Ok all setup, link up required structures */

	if (pgrp == NULL) {
		pgrp = new_pgrp;
		new_pgrp = NULL;
		if (sess != NULL) {
			sess->s_sid = p->p_pid;
			sess->s_leader = p;
			sess->s_count = 1;
			sess->s_ttyvp = NULL;
			sess->s_ttyp = NULL;
			sess->s_flags = p->p_session->s_flags & ~S_LOGIN_SET;
			memcpy(sess->s_login, p->p_session->s_login,
			    sizeof(sess->s_login));
			p->p_lflag &= ~PL_CONTROLT;
		} else {
			sess = p->p_pgrp->pg_session;
			proc_sesshold(sess);
		}
		pgrp->pg_session = sess;
		sess = NULL;

		pgrp->pg_id = pgid;
		LIST_INIT(&pgrp->pg_members);
#ifdef DIAGNOSTIC
		if (__predict_false(pid_table[pgid & pid_tbl_mask].pt_pgrp))
			panic("enterpgrp: pgrp table slot in use");
		if (__predict_false(mksess && p != curp))
			panic("enterpgrp: mksession and p != curproc");
#endif
		pid_table[pgid & pid_tbl_mask].pt_pgrp = pgrp;
		pgrp->pg_jobc = 0;
	}

	/*
	 * Adjust eligibility of affected pgrps to participate in job control.
	 * Increment eligibility counts before decrementing, otherwise we
	 * could reach 0 spuriously during the first call.
	 */
	fixjobc(p, pgrp, 1);
	fixjobc(p, p->p_pgrp, 0);

	/* Interlock with ttread(). */
	mutex_spin_enter(&tty_lock);

	/* Move process to requested group. */
	LIST_REMOVE(p, p_pglist);
	if (LIST_EMPTY(&p->p_pgrp->pg_members))
		/* defer delete until we've dumped the lock */
		pg_id = p->p_pgrp->pg_id;
	p->p_pgrp = pgrp;
	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);

	/* Done with the swap; we can release the tty mutex. */
	mutex_spin_exit(&tty_lock);

 done:
	if (pg_id != NO_PGID) {
		/* Releases proc_lock. */
		pg_delete(pg_id);
	} else {
		mutex_exit(proc_lock);
	}
	if (sess != NULL)
		kmem_free(sess, sizeof(*sess));
	if (new_pgrp != NULL)
		kmem_free(new_pgrp, sizeof(*new_pgrp));
#ifdef DEBUG_PGRP
	if (__predict_false(rval))
		printf("enterpgrp(%d,%d,%d), curproc %d, rval %d\n",
		    pid, pgid, mksess, curp->p_pid, rval);
#endif
	return rval;
}

/*
 * proc_leavepgrp: remove a process from its process group.
 * => must be called with the proc_lock held, which will be released;
 */
void
proc_leavepgrp(struct proc *p)
{
	struct pgrp *pgrp;

	KASSERT(mutex_owned(proc_lock));

	/* Interlock with ttread() */
	mutex_spin_enter(&tty_lock);
	pgrp = p->p_pgrp;
	LIST_REMOVE(p, p_pglist);
	p->p_pgrp = NULL;
	mutex_spin_exit(&tty_lock);

	if (LIST_EMPTY(&pgrp->pg_members)) {
		/* Releases proc_lock. */
		pg_delete(pgrp->pg_id);
	} else {
		mutex_exit(proc_lock);
	}
}

/*
 * pg_remove: remove a process group from the table.
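 * => the group must have no members left;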
 * => must be called with the proc_lock held;
 * => returns process group to free;
 */
static struct pgrp *
pg_remove(pid_t pg_id)
{
	struct pgrp *pgrp;
	struct pid_table *pt;

	KASSERT(mutex_owned(proc_lock));

	pt = &pid_table[pg_id & pid_tbl_mask];
	pgrp = pt->pt_pgrp;

	KASSERT(pgrp != NULL);
	KASSERT(pgrp->pg_id == pg_id);
	KASSERT(LIST_EMPTY(&pgrp->pg_members));

	pt->pt_pgrp = NULL;

	if (!P_VALID(pt->pt_proc)) {
		/* Orphaned pgrp, put slot onto free list. */
		KASSERT((P_NEXT(pt->pt_proc) & pid_tbl_mask) == 0);
		pg_id &= pid_tbl_mask;
		pt = &pid_table[last_free_pt];
		pt->pt_proc = P_FREE(P_NEXT(pt->pt_proc) | pg_id);
		last_free_pt = pg_id;
		pid_alloc_cnt--;
	}
	return pgrp;
}

/*
 * pg_delete: delete and free a process group.
 * => must be called with the proc_lock held, which will be released.
 */
static void
pg_delete(pid_t pg_id)
{
	struct pgrp *pg;
	struct tty *ttyp;
	struct session *ss;

	KASSERT(mutex_owned(proc_lock));

	pg = pid_table[pg_id & pid_tbl_mask].pt_pgrp;
	if (pg == NULL || pg->pg_id != pg_id || !LIST_EMPTY(&pg->pg_members)) {
		mutex_exit(proc_lock);
		return;
	}

	ss = pg->pg_session;

	/* Remove reference (if any) from tty to this process group */
	mutex_spin_enter(&tty_lock);
	ttyp = ss->s_ttyp;
	if (ttyp != NULL && ttyp->t_pgrp == pg) {
		ttyp->t_pgrp = NULL;
		KASSERT(ttyp->t_session == ss);
	}
	mutex_spin_exit(&tty_lock);

	/*
	 * The leading process group in a session is freed by
	 * proc_sessrele(), if last reference.  Note: proc_sessrele()
	 * releases proc_lock.
	 */
	pg = (ss->s_sid != pg->pg_id) ? pg_remove(pg_id) : NULL;
	proc_sessrele(ss);

	if (pg != NULL) {
		/* Free it, if was not done by proc_sessrele(). */
		kmem_free(pg, sizeof(struct pgrp));
	}
}

/*
 * Adjust pgrp jobc counters when specified process changes process group.
 * We count the number of processes in each process group that "qualify"
 * the group for terminal job control (those with a parent in a different
 * process group of the same session).  If that count reaches zero, the
 * process group becomes orphaned.  Check both the specified process'
 * process group and that of its children.
 * entering == 0 => p is leaving specified group.
 * entering == 1 => p is entering specified group.
 *
 * Call with proc_lock held.
 */
void
fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
{
	struct pgrp *hispgrp;
	struct session *mysession = pgrp->pg_session;
	struct proc *child;

	KASSERT(mutex_owned(proc_lock));

	/*
	 * Check p's parent to see whether p qualifies its own process
	 * group; if so, adjust count for p's process group.
	 */
	hispgrp = p->p_pptr->p_pgrp;
	if (hispgrp != pgrp && hispgrp->pg_session == mysession) {
		if (entering) {
			pgrp->pg_jobc++;
			p->p_lflag &= ~PL_ORPHANPG;
		} else if (--pgrp->pg_jobc == 0)
			orphanpg(pgrp);
	}

	/*
	 * Check this process' children to see whether they qualify
	 * their process groups; if so, adjust counts for children's
	 * process groups.
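	 * Zombie children are skipped; they no longer qualify any group.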
	 */
	LIST_FOREACH(child, &p->p_children, p_sibling) {
		hispgrp = child->p_pgrp;
		if (hispgrp != pgrp && hispgrp->pg_session == mysession &&
		    !P_ZOMBIE(child)) {
			if (entering) {
				child->p_lflag &= ~PL_ORPHANPG;
				hispgrp->pg_jobc++;
			} else if (--hispgrp->pg_jobc == 0)
				orphanpg(hispgrp);
		}
	}
}

/*
 * A process group has become orphaned;
 * if there are any stopped processes in the group,
 * hang up all processes in that group.
 *
 * Call with proc_lock held.
 */
static void
orphanpg(struct pgrp *pg)
{
	struct proc *p;
	int doit;

	KASSERT(mutex_owned(proc_lock));

	doit = 0;

	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
		if (p->p_stat == SSTOP) {
			p->p_lflag |= PL_ORPHANPG;
			psignal(p, SIGHUP);
			psignal(p, SIGCONT);
		}
	}
}

#ifdef DDB
#include <ddb/db_output.h>
void pidtbl_dump(void);
void
pidtbl_dump(void)
{
	struct pid_table *pt;
	struct proc *p;
	struct pgrp *pgrp;
	int id;

	db_printf("pid table %p size %x, next %x, last %x\n",
	    pid_table, pid_tbl_mask+1,
	    next_free_pt, last_free_pt);
	for (pt = pid_table, id = 0; id <= pid_tbl_mask; id++, pt++) {
		p = pt->pt_proc;
		if (!P_VALID(p) && !pt->pt_pgrp)
			continue;
		db_printf(" id %x: ", id);
		if (P_VALID(p))
			db_printf("proc %p id %d (0x%x) %s\n",
			    p, p->p_pid, p->p_pid, p->p_comm);
		else
			db_printf("next %x use %x\n",
			    P_NEXT(p) & pid_tbl_mask,
			    P_NEXT(p) & ~pid_tbl_mask);
		if ((pgrp = pt->pt_pgrp)) {
			db_printf("\tsession %p, sid %d, count %d, login %s\n",
			    pgrp->pg_session, pgrp->pg_session->s_sid,
			    pgrp->pg_session->s_count,
			    pgrp->pg_session->s_login);
			db_printf("\tpgrp %p, pg_id %d, pg_jobc %d, members %p\n",
			    pgrp, pgrp->pg_id, pgrp->pg_jobc,
			    LIST_FIRST(&pgrp->pg_members));
			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
				db_printf("\t\tpid %d addr %p pgrp %p %s\n",
				    p->p_pid, p, p->p_pgrp, p->p_comm);
			}
		}
	}
}
#endif /* DDB */

#ifdef KSTACK_CHECK_MAGIC
#include <sys/user.h>

#define	KSTACK_MAGIC	0xdeadbeaf

/* XXX should be per process basis? */
static int	kstackleftmin = KSTACK_SIZE;
static int	kstackleftthres = KSTACK_SIZE / 8;

void
kstack_setup_magic(const struct lwp *l)
{
	uint32_t *ip;
	uint32_t const *end;

	KASSERT(l != NULL);
	KASSERT(l != &lwp0);

	/*
	 * fill all the stack with magic number
	 * so that later modification on it can be detected.
	 */
	ip = (uint32_t *)KSTACK_LOWEST_ADDR(l);
	end = (uint32_t *)((char *)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
	for (; ip < end; ip++) {
		*ip = KSTACK_MAGIC;
	}
}

void
kstack_check_magic(const struct lwp *l)
{
	uint32_t const *ip, *end;
	int stackleft;

	KASSERT(l != NULL);

	/* don't check proc0 */ /*XXX*/
	if (l == &lwp0)
		return;

#ifdef __MACHINE_STACK_GROWS_UP
	/* stack grows upwards (eg. hppa) */
	ip = (uint32_t *)((void *)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
	end = (uint32_t *)KSTACK_LOWEST_ADDR(l);
	for (ip--; ip >= end; ip--)
		if (*ip != KSTACK_MAGIC)
			break;

	stackleft = (void *)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE - (void *)ip;
#else /* __MACHINE_STACK_GROWS_UP */
	/* stack grows downwards (eg. i386) */
	ip = (uint32_t *)KSTACK_LOWEST_ADDR(l);
	end = (uint32_t *)((char *)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
	for (; ip < end; ip++)
		if (*ip != KSTACK_MAGIC)
			break;

	stackleft = ((const char *)ip) - (const char *)KSTACK_LOWEST_ADDR(l);
#endif /* __MACHINE_STACK_GROWS_UP */

	if (kstackleftmin > stackleft) {
		kstackleftmin = stackleft;
		if (stackleft < kstackleftthres)
			printf("warning: kernel stack left %d bytes "
			    "(pid %u:lid %u)\n", stackleft,
			    (u_int)l->l_proc->p_pid, (u_int)l->l_lid);
	}

	if (stackleft <= 0) {
		panic("magic on the top of kernel stack changed for "
		    "pid %u, lid %u: maybe kernel stack overflow",
		    (u_int)l->l_proc->p_pid, (u_int)l->l_lid);
	}
}
#endif /* KSTACK_CHECK_MAGIC */

int
proclist_foreach_call(struct proclist *list,
    int (*callback)(struct proc *, void *arg), void *arg)
{
	struct proc marker;
	struct proc *p;
	struct lwp * const l = curlwp;
	int ret = 0;

	marker.p_flag = PK_MARKER;
	uvm_lwp_hold(l);
	mutex_enter(proc_lock);
	for (p = LIST_FIRST(list); ret == 0 && p != NULL;) {
		if (p->p_flag & PK_MARKER) {
			p = LIST_NEXT(p, p_list);
			continue;
		}
		LIST_INSERT_AFTER(p, &marker, p_list);
		ret = (*callback)(p, arg);
		KASSERT(mutex_owned(proc_lock));
		p = LIST_NEXT(&marker, p_list);
		LIST_REMOVE(&marker, p_list);
	}
	mutex_exit(proc_lock);
	uvm_lwp_rele(l);

	return ret;
}

int
proc_vmspace_getref(struct proc *p, struct vmspace **vm)
{

	/* XXXCDC: how should locking work here? */

	/* curproc exception is for coredump. */

	if ((p != curproc && (p->p_sflag & PS_WEXIT) != 0) ||
	    (p->p_vmspace->vm_refcnt < 1)) { /* XXX */
		return EFAULT;
	}

	uvmspace_addref(p->p_vmspace);
	*vm = p->p_vmspace;

	return 0;
}

/*
 * Acquire a write lock on the process credential.
 */
void
proc_crmod_enter(void)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct plimit *lim;
	kauth_cred_t oc;
	char *cn;

	/* Reset what needs to be reset in plimit. */
	if (p->p_limit->pl_corename != defcorename) {
		lim_privatise(p, false);
		lim = p->p_limit;
		mutex_enter(&lim->pl_lock);
		cn = lim->pl_corename;
		lim->pl_corename = defcorename;
		mutex_exit(&lim->pl_lock);
		if (cn != defcorename)
			free(cn, M_TEMP);
	}

	mutex_enter(p->p_lock);

	/* Ensure the LWP cached credentials are up to date. */
	if ((oc = l->l_cred) != p->p_cred) {
		kauth_cred_hold(p->p_cred);
		l->l_cred = p->p_cred;
		kauth_cred_free(oc);
	}

}

/*
 * Set in a new process credential, and drop the write lock.  The credential
 * must have a reference already.  Optionally, free a no-longer required
 * credential.  The scheduler also needs to inspect p_cred, so we also
 * briefly acquire the sched state mutex.
 */
void
proc_crmod_leave(kauth_cred_t scred, kauth_cred_t fcred, bool sugid)
{
	struct lwp *l = curlwp, *l2;
	struct proc *p = l->l_proc;
	kauth_cred_t oc;

	KASSERT(mutex_owned(p->p_lock));

	/*
	 * Is there a new credential to set in?
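	 * A NULL scred leaves p_cred unchanged; only the sugid handling
	 * below then applies.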
	 */
	if (scred != NULL) {
		p->p_cred = scred;
		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
			if (l2 != l)
				l2->l_prflag |= LPR_CRMOD;
		}

		/* Ensure the LWP cached credentials are up to date. */
		if ((oc = l->l_cred) != scred) {
			kauth_cred_hold(scred);
			l->l_cred = scred;
		}
	} else
		oc = NULL;	/* XXXgcc */

	if (sugid) {
		/*
		 * Mark process as having changed credentials, stops
		 * tracing etc.
		 */
		p->p_flag |= PK_SUGID;
	}

	mutex_exit(p->p_lock);

	/* If there is a credential to be released, free it now. */
	if (fcred != NULL) {
		KASSERT(scred != NULL);
		kauth_cred_free(fcred);
		if (oc != scred)
			kauth_cred_free(oc);
	}
}

/*
 * proc_specific_key_create --
 *	Create a key for subsystem proc-specific data.
 */
int
proc_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
{

	return (specificdata_key_create(proc_specificdata_domain, keyp, dtor));
}

/*
 * proc_specific_key_delete --
 *	Delete a key for subsystem proc-specific data.
 */
void
proc_specific_key_delete(specificdata_key_t key)
{

	specificdata_key_delete(proc_specificdata_domain, key);
}

/*
 * proc_initspecific --
 *	Initialize a proc's specificdata container.
 */
void
proc_initspecific(struct proc *p)
{
	int error;

	error = specificdata_init(proc_specificdata_domain, &p->p_specdataref);
	KASSERT(error == 0);
}

/*
 * proc_finispecific --
 *	Finalize a proc's specificdata container.
 */
void
proc_finispecific(struct proc *p)
{

	specificdata_fini(proc_specificdata_domain, &p->p_specdataref);
}

/*
 * proc_getspecific --
 *	Return proc-specific data corresponding to the specified key.
 */
void *
proc_getspecific(struct proc *p, specificdata_key_t key)
{

	return (specificdata_getspecific(proc_specificdata_domain,
	    &p->p_specdataref, key));
}

/*
 * proc_setspecific --
 *	Set proc-specific data corresponding to the specified key.
 */
void
proc_setspecific(struct proc *p, specificdata_key_t key, void *data)
{

	specificdata_setspecific(proc_specificdata_domain,
	    &p->p_specdataref, key, data);
}