1 /*	$NetBSD: kern_proc.c,v 1.223 2018/12/06 13:51:43 christos Exp $	*/
2 
3 /*-
4  * Copyright (c) 1999, 2006, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * Copyright (c) 1982, 1986, 1989, 1991, 1993
35  *	The Regents of the University of California.  All rights reserved.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. Neither the name of the University nor the names of its contributors
46  *    may be used to endorse or promote products derived from this software
47  *    without specific prior written permission.
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59  * SUCH DAMAGE.
60  *
61  *	@(#)kern_proc.c	8.7 (Berkeley) 2/14/95
62  */
63 
64 #include <sys/cdefs.h>
65 __KERNEL_RCSID(0, "$NetBSD: kern_proc.c,v 1.223 2018/12/06 13:51:43 christos Exp $");
66 
67 #ifdef _KERNEL_OPT
68 #include "opt_kstack.h"
69 #include "opt_maxuprc.h"
70 #include "opt_dtrace.h"
71 #include "opt_compat_netbsd32.h"
72 #include "opt_kaslr.h"
73 #endif
74 
75 #if defined(__HAVE_COMPAT_NETBSD32) && !defined(COMPAT_NETBSD32) \
76     && !defined(_RUMPKERNEL)
77 #define COMPAT_NETBSD32
78 #endif
79 
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/kernel.h>
83 #include <sys/proc.h>
84 #include <sys/resourcevar.h>
85 #include <sys/buf.h>
86 #include <sys/acct.h>
87 #include <sys/wait.h>
88 #include <sys/file.h>
89 #include <ufs/ufs/quota.h>
90 #include <sys/uio.h>
91 #include <sys/pool.h>
92 #include <sys/pset.h>
93 #include <sys/mbuf.h>
94 #include <sys/ioctl.h>
95 #include <sys/tty.h>
96 #include <sys/signalvar.h>
97 #include <sys/ras.h>
98 #include <sys/filedesc.h>
99 #include <sys/syscall_stats.h>
100 #include <sys/kauth.h>
101 #include <sys/sleepq.h>
102 #include <sys/atomic.h>
103 #include <sys/kmem.h>
104 #include <sys/namei.h>
105 #include <sys/dtrace_bsd.h>
106 #include <sys/sysctl.h>
107 #include <sys/exec.h>
108 #include <sys/cpu.h>
109 
110 #include <uvm/uvm_extern.h>
111 #include <uvm/uvm.h>
112 
113 #ifdef COMPAT_NETBSD32
114 #include <compat/netbsd32/netbsd32.h>
115 #endif
116 
117 /*
118  * Process lists.
119  */
120 
121 struct proclist		allproc		__cacheline_aligned;
122 struct proclist		zombproc	__cacheline_aligned;
123 
124 kmutex_t *		proc_lock	__cacheline_aligned;
125 
126 /*
127  * pid to proc lookup is done by indexing the pid_table array.
128  * Since pid numbers are only allocated when an empty slot
129  * has been found, there is no need to search any lists ever.
130  * (an orphaned pgrp will lock the slot, a session will lock
131  * the pgrp with the same number.)
132  * If the table is too small it is reallocated with twice the
133  * previous size and the entries 'unzipped' into the two halves.
134  * A linked list of free entries is threaded through the pt_proc
135  * field of 'free' items - the value is kept odd so it is never a valid ptr.
136  */
137 
138 struct pid_table {
139 	struct proc	*pt_proc;
140 	struct pgrp	*pt_pgrp;
141 	pid_t		pt_pid;
142 };
143 #if 1	/* strongly typed cast - should be a noop */
144 static inline uint p2u(struct proc *p) { return (uint)(uintptr_t)p; }
145 #else
146 #define p2u(p) ((uint)p)
147 #endif
148 #define P_VALID(p) (!(p2u(p) & 1))
149 #define P_NEXT(p) (p2u(p) >> 1)
150 #define P_FREE(pid) ((struct proc *)(uintptr_t)((pid) << 1 | 1))
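
/*
 * Illustrative example of the encoding above: a free slot whose
 * next-free index is 9 and whose saved 'use count' is 0x20 stores
 * P_FREE(0x29), i.e. the odd value 0x53.  P_VALID() is then false
 * (low bit set) and P_NEXT() recovers 0x29.  Real proc pointers are
 * word-aligned, so their low bit is always clear.
 */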
151 
152 /*
153  * Table of process IDs (PIDs).
154  */
155 static struct pid_table *pid_table	__read_mostly;
156 
157 #define	INITIAL_PID_TABLE_SIZE		(1 << 5)
158 
159 /* Table mask, threshold for growing and number of allocated PIDs. */
160 static u_int		pid_tbl_mask	__read_mostly;
161 static u_int		pid_alloc_lim	__read_mostly;
162 static u_int		pid_alloc_cnt	__cacheline_aligned;
163 
164 /* Next free, last free and maximum PIDs. */
165 static u_int		next_free_pt	__cacheline_aligned;
166 static u_int		last_free_pt	__cacheline_aligned;
167 static pid_t		pid_max		__read_mostly;
168 
169 /* Components of the first process -- never freed. */
170 
171 extern struct emul emul_netbsd;	/* defined in kern_exec.c */
172 
173 struct session session0 = {
174 	.s_count = 1,
175 	.s_sid = 0,
176 };
177 struct pgrp pgrp0 = {
178 	.pg_members = LIST_HEAD_INITIALIZER(&pgrp0.pg_members),
179 	.pg_session = &session0,
180 };
181 filedesc_t filedesc0;
182 struct cwdinfo cwdi0 = {
183 	.cwdi_cmask = CMASK,
184 	.cwdi_refcnt = 1,
185 };
186 struct plimit limit0;
187 struct pstats pstat0;
188 struct vmspace vmspace0;
189 struct sigacts sigacts0;
190 struct proc proc0 = {
191 	.p_lwps = LIST_HEAD_INITIALIZER(&proc0.p_lwps),
192 	.p_sigwaiters = LIST_HEAD_INITIALIZER(&proc0.p_sigwaiters),
193 	.p_nlwps = 1,
194 	.p_nrlwps = 1,
195 	.p_nlwpid = 1,		/* must match lwp0.l_lid */
196 	.p_pgrp = &pgrp0,
197 	.p_comm = "system",
198 	/*
199 	 * Set P_NOCLDWAIT so that kernel threads are reparented to init(8)
200 	 * when they exit.  init(8) can easily wait them out for us.
201 	 */
202 	.p_flag = PK_SYSTEM | PK_NOCLDWAIT,
203 	.p_stat = SACTIVE,
204 	.p_nice = NZERO,
205 	.p_emul = &emul_netbsd,
206 	.p_cwdi = &cwdi0,
207 	.p_limit = &limit0,
208 	.p_fd = &filedesc0,
209 	.p_vmspace = &vmspace0,
210 	.p_stats = &pstat0,
211 	.p_sigacts = &sigacts0,
212 #ifdef PROC0_MD_INITIALIZERS
213 	PROC0_MD_INITIALIZERS
214 #endif
215 };
216 kauth_cred_t cred0;
217 
218 static const int	nofile	= NOFILE;
219 static const int	maxuprc	= MAXUPRC;
220 
221 static int sysctl_doeproc(SYSCTLFN_PROTO);
222 static int sysctl_kern_proc_args(SYSCTLFN_PROTO);
223 static int sysctl_security_expose_address(SYSCTLFN_PROTO);
224 
225 #ifdef KASLR
226 static int kern_expose_address = 0;
227 #else
228 static int kern_expose_address = 1;
229 #endif
230 /*
231  * The process list descriptors, used during pid allocation and
232  * by sysctl.  No locking on this data structure is needed since
233  * it is completely static.
234  */
235 const struct proclist_desc proclists[] = {
236 	{ &allproc	},
237 	{ &zombproc	},
238 	{ NULL		},
239 };
240 
241 static struct pgrp *	pg_remove(pid_t);
242 static void		pg_delete(pid_t);
243 static void		orphanpg(struct pgrp *);
244 
245 static specificdata_domain_t proc_specificdata_domain;
246 
247 static pool_cache_t proc_cache;
248 
249 static kauth_listener_t proc_listener;
250 
251 static void fill_proc(const struct proc *, struct proc *, bool);
252 static int fill_pathname(struct lwp *, pid_t, void *, size_t *);
253 
254 static int
255 proc_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
256     void *arg0, void *arg1, void *arg2, void *arg3)
257 {
258 	struct proc *p;
259 	int result;
260 
261 	result = KAUTH_RESULT_DEFER;
262 	p = arg0;
263 
264 	switch (action) {
265 	case KAUTH_PROCESS_CANSEE: {
266 		enum kauth_process_req req;
267 
268 		req = (enum kauth_process_req)arg1;
269 
270 		switch (req) {
271 		case KAUTH_REQ_PROCESS_CANSEE_ARGS:
272 		case KAUTH_REQ_PROCESS_CANSEE_ENTRY:
273 		case KAUTH_REQ_PROCESS_CANSEE_OPENFILES:
274 		case KAUTH_REQ_PROCESS_CANSEE_EPROC:
275 			result = KAUTH_RESULT_ALLOW;
276 			break;
277 
278 		case KAUTH_REQ_PROCESS_CANSEE_ENV:
279 			if (kauth_cred_getuid(cred) !=
280 			    kauth_cred_getuid(p->p_cred) ||
281 			    kauth_cred_getuid(cred) !=
282 			    kauth_cred_getsvuid(p->p_cred))
283 				break;
284 
285 			result = KAUTH_RESULT_ALLOW;
286 
287 			break;
288 
289 		case KAUTH_REQ_PROCESS_CANSEE_KPTR:
290 			if (!kern_expose_address)
291 				break;
292 
293 			if (kern_expose_address == 1 && !(p->p_flag & PK_KMEM))
294 				break;
295 
296 			result = KAUTH_RESULT_ALLOW;
297 
298 			break;
299 
300 		default:
301 			break;
302 		}
303 
304 		break;
305 		}
306 
307 	case KAUTH_PROCESS_FORK: {
308 		int lnprocs = (int)(unsigned long)arg2;
309 
310 		/*
311 		 * Don't allow a nonprivileged user to use the last few
312 		 * processes. The variable lnprocs is the current number of
313 		 * processes, maxproc is the limit.
314 		 */
315 		if (__predict_false((lnprocs >= maxproc - 5)))
316 			break;
317 
318 		result = KAUTH_RESULT_ALLOW;
319 
320 		break;
321 		}
322 
323 	case KAUTH_PROCESS_CORENAME:
324 	case KAUTH_PROCESS_STOPFLAG:
325 		if (proc_uidmatch(cred, p->p_cred) == 0)
326 			result = KAUTH_RESULT_ALLOW;
327 
328 		break;
329 
330 	default:
331 		break;
332 	}
333 
334 	return result;
335 }
336 
337 static int
338 proc_ctor(void *arg __unused, void *obj, int flags __unused)
339 {
340 	memset(obj, 0, sizeof(struct proc));
341 	return 0;
342 }
343 
344 /*
345  * Initialize global process hashing structures.
346  */
347 void
348 procinit(void)
349 {
350 	const struct proclist_desc *pd;
351 	u_int i;
352 #define	LINK_EMPTY ((PID_MAX + INITIAL_PID_TABLE_SIZE) & ~(INITIAL_PID_TABLE_SIZE - 1))
353 
354 	for (pd = proclists; pd->pd_list != NULL; pd++)
355 		LIST_INIT(pd->pd_list);
356 
357 	proc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
358 	pid_table = kmem_alloc(INITIAL_PID_TABLE_SIZE
359 	    * sizeof(struct pid_table), KM_SLEEP);
360 	pid_tbl_mask = INITIAL_PID_TABLE_SIZE - 1;
361 	pid_max = PID_MAX;
362 
363 	/* Set free list running through table...
364 	   Preset 'use count' above PID_MAX so we allocate pid 1 next. */
365 	for (i = 0; i <= pid_tbl_mask; i++) {
366 		pid_table[i].pt_proc = P_FREE(LINK_EMPTY + i + 1);
367 		pid_table[i].pt_pgrp = 0;
368 		pid_table[i].pt_pid = 0;
369 	}
370 	/* slot 0 is just grabbed */
371 	next_free_pt = 1;
372 	/* Need to fix last entry. */
373 	last_free_pt = pid_tbl_mask;
374 	pid_table[last_free_pt].pt_proc = P_FREE(LINK_EMPTY);
375 	/* point at which we grow table - to avoid reusing pids too often */
376 	pid_alloc_lim = pid_tbl_mask - 1;
377 #undef LINK_EMPTY
378 
379 	proc_specificdata_domain = specificdata_domain_create();
380 	KASSERT(proc_specificdata_domain != NULL);
381 
382 	proc_cache = pool_cache_init(sizeof(struct proc), 0, 0, 0,
383 	    "procpl", NULL, IPL_NONE, proc_ctor, NULL, NULL);
384 
385 	proc_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
386 	    proc_listener_cb, NULL);
387 }
388 
389 void
390 procinit_sysctl(void)
391 {
392 	static struct sysctllog *clog;
393 
394 	sysctl_createv(&clog, 0, NULL, NULL,
395 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
396 		       CTLTYPE_INT, "expose_address",
397 		       SYSCTL_DESCR("Enable exposing kernel addresses"),
398 		       sysctl_security_expose_address, 0,
399 		       &kern_expose_address, 0, CTL_KERN, CTL_CREATE, CTL_EOL);
400 	sysctl_createv(&clog, 0, NULL, NULL,
401 		       CTLFLAG_PERMANENT,
402 		       CTLTYPE_NODE, "proc",
403 		       SYSCTL_DESCR("System-wide process information"),
404 		       sysctl_doeproc, 0, NULL, 0,
405 		       CTL_KERN, KERN_PROC, CTL_EOL);
406 	sysctl_createv(&clog, 0, NULL, NULL,
407 		       CTLFLAG_PERMANENT,
408 		       CTLTYPE_NODE, "proc2",
409 		       SYSCTL_DESCR("Machine-independent process information"),
410 		       sysctl_doeproc, 0, NULL, 0,
411 		       CTL_KERN, KERN_PROC2, CTL_EOL);
412 	sysctl_createv(&clog, 0, NULL, NULL,
413 		       CTLFLAG_PERMANENT,
414 		       CTLTYPE_NODE, "proc_args",
415 		       SYSCTL_DESCR("Process argument information"),
416 		       sysctl_kern_proc_args, 0, NULL, 0,
417 		       CTL_KERN, KERN_PROC_ARGS, CTL_EOL);
418 
419 	/*
420 	  "nodes" under these:
421 
422 	  KERN_PROC_ALL
423 	  KERN_PROC_PID pid
424 	  KERN_PROC_PGRP pgrp
425 	  KERN_PROC_SESSION sess
426 	  KERN_PROC_TTY tty
427 	  KERN_PROC_UID uid
428 	  KERN_PROC_RUID uid
429 	  KERN_PROC_GID gid
430 	  KERN_PROC_RGID gid
431 
432 	  all in all, probably not worth the effort...
433 	*/
434 }
435 
436 /*
437  * Initialize process 0.
438  */
439 void
440 proc0_init(void)
441 {
442 	struct proc *p;
443 	struct pgrp *pg;
444 	struct rlimit *rlim;
445 	rlim_t lim;
446 	int i;
447 
448 	p = &proc0;
449 	pg = &pgrp0;
450 
451 	mutex_init(&p->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
452 	mutex_init(&p->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
453 	p->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
454 
455 	rw_init(&p->p_reflock);
456 	cv_init(&p->p_waitcv, "wait");
457 	cv_init(&p->p_lwpcv, "lwpwait");
458 
459 	LIST_INSERT_HEAD(&p->p_lwps, &lwp0, l_sibling);
460 
461 	pid_table[0].pt_proc = p;
462 	LIST_INSERT_HEAD(&allproc, p, p_list);
463 
464 	pid_table[0].pt_pgrp = pg;
465 	LIST_INSERT_HEAD(&pg->pg_members, p, p_pglist);
466 
467 #ifdef __HAVE_SYSCALL_INTERN
468 	(*p->p_emul->e_syscall_intern)(p);
469 #endif
470 
471 	/* Create credentials. */
472 	cred0 = kauth_cred_alloc();
473 	p->p_cred = cred0;
474 
475 	/* Create the CWD info. */
476 	rw_init(&cwdi0.cwdi_lock);
477 
478 	/* Create the limits structures. */
479 	mutex_init(&limit0.pl_lock, MUTEX_DEFAULT, IPL_NONE);
480 
481 	rlim = limit0.pl_rlimit;
482 	for (i = 0; i < __arraycount(limit0.pl_rlimit); i++) {
483 		rlim[i].rlim_cur = RLIM_INFINITY;
484 		rlim[i].rlim_max = RLIM_INFINITY;
485 	}
486 
487 	rlim[RLIMIT_NOFILE].rlim_max = maxfiles;
488 	rlim[RLIMIT_NOFILE].rlim_cur = maxfiles < nofile ? maxfiles : nofile;
489 
490 	rlim[RLIMIT_NPROC].rlim_max = maxproc;
491 	rlim[RLIMIT_NPROC].rlim_cur = maxproc < maxuprc ? maxproc : maxuprc;
492 
493 	lim = MIN(VM_MAXUSER_ADDRESS, ctob((rlim_t)uvmexp.free));
494 	rlim[RLIMIT_RSS].rlim_max = lim;
495 	rlim[RLIMIT_MEMLOCK].rlim_max = lim;
496 	rlim[RLIMIT_MEMLOCK].rlim_cur = lim / 3;
497 
498 	rlim[RLIMIT_NTHR].rlim_max = maxlwp;
499 	rlim[RLIMIT_NTHR].rlim_cur = maxlwp < maxuprc ? maxlwp : maxuprc;
500 
501 	/* Note that default core name has zero length. */
502 	limit0.pl_corename = defcorename;
503 	limit0.pl_cnlen = 0;
504 	limit0.pl_refcnt = 1;
505 	limit0.pl_writeable = false;
506 	limit0.pl_sv_limit = NULL;
507 
508 	/* Configure virtual memory system, set vm rlimits. */
509 	uvm_init_limits(p);
510 
511 	/* Initialize file descriptor table for proc0. */
512 	fd_init(&filedesc0);
513 
514 	/*
515 	 * Initialize proc0's vmspace, which uses the kernel pmap.
516 	 * All kernel processes (which never have user space mappings)
517 	 * share proc0's vmspace, and thus, the kernel pmap.
518 	 */
519 	uvmspace_init(&vmspace0, pmap_kernel(), round_page(VM_MIN_ADDRESS),
520 	    trunc_page(VM_MAXUSER_ADDRESS),
521 #ifdef __USE_TOPDOWN_VM
522 	    true
523 #else
524 	    false
525 #endif
526 	    );
527 
528 	/* Initialize signal state for proc0. XXX IPL_SCHED */
529 	mutex_init(&p->p_sigacts->sa_mutex, MUTEX_DEFAULT, IPL_SCHED);
530 	siginit(p);
531 
532 	proc_initspecific(p);
533 	kdtrace_proc_ctor(NULL, p);
534 }
535 
536 /*
537  * Session reference counting.
538  */
539 
540 void
541 proc_sesshold(struct session *ss)
542 {
543 
544 	KASSERT(mutex_owned(proc_lock));
545 	ss->s_count++;
546 }
547 
548 void
549 proc_sessrele(struct session *ss)
550 {
551 
552 	KASSERT(mutex_owned(proc_lock));
553 	/*
554 	 * We keep the pgrp with the same id as the session in order to
555 	 * stop a process being given the same pid.  Since the pgrp holds
556 	 * a reference to the session, it must be a 'zombie' pgrp by now.
557 	 */
558 	if (--ss->s_count == 0) {
559 		struct pgrp *pg;
560 
561 		pg = pg_remove(ss->s_sid);
562 		mutex_exit(proc_lock);
563 
564 		kmem_free(pg, sizeof(struct pgrp));
565 		kmem_free(ss, sizeof(struct session));
566 	} else {
567 		mutex_exit(proc_lock);
568 	}
569 }
570 
571 /*
572  * Check that the specified process group is in the session of the
573  * specified process.
574  * Treats negative ids as process ids.
575  * Used to validate TIOCSPGRP requests.
576  */
577 int
578 pgid_in_session(struct proc *p, pid_t pg_id)
579 {
580 	struct pgrp *pgrp;
581 	struct session *session;
582 	int error;
583 
584 	mutex_enter(proc_lock);
585 	if (pg_id < 0) {
586 		struct proc *p1 = proc_find(-pg_id);
587 		if (p1 == NULL) {
588 			error = EINVAL;
589 			goto fail;
590 		}
591 		pgrp = p1->p_pgrp;
592 	} else {
593 		pgrp = pgrp_find(pg_id);
594 		if (pgrp == NULL) {
595 			error = EINVAL;
596 			goto fail;
597 		}
598 	}
599 	session = pgrp->pg_session;
600 	error = (session != p->p_pgrp->pg_session) ? EPERM : 0;
601 fail:
602 	mutex_exit(proc_lock);
603 	return error;
604 }
605 
606 /*
607  * p_inferior: is p an inferior of q?
608  */
609 static inline bool
610 p_inferior(struct proc *p, struct proc *q)
611 {
612 
613 	KASSERT(mutex_owned(proc_lock));
614 
615 	for (; p != q; p = p->p_pptr)
616 		if (p->p_pid == 0)
617 			return false;
618 	return true;
619 }
620 
621 /*
622  * proc_find: locate a process by the ID.
623  *
624  * => Must be called with proc_lock held.
625  */
626 proc_t *
627 proc_find_raw(pid_t pid)
628 {
629 	struct pid_table *pt;
630 	proc_t *p;
631 
632 	KASSERT(mutex_owned(proc_lock));
633 	pt = &pid_table[pid & pid_tbl_mask];
634 	p = pt->pt_proc;
635 	if (__predict_false(!P_VALID(p) || pt->pt_pid != pid)) {
636 		return NULL;
637 	}
638 	return p;
639 }
640 
641 proc_t *
642 proc_find(pid_t pid)
643 {
644 	proc_t *p;
645 
646 	p = proc_find_raw(pid);
647 	if (__predict_false(p == NULL)) {
648 		return NULL;
649 	}
650 
651 	/*
652 	 * Only allow live processes to be found by PID.
653 	 * XXX: p_stat might change, since unlocked.
654 	 */
655 	if (__predict_true(p->p_stat == SACTIVE || p->p_stat == SSTOP)) {
656 		return p;
657 	}
658 	return NULL;
659 }
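
/*
 * Example usage (sketch): both lookups require proc_lock, and the
 * result is only stable while the lock is held:
 *
 *	mutex_enter(proc_lock);
 *	if ((p = proc_find(pid)) != NULL) {
 *		... use p while holding proc_lock ...
 *	}
 *	mutex_exit(proc_lock);
 */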
660 
661 /*
662  * pgrp_find: locate a process group by the ID.
663  *
664  * => Must be called with proc_lock held.
665  */
666 struct pgrp *
667 pgrp_find(pid_t pgid)
668 {
669 	struct pgrp *pg;
670 
671 	KASSERT(mutex_owned(proc_lock));
672 
673 	pg = pid_table[pgid & pid_tbl_mask].pt_pgrp;
674 
675 	/*
676 	 * Cannot look up a process group that only exists because the
677 	 * session has not died yet (traditional).
678 	 */
679 	if (pg == NULL || pg->pg_id != pgid || LIST_EMPTY(&pg->pg_members)) {
680 		return NULL;
681 	}
682 	return pg;
683 }
684 
685 static void
686 expand_pid_table(void)
687 {
688 	size_t pt_size, tsz;
689 	struct pid_table *n_pt, *new_pt;
690 	struct proc *proc;
691 	struct pgrp *pgrp;
692 	pid_t pid, rpid;
693 	u_int i;
694 	uint new_pt_mask;
695 
696 	pt_size = pid_tbl_mask + 1;
697 	tsz = pt_size * 2 * sizeof(struct pid_table);
698 	new_pt = kmem_alloc(tsz, KM_SLEEP);
699 	new_pt_mask = pt_size * 2 - 1;
700 
701 	mutex_enter(proc_lock);
702 	if (pt_size != pid_tbl_mask + 1) {
703 		/* Another process beat us to it... */
704 		mutex_exit(proc_lock);
705 		kmem_free(new_pt, tsz);
706 		return;
707 	}
708 
709 	/*
710 	 * Copy entries from old table into new one.
711 	 * If 'pid' is 'odd' we place the entry in the upper half,
712 	 * even pids go to the lower half.
713 	 * Free items stay in the low half so we don't have to
714 	 * fixup the reference to them.
715 	 * We stuff free items on the front of the freelist
716 	 * because we can't write to unmodified entries.
717 	 * Processing the table backwards maintains a semblance
718 	 * of issuing pid numbers that increase with time.
719 	 */
720 	i = pt_size - 1;
721 	n_pt = new_pt + i;
722 	for (; ; i--, n_pt--) {
723 		proc = pid_table[i].pt_proc;
724 		pgrp = pid_table[i].pt_pgrp;
725 		if (!P_VALID(proc)) {
726 			/* Up 'use count' so that link is valid */
727 			pid = (P_NEXT(proc) + pt_size) & ~pt_size;
728 			rpid = 0;
729 			proc = P_FREE(pid);
730 			if (pgrp)
731 				pid = pgrp->pg_id;
732 		} else {
733 			pid = pid_table[i].pt_pid;
734 			rpid = pid;
735 		}
736 
737 		/* Save entry in appropriate half of table */
738 		n_pt[pid & pt_size].pt_proc = proc;
739 		n_pt[pid & pt_size].pt_pgrp = pgrp;
740 		n_pt[pid & pt_size].pt_pid = rpid;
741 
742 		/* Put other piece on start of free list */
743 		pid = (pid ^ pt_size) & ~pid_tbl_mask;
744 		n_pt[pid & pt_size].pt_proc =
745 			P_FREE((pid & ~pt_size) | next_free_pt);
746 		n_pt[pid & pt_size].pt_pgrp = 0;
747 		n_pt[pid & pt_size].pt_pid = 0;
748 
749 		next_free_pt = i | (pid & pt_size);
750 		if (i == 0)
751 			break;
752 	}
753 
754 	/* Save old table size and switch tables */
755 	tsz = pt_size * sizeof(struct pid_table);
756 	n_pt = pid_table;
757 	pid_table = new_pt;
758 	pid_tbl_mask = new_pt_mask;
759 
760 	/*
761 	 * pid_max starts as PID_MAX (= 30000), once we have 16384
762 	 * allocated pids we need it to be larger!
763 	 */
764 	if (pid_tbl_mask > PID_MAX) {
765 		pid_max = pid_tbl_mask * 2 + 1;
766 		pid_alloc_lim |= pid_alloc_lim << 1;
767 	} else
768 		pid_alloc_lim <<= 1;	/* doubles number of free slots... */
769 
770 	mutex_exit(proc_lock);
771 	kmem_free(n_pt, tsz);
772 }
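
/*
 * Illustrative example: growing from 32 to 64 slots.  The entry in old
 * slot 7 lands in new slot 7 or new slot 39 (7 + 32) depending on bit
 * 0x20 of its pid: pid 71 (0x47) keeps slot 7, while pid 39 (0x27)
 * would move to slot 39.  Whichever of the two slots is left unused is
 * pushed onto the front of the free list.
 */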
773 
774 struct proc *
775 proc_alloc(void)
776 {
777 	struct proc *p;
778 
779 	p = pool_cache_get(proc_cache, PR_WAITOK);
780 	p->p_stat = SIDL;			/* protect against others */
781 	proc_initspecific(p);
782 	kdtrace_proc_ctor(NULL, p);
783 	p->p_pid = -1;
784 	proc_alloc_pid(p);
785 	return p;
786 }
787 
788 /*
789  * proc_alloc_pid: allocate PID and record the given proc 'p' so that
790  * proc_find_raw() can find it by the PID.
791  */
792 
793 pid_t
794 proc_alloc_pid(struct proc *p)
795 {
796 	struct pid_table *pt;
797 	pid_t pid;
798 	int nxt;
799 
800 	for (;;expand_pid_table()) {
801 		if (__predict_false(pid_alloc_cnt >= pid_alloc_lim))
802 			/* ensure pids cycle through 2000+ values */
803 			continue;
804 		mutex_enter(proc_lock);
805 		pt = &pid_table[next_free_pt];
806 #ifdef DIAGNOSTIC
807 		if (__predict_false(P_VALID(pt->pt_proc) || pt->pt_pgrp))
808 			panic("proc_alloc: slot busy");
809 #endif
810 		nxt = P_NEXT(pt->pt_proc);
811 		if (nxt & pid_tbl_mask)
812 			break;
813 		/* Table full - expand (NB last entry not used....) */
814 		mutex_exit(proc_lock);
815 	}
816 
817 	/* pid is 'saved use count' + 'size' + entry */
818 	pid = (nxt & ~pid_tbl_mask) + pid_tbl_mask + 1 + next_free_pt;
819 	if ((uint)pid > (uint)pid_max)
820 		pid &= pid_tbl_mask;
821 	next_free_pt = nxt & pid_tbl_mask;
822 
823 	/* Grab table slot */
824 	pt->pt_proc = p;
825 
826 	KASSERT(pt->pt_pid == 0);
827 	pt->pt_pid = pid;
828 	if (p->p_pid == -1) {
829 		p->p_pid = pid;
830 	}
831 	pid_alloc_cnt++;
832 	mutex_exit(proc_lock);
833 
834 	return pid;
835 }
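
/*
 * Worked example (assuming the initial 32-slot table, mask 0x1f): if
 * next_free_pt is 7 and that slot stores P_FREE(0x29), then nxt is
 * 0x29, the pid handed out is (0x29 & ~0x1f) + 0x1f + 1 + 7 = 0x47
 * (71), and next_free_pt advances to 0x29 & 0x1f = 9.
 */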
836 
837 /*
838  * Free a process id - called from proc_free (in kern_exit.c)
839  *
840  * Called with the proc_lock held.
841  */
842 void
843 proc_free_pid(pid_t pid)
844 {
845 	struct pid_table *pt;
846 
847 	KASSERT(mutex_owned(proc_lock));
848 
849 	pt = &pid_table[pid & pid_tbl_mask];
850 
851 	/* save pid use count in slot */
852 	pt->pt_proc = P_FREE(pid & ~pid_tbl_mask);
853 	KASSERT(pt->pt_pid == pid);
854 	pt->pt_pid = 0;
855 
856 	if (pt->pt_pgrp == NULL) {
857 		/* link last freed entry onto ours */
858 		pid &= pid_tbl_mask;
859 		pt = &pid_table[last_free_pt];
860 		pt->pt_proc = P_FREE(P_NEXT(pt->pt_proc) | pid);
861 		pt->pt_pid = 0;
862 		last_free_pt = pid;
863 		pid_alloc_cnt--;
864 	}
865 
866 	atomic_dec_uint(&nprocs);
867 }
868 
869 void
870 proc_free_mem(struct proc *p)
871 {
872 
873 	kdtrace_proc_dtor(NULL, p);
874 	pool_cache_put(proc_cache, p);
875 }
876 
877 /*
878  * proc_enterpgrp: move p to a new or existing process group (and session).
879  *
880  * If we are creating a new pgrp, the pgid should equal
881  * the calling process' pid.
882  * It is only valid to enter a process group that is in the session
883  * of the process.
884  * Also, mksess should only be set if we are creating a process group.
885  *
886  * Only called from sys_setsid, sys_setpgid and posix_spawn/spawn_return.
887  */
888 int
889 proc_enterpgrp(struct proc *curp, pid_t pid, pid_t pgid, bool mksess)
890 {
891 	struct pgrp *new_pgrp, *pgrp;
892 	struct session *sess;
893 	struct proc *p;
894 	int rval;
895 	pid_t pg_id = NO_PGID;
896 
897 	sess = mksess ? kmem_alloc(sizeof(*sess), KM_SLEEP) : NULL;
898 
899 	/* Allocate data areas we might need before doing any validity checks */
900 	mutex_enter(proc_lock);		/* Because pid_table might change */
901 	if (pid_table[pgid & pid_tbl_mask].pt_pgrp == 0) {
902 		mutex_exit(proc_lock);
903 		new_pgrp = kmem_alloc(sizeof(*new_pgrp), KM_SLEEP);
904 		mutex_enter(proc_lock);
905 	} else
906 		new_pgrp = NULL;
907 	rval = EPERM;	/* most common error (to save typing) */
908 
909 	/* Check pgrp exists or can be created */
910 	pgrp = pid_table[pgid & pid_tbl_mask].pt_pgrp;
911 	if (pgrp != NULL && pgrp->pg_id != pgid)
912 		goto done;
913 
914 	/* Can only set another process under restricted circumstances. */
915 	if (pid != curp->p_pid) {
916 		/* Must exist and be one of our children... */
917 		p = proc_find(pid);
918 		if (p == NULL || !p_inferior(p, curp)) {
919 			rval = ESRCH;
920 			goto done;
921 		}
922 		/* ... in the same session... */
923 		if (sess != NULL || p->p_session != curp->p_session)
924 			goto done;
925 		/* ... existing pgid must be in same session ... */
926 		if (pgrp != NULL && pgrp->pg_session != p->p_session)
927 			goto done;
928 		/* ... and not done an exec. */
929 		if (p->p_flag & PK_EXEC) {
930 			rval = EACCES;
931 			goto done;
932 		}
933 	} else {
934 		/* ... setsid() cannot re-enter a pgrp */
935 		if (mksess && (curp->p_pgid == curp->p_pid ||
936 		    pgrp_find(curp->p_pid)))
937 			goto done;
938 		p = curp;
939 	}
940 
941 	/* Changing the process group/session of a session
942 	   leader is definitely off limits. */
943 	if (SESS_LEADER(p)) {
944 		if (sess == NULL && p->p_pgrp == pgrp)
945 			/* unless it's a definite noop */
946 			rval = 0;
947 		goto done;
948 	}
949 
950 	/* Can only create a process group with id of process */
951 	if (pgrp == NULL && pgid != pid)
952 		goto done;
953 
954 	/* Can only create a session if creating pgrp */
955 	if (sess != NULL && pgrp != NULL)
956 		goto done;
957 
958 	/* Check we allocated memory for a pgrp... */
959 	if (pgrp == NULL && new_pgrp == NULL)
960 		goto done;
961 
962 	/* Don't attach to 'zombie' pgrp */
963 	if (pgrp != NULL && LIST_EMPTY(&pgrp->pg_members))
964 		goto done;
965 
966 	/* Expect to succeed now */
967 	rval = 0;
968 
969 	if (pgrp == p->p_pgrp)
970 		/* nothing to do */
971 		goto done;
972 
973 	/* Ok all setup, link up required structures */
974 
975 	if (pgrp == NULL) {
976 		pgrp = new_pgrp;
977 		new_pgrp = NULL;
978 		if (sess != NULL) {
979 			sess->s_sid = p->p_pid;
980 			sess->s_leader = p;
981 			sess->s_count = 1;
982 			sess->s_ttyvp = NULL;
983 			sess->s_ttyp = NULL;
984 			sess->s_flags = p->p_session->s_flags & ~S_LOGIN_SET;
985 			memcpy(sess->s_login, p->p_session->s_login,
986 			    sizeof(sess->s_login));
987 			p->p_lflag &= ~PL_CONTROLT;
988 		} else {
989 			sess = p->p_pgrp->pg_session;
990 			proc_sesshold(sess);
991 		}
992 		pgrp->pg_session = sess;
993 		sess = NULL;
994 
995 		pgrp->pg_id = pgid;
996 		LIST_INIT(&pgrp->pg_members);
997 #ifdef DIAGNOSTIC
998 		if (__predict_false(pid_table[pgid & pid_tbl_mask].pt_pgrp))
999 			panic("enterpgrp: pgrp table slot in use");
1000 		if (__predict_false(mksess && p != curp))
1001 			panic("enterpgrp: mksession and p != curproc");
1002 #endif
1003 		pid_table[pgid & pid_tbl_mask].pt_pgrp = pgrp;
1004 		pgrp->pg_jobc = 0;
1005 	}
1006 
1007 	/*
1008 	 * Adjust eligibility of affected pgrps to participate in job control.
1009 	 * Increment eligibility counts before decrementing, otherwise we
1010 	 * could reach 0 spuriously during the first call.
1011 	 */
1012 	fixjobc(p, pgrp, 1);
1013 	fixjobc(p, p->p_pgrp, 0);
1014 
1015 	/* Interlock with ttread(). */
1016 	mutex_spin_enter(&tty_lock);
1017 
1018 	/* Move process to requested group. */
1019 	LIST_REMOVE(p, p_pglist);
1020 	if (LIST_EMPTY(&p->p_pgrp->pg_members))
1021 		/* defer delete until we've dumped the lock */
1022 		pg_id = p->p_pgrp->pg_id;
1023 	p->p_pgrp = pgrp;
1024 	LIST_INSERT_HEAD(&pgrp->pg_members, p, p_pglist);
1025 
1026 	/* Done with the swap; we can release the tty mutex. */
1027 	mutex_spin_exit(&tty_lock);
1028 
1029     done:
1030 	if (pg_id != NO_PGID) {
1031 		/* Releases proc_lock. */
1032 		pg_delete(pg_id);
1033 	} else {
1034 		mutex_exit(proc_lock);
1035 	}
1036 	if (sess != NULL)
1037 		kmem_free(sess, sizeof(*sess));
1038 	if (new_pgrp != NULL)
1039 		kmem_free(new_pgrp, sizeof(*new_pgrp));
1040 #ifdef DEBUG_PGRP
1041 	if (__predict_false(rval))
1042 		printf("enterpgrp(%d,%d,%d), curproc %d, rval %d\n",
1043 			pid, pgid, mksess, curp->p_pid, rval);
1044 #endif
1045 	return rval;
1046 }
1047 
1048 /*
1049  * proc_leavepgrp: remove a process from its process group.
1050  *  => must be called with the proc_lock held, which will be released;
1051  */
1052 void
1053 proc_leavepgrp(struct proc *p)
1054 {
1055 	struct pgrp *pgrp;
1056 
1057 	KASSERT(mutex_owned(proc_lock));
1058 
1059 	/* Interlock with ttread() */
1060 	mutex_spin_enter(&tty_lock);
1061 	pgrp = p->p_pgrp;
1062 	LIST_REMOVE(p, p_pglist);
1063 	p->p_pgrp = NULL;
1064 	mutex_spin_exit(&tty_lock);
1065 
1066 	if (LIST_EMPTY(&pgrp->pg_members)) {
1067 		/* Releases proc_lock. */
1068 		pg_delete(pgrp->pg_id);
1069 	} else {
1070 		mutex_exit(proc_lock);
1071 	}
1072 }
1073 
1074 /*
1075  * pg_remove: remove a process group from the table.
1076  *  => must be called with the proc_lock held;
1077  *  => returns process group to free;
1078  */
1079 static struct pgrp *
1080 pg_remove(pid_t pg_id)
1081 {
1082 	struct pgrp *pgrp;
1083 	struct pid_table *pt;
1084 
1085 	KASSERT(mutex_owned(proc_lock));
1086 
1087 	pt = &pid_table[pg_id & pid_tbl_mask];
1088 	pgrp = pt->pt_pgrp;
1089 
1090 	KASSERT(pgrp != NULL);
1091 	KASSERT(pgrp->pg_id == pg_id);
1092 	KASSERT(LIST_EMPTY(&pgrp->pg_members));
1093 
1094 	pt->pt_pgrp = NULL;
1095 
1096 	if (!P_VALID(pt->pt_proc)) {
1097 		/* Orphaned pgrp, put slot onto free list. */
1098 		KASSERT((P_NEXT(pt->pt_proc) & pid_tbl_mask) == 0);
1099 		pg_id &= pid_tbl_mask;
1100 		pt = &pid_table[last_free_pt];
1101 		pt->pt_proc = P_FREE(P_NEXT(pt->pt_proc) | pg_id);
1102 		KASSERT(pt->pt_pid == 0);
1103 		last_free_pt = pg_id;
1104 		pid_alloc_cnt--;
1105 	}
1106 	return pgrp;
1107 }
1108 
1109 /*
1110  * pg_delete: delete and free a process group.
1111  *  => must be called with the proc_lock held, which will be released.
1112  */
1113 static void
1114 pg_delete(pid_t pg_id)
1115 {
1116 	struct pgrp *pg;
1117 	struct tty *ttyp;
1118 	struct session *ss;
1119 
1120 	KASSERT(mutex_owned(proc_lock));
1121 
1122 	pg = pid_table[pg_id & pid_tbl_mask].pt_pgrp;
1123 	if (pg == NULL || pg->pg_id != pg_id || !LIST_EMPTY(&pg->pg_members)) {
1124 		mutex_exit(proc_lock);
1125 		return;
1126 	}
1127 
1128 	ss = pg->pg_session;
1129 
1130 	/* Remove reference (if any) from tty to this process group */
1131 	mutex_spin_enter(&tty_lock);
1132 	ttyp = ss->s_ttyp;
1133 	if (ttyp != NULL && ttyp->t_pgrp == pg) {
1134 		ttyp->t_pgrp = NULL;
1135 		KASSERT(ttyp->t_session == ss);
1136 	}
1137 	mutex_spin_exit(&tty_lock);
1138 
1139 	/*
1140 	 * The leading process group in a session is freed by proc_sessrele(),
1141 	 * if last reference.  Note: proc_sessrele() releases proc_lock.
1142 	 */
1143 	pg = (ss->s_sid != pg->pg_id) ? pg_remove(pg_id) : NULL;
1144 	proc_sessrele(ss);
1145 
1146 	if (pg != NULL) {
1147 		/* Free it, if was not done by proc_sessrele(). */
1148 		kmem_free(pg, sizeof(struct pgrp));
1149 	}
1150 }
1151 
1152 /*
1153  * Adjust pgrp jobc counters when specified process changes process group.
1154  * We count the number of processes in each process group that "qualify"
1155  * the group for terminal job control (those with a parent in a different
1156  * process group of the same session).  If that count reaches zero, the
1157  * process group becomes orphaned.  Check both the specified process'
1158  * process group and that of its children.
1159  * entering == 0 => p is leaving specified group.
1160  * entering == 1 => p is entering specified group.
1161  *
1162  * Call with proc_lock held.
1163  */
1164 void
1165 fixjobc(struct proc *p, struct pgrp *pgrp, int entering)
1166 {
1167 	struct pgrp *hispgrp;
1168 	struct session *mysession = pgrp->pg_session;
1169 	struct proc *child;
1170 
1171 	KASSERT(mutex_owned(proc_lock));
1172 
1173 	/*
1174 	 * Check p's parent to see whether p qualifies its own process
1175 	 * group; if so, adjust count for p's process group.
1176 	 */
1177 	hispgrp = p->p_pptr->p_pgrp;
1178 	if (hispgrp != pgrp && hispgrp->pg_session == mysession) {
1179 		if (entering) {
1180 			pgrp->pg_jobc++;
1181 			p->p_lflag &= ~PL_ORPHANPG;
1182 		} else if (--pgrp->pg_jobc == 0)
1183 			orphanpg(pgrp);
1184 	}
1185 
1186 	/*
1187 	 * Check this process' children to see whether they qualify
1188 	 * their process groups; if so, adjust counts for children's
1189 	 * process groups.
1190 	 */
1191 	LIST_FOREACH(child, &p->p_children, p_sibling) {
1192 		hispgrp = child->p_pgrp;
1193 		if (hispgrp != pgrp && hispgrp->pg_session == mysession &&
1194 		    !P_ZOMBIE(child)) {
1195 			if (entering) {
1196 				child->p_lflag &= ~PL_ORPHANPG;
1197 				hispgrp->pg_jobc++;
1198 			} else if (--hispgrp->pg_jobc == 0)
1199 				orphanpg(hispgrp);
1200 		}
1201 	}
1202 }
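
/*
 * Illustrative example: a shell forks a child and moves it into a new
 * pgrp within the same session.  The child's parent now sits in a
 * different pgrp of that session, so the new pgrp qualifies for job
 * control and gets pg_jobc == 1; once the last qualifying member
 * leaves, pg_jobc drops to 0 and orphanpg() is called.
 */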
1203 
1204 /*
1205  * A process group has become orphaned;
1206  * if there are any stopped processes in the group,
1207  * hang up all processes in that group.
1208  *
1209  * Call with proc_lock held.
1210  */
1211 static void
1212 orphanpg(struct pgrp *pg)
1213 {
1214 	struct proc *p;
1215 
1216 	KASSERT(mutex_owned(proc_lock));
1217 
1218 	LIST_FOREACH(p, &pg->pg_members, p_pglist) {
1219 		if (p->p_stat == SSTOP) {
1220 			p->p_lflag |= PL_ORPHANPG;
1221 			psignal(p, SIGHUP);
1222 			psignal(p, SIGCONT);
1223 		}
1224 	}
1225 }
1226 
1227 #ifdef DDB
1228 #include <ddb/db_output.h>
1229 void pidtbl_dump(void);
1230 void
1231 pidtbl_dump(void)
1232 {
1233 	struct pid_table *pt;
1234 	struct proc *p;
1235 	struct pgrp *pgrp;
1236 	int id;
1237 
1238 	db_printf("pid table %p size %x, next %x, last %x\n",
1239 		pid_table, pid_tbl_mask+1,
1240 		next_free_pt, last_free_pt);
1241 	for (pt = pid_table, id = 0; id <= pid_tbl_mask; id++, pt++) {
1242 		p = pt->pt_proc;
1243 		if (!P_VALID(p) && !pt->pt_pgrp)
1244 			continue;
1245 		db_printf("  id %x: ", id);
1246 		if (P_VALID(p))
1247 			db_printf("slotpid %d proc %p id %d (0x%x) %s\n",
1248 				pt->pt_pid, p, p->p_pid, p->p_pid, p->p_comm);
1249 		else
1250 			db_printf("next %x use %x\n",
1251 				P_NEXT(p) & pid_tbl_mask,
1252 				P_NEXT(p) & ~pid_tbl_mask);
1253 		if ((pgrp = pt->pt_pgrp)) {
1254 			db_printf("\tsession %p, sid %d, count %d, login %s\n",
1255 			    pgrp->pg_session, pgrp->pg_session->s_sid,
1256 			    pgrp->pg_session->s_count,
1257 			    pgrp->pg_session->s_login);
1258 			db_printf("\tpgrp %p, pg_id %d, pg_jobc %d, members %p\n",
1259 			    pgrp, pgrp->pg_id, pgrp->pg_jobc,
1260 			    LIST_FIRST(&pgrp->pg_members));
1261 			LIST_FOREACH(p, &pgrp->pg_members, p_pglist) {
1262 				db_printf("\t\tpid %d addr %p pgrp %p %s\n",
1263 				    p->p_pid, p, p->p_pgrp, p->p_comm);
1264 			}
1265 		}
1266 	}
1267 }
1268 #endif /* DDB */
1269 
1270 #ifdef KSTACK_CHECK_MAGIC
1271 
1272 #define	KSTACK_MAGIC	0xdeadbeaf
1273 
1274 /* XXX should be on a per-process basis? */
1275 static int	kstackleftmin = KSTACK_SIZE;
1276 static int	kstackleftthres = KSTACK_SIZE / 8;
1277 
1278 void
1279 kstack_setup_magic(const struct lwp *l)
1280 {
1281 	uint32_t *ip;
1282 	uint32_t const *end;
1283 
1284 	KASSERT(l != NULL);
1285 	KASSERT(l != &lwp0);
1286 
1287 	/*
1288 	 * Fill the whole stack with the magic number so that later
1289 	 * modifications to it can be detected.
1290 	 */
1291 	ip = (uint32_t *)KSTACK_LOWEST_ADDR(l);
1292 	end = (uint32_t *)((char *)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
1293 	for (; ip < end; ip++) {
1294 		*ip = KSTACK_MAGIC;
1295 	}
1296 }
1297 
1298 void
1299 kstack_check_magic(const struct lwp *l)
1300 {
1301 	uint32_t const *ip, *end;
1302 	int stackleft;
1303 
1304 	KASSERT(l != NULL);
1305 
1306 	/* don't check proc0 */ /*XXX*/
1307 	if (l == &lwp0)
1308 		return;
1309 
1310 #ifdef __MACHINE_STACK_GROWS_UP
1311 	/* stack grows upwards (eg. hppa) */
1312 	ip = (uint32_t *)((void *)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
1313 	end = (uint32_t *)KSTACK_LOWEST_ADDR(l);
1314 	for (ip--; ip >= end; ip--)
1315 		if (*ip != KSTACK_MAGIC)
1316 			break;
1317 
1318 	stackleft = (void *)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE - (void *)ip;
1319 #else /* __MACHINE_STACK_GROWS_UP */
1320 	/* stack grows downwards (eg. i386) */
1321 	ip = (uint32_t *)KSTACK_LOWEST_ADDR(l);
1322 	end = (uint32_t *)((char *)KSTACK_LOWEST_ADDR(l) + KSTACK_SIZE);
1323 	for (; ip < end; ip++)
1324 		if (*ip != KSTACK_MAGIC)
1325 			break;
1326 
1327 	stackleft = ((const char *)ip) - (const char *)KSTACK_LOWEST_ADDR(l);
1328 #endif /* __MACHINE_STACK_GROWS_UP */
1329 
1330 	if (kstackleftmin > stackleft) {
1331 		kstackleftmin = stackleft;
1332 		if (stackleft < kstackleftthres)
1333 			printf("warning: kernel stack left %d bytes"
1334 			    "(pid %u:lid %u)\n", stackleft,
1335 			    (u_int)l->l_proc->p_pid, (u_int)l->l_lid);
1336 	}
1337 
1338 	if (stackleft <= 0) {
1339 		panic("magic on the top of kernel stack changed for "
1340 		    "pid %u, lid %u: maybe kernel stack overflow",
1341 		    (u_int)l->l_proc->p_pid, (u_int)l->l_lid);
1342 	}
1343 }
1344 #endif /* KSTACK_CHECK_MAGIC */
1345 
1346 int
1347 proclist_foreach_call(struct proclist *list,
1348     int (*callback)(struct proc *, void *arg), void *arg)
1349 {
1350 	struct proc marker;
1351 	struct proc *p;
1352 	int ret = 0;
1353 
1354 	marker.p_flag = PK_MARKER;
1355 	mutex_enter(proc_lock);
1356 	for (p = LIST_FIRST(list); ret == 0 && p != NULL;) {
1357 		if (p->p_flag & PK_MARKER) {
1358 			p = LIST_NEXT(p, p_list);
1359 			continue;
1360 		}
1361 		LIST_INSERT_AFTER(p, &marker, p_list);
1362 		ret = (*callback)(p, arg);
1363 		KASSERT(mutex_owned(proc_lock));
1364 		p = LIST_NEXT(&marker, p_list);
1365 		LIST_REMOVE(&marker, p_list);
1366 	}
1367 	mutex_exit(proc_lock);
1368 
1369 	return ret;
1370 }
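
/*
 * Example usage (sketch; count_cb is hypothetical): walk allproc and
 * count its entries.  The callback runs with proc_lock held, and a
 * non-zero return value stops the walk.
 *
 *	static int
 *	count_cb(struct proc *p, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return 0;
 *	}
 *
 *	int n = 0;
 *	proclist_foreach_call(&allproc, count_cb, &n);
 */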
1371 
1372 int
1373 proc_vmspace_getref(struct proc *p, struct vmspace **vm)
1374 {
1375 
1376 	/* XXXCDC: how should locking work here? */
1377 
1378 	/* curproc exception is for coredump. */
1379 
1380 	if ((p != curproc && (p->p_sflag & PS_WEXIT) != 0) ||
1381 	    (p->p_vmspace->vm_refcnt < 1)) { /* XXX */
1382 		return EFAULT;
1383 	}
1384 
1385 	uvmspace_addref(p->p_vmspace);
1386 	*vm = p->p_vmspace;
1387 
1388 	return 0;
1389 }
1390 
1391 /*
1392  * Acquire a write lock on the process credential.
1393  */
1394 void
1395 proc_crmod_enter(void)
1396 {
1397 	struct lwp *l = curlwp;
1398 	struct proc *p = l->l_proc;
1399 	kauth_cred_t oc;
1400 
1401 	/* Reset what needs to be reset in plimit. */
1402 	if (p->p_limit->pl_corename != defcorename) {
1403 		lim_setcorename(p, defcorename, 0);
1404 	}
1405 
1406 	mutex_enter(p->p_lock);
1407 
1408 	/* Ensure the LWP cached credentials are up to date. */
1409 	if ((oc = l->l_cred) != p->p_cred) {
1410 		kauth_cred_hold(p->p_cred);
1411 		l->l_cred = p->p_cred;
1412 		kauth_cred_free(oc);
1413 	}
1414 }
1415 
1416 /*
1417  * Set in a new process credential, and drop the write lock.  The credential
1418  * must have a reference already.  Optionally, free a no-longer required
1419  * credential.  The scheduler also needs to inspect p_cred, so we also
1420  * briefly acquire the sched state mutex.
1421  */
1422 void
1423 proc_crmod_leave(kauth_cred_t scred, kauth_cred_t fcred, bool sugid)
1424 {
1425 	struct lwp *l = curlwp, *l2;
1426 	struct proc *p = l->l_proc;
1427 	kauth_cred_t oc;
1428 
1429 	KASSERT(mutex_owned(p->p_lock));
1430 
1431 	/* Is there a new credential to set in? */
1432 	if (scred != NULL) {
1433 		p->p_cred = scred;
1434 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
1435 			if (l2 != l)
1436 				l2->l_prflag |= LPR_CRMOD;
1437 		}
1438 
1439 		/* Ensure the LWP cached credentials are up to date. */
1440 		if ((oc = l->l_cred) != scred) {
1441 			kauth_cred_hold(scred);
1442 			l->l_cred = scred;
1443 		}
1444 	} else
1445 		oc = NULL;	/* XXXgcc */
1446 
1447 	if (sugid) {
1448 		/*
1449 		 * Mark process as having changed credentials, stops
1450 		 * tracing etc.
1451 		 */
1452 		p->p_flag |= PK_SUGID;
1453 	}
1454 
1455 	mutex_exit(p->p_lock);
1456 
1457 	/* If there is a credential to be released, free it now. */
1458 	if (fcred != NULL) {
1459 		KASSERT(scred != NULL);
1460 		kauth_cred_free(fcred);
1461 		if (oc != scred)
1462 			kauth_cred_free(oc);
1463 	}
1464 }
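
/*
 * Example usage (sketch): replacing the process credential.  The new
 * credential must already hold a reference; the old one is freed on
 * the way out.
 *
 *	proc_crmod_enter();			(takes p->p_lock)
 *	old = p->p_cred;
 *	new = kauth_cred_dup(old);
 *	... modify 'new' ...
 *	proc_crmod_leave(new, old, false);	(releases p->p_lock)
 */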
1465 
1466 /*
1467  * proc_specific_key_create --
1468  *	Create a key for subsystem proc-specific data.
1469  */
1470 int
1471 proc_specific_key_create(specificdata_key_t *keyp, specificdata_dtor_t dtor)
1472 {
1473 
1474 	return (specificdata_key_create(proc_specificdata_domain, keyp, dtor));
1475 }
1476 
1477 /*
1478  * proc_specific_key_delete --
1479  *	Delete a key for subsystem proc-specific data.
1480  */
1481 void
1482 proc_specific_key_delete(specificdata_key_t key)
1483 {
1484 
1485 	specificdata_key_delete(proc_specificdata_domain, key);
1486 }
1487 
1488 /*
1489  * proc_initspecific --
1490  *	Initialize a proc's specificdata container.
1491  */
1492 void
1493 proc_initspecific(struct proc *p)
1494 {
1495 	int error __diagused;
1496 
1497 	error = specificdata_init(proc_specificdata_domain, &p->p_specdataref);
1498 	KASSERT(error == 0);
1499 }
1500 
1501 /*
1502  * proc_finispecific --
1503  *	Finalize a proc's specificdata container.
1504  */
1505 void
1506 proc_finispecific(struct proc *p)
1507 {
1508 
1509 	specificdata_fini(proc_specificdata_domain, &p->p_specdataref);
1510 }
1511 
1512 /*
1513  * proc_getspecific --
1514  *	Return proc-specific data corresponding to the specified key.
1515  */
1516 void *
1517 proc_getspecific(struct proc *p, specificdata_key_t key)
1518 {
1519 
1520 	return (specificdata_getspecific(proc_specificdata_domain,
1521 					 &p->p_specdataref, key));
1522 }
1523 
1524 /*
1525  * proc_setspecific --
1526  *	Set proc-specific data corresponding to the specified key.
1527  */
1528 void
1529 proc_setspecific(struct proc *p, specificdata_key_t key, void *data)
1530 {
1531 
1532 	specificdata_setspecific(proc_specificdata_domain,
1533 				 &p->p_specdataref, key, data);
1534 }
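
/*
 * Example usage (sketch; mykey and my_dtor are hypothetical): a
 * subsystem attaching private per-process data.
 *
 *	static specificdata_key_t mykey;
 *
 *	proc_specific_key_create(&mykey, my_dtor);	(once, at init)
 *	proc_setspecific(p, mykey, data);
 *	data = proc_getspecific(p, mykey);
 */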
1535 
1536 int
1537 proc_uidmatch(kauth_cred_t cred, kauth_cred_t target)
1538 {
1539 	int r = 0;
1540 
1541 	if (kauth_cred_getuid(cred) != kauth_cred_getuid(target) ||
1542 	    kauth_cred_getuid(cred) != kauth_cred_getsvuid(target)) {
1543 		/*
1544 		 * suid proc of ours or proc not ours
1545 		 */
1546 		r = EPERM;
1547 	} else if (kauth_cred_getgid(target) != kauth_cred_getsvgid(target)) {
1548 		/*
1549 		 * sgid proc has sgid back to us temporarily
1550 		 */
1551 		r = EPERM;
1552 	} else {
1553 		/*
1554 		 * our rgid must be in target's group list (ie,
1555 		 * sub-processes started by a sgid process)
1556 		 */
1557 		int ismember = 0;
1558 
1559 		if (kauth_cred_ismember_gid(cred,
1560 		    kauth_cred_getgid(target), &ismember) != 0 ||
1561 		    !ismember)
1562 			r = EPERM;
1563 	}
1564 
1565 	return (r);
1566 }
1567 
1568 /*
1569  * sysctl stuff
1570  */
1571 
1572 #define KERN_PROCSLOP	(5 * sizeof(struct kinfo_proc))
1573 
1574 static const u_int sysctl_flagmap[] = {
1575 	PK_ADVLOCK, P_ADVLOCK,
1576 	PK_EXEC, P_EXEC,
1577 	PK_NOCLDWAIT, P_NOCLDWAIT,
1578 	PK_32, P_32,
1579 	PK_CLDSIGIGN, P_CLDSIGIGN,
1580 	PK_SUGID, P_SUGID,
1581 	0
1582 };
1583 
1584 static const u_int sysctl_sflagmap[] = {
1585 	PS_NOCLDSTOP, P_NOCLDSTOP,
1586 	PS_WEXIT, P_WEXIT,
1587 	PS_STOPFORK, P_STOPFORK,
1588 	PS_STOPEXEC, P_STOPEXEC,
1589 	PS_STOPEXIT, P_STOPEXIT,
1590 	0
1591 };
1592 
1593 static const u_int sysctl_slflagmap[] = {
1594 	PSL_TRACED, P_TRACED,
1595 	PSL_CHTRACED, P_CHTRACED,
1596 	PSL_SYSCALL, P_SYSCALL,
1597 	0
1598 };
1599 
1600 static const u_int sysctl_lflagmap[] = {
1601 	PL_CONTROLT, P_CONTROLT,
1602 	PL_PPWAIT, P_PPWAIT,
1603 	0
1604 };
1605 
1606 static const u_int sysctl_stflagmap[] = {
1607 	PST_PROFIL, P_PROFIL,
1608 	0
1609 
1610 };
1611 
1612 /* used by kern_lwp also */
1613 const u_int sysctl_lwpflagmap[] = {
1614 	LW_SINTR, L_SINTR,
1615 	LW_SYSTEM, L_SYSTEM,
1616 	0
1617 };
1618 
1619 /*
1620  * Find the most ``active'' lwp of a process and return it for ps display
1621  * purposes
1622  */
1623 static struct lwp *
1624 proc_active_lwp(struct proc *p)
1625 {
1626 	static const int ostat[] = {
1627 		0,
1628 		2,	/* LSIDL */
1629 		6,	/* LSRUN */
1630 		5,	/* LSSLEEP */
1631 		4,	/* LSSTOP */
1632 		0,	/* LSZOMB */
1633 		1,	/* LSDEAD */
1634 		7,	/* LSONPROC */
1635 		3	/* LSSUSPENDED */
1636 	};
1637 
1638 	struct lwp *l, *lp = NULL;
1639 	LIST_FOREACH(l, &p->p_lwps, l_sibling) {
1640 		KASSERT(l->l_stat >= 0 && l->l_stat < __arraycount(ostat));
1641 		if (lp == NULL ||
1642 		    ostat[l->l_stat] > ostat[lp->l_stat] ||
1643 		    (ostat[l->l_stat] == ostat[lp->l_stat] &&
1644 		    l->l_cpticks > lp->l_cpticks)) {
1645 			lp = l;
1646 			continue;
1647 		}
1648 	}
1649 	return lp;
1650 }
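
/*
 * Example: an LSONPROC lwp (rank 7) is preferred over an LSRUN lwp
 * (rank 6); between two lwps of equal rank, the one with the larger
 * l_cpticks count wins.
 */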
1651 
1652 static int
1653 sysctl_doeproc(SYSCTLFN_ARGS)
1654 {
1655 	union {
1656 		struct kinfo_proc kproc;
1657 		struct kinfo_proc2 kproc2;
1658 	} *kbuf;
1659 	struct proc *p, *next, *marker;
1660 	char *where, *dp;
1661 	int type, op, arg, error;
1662 	u_int elem_size, kelem_size, elem_count;
1663 	size_t buflen, needed;
1664 	bool match, zombie, mmmbrains;
1665 	const bool allowaddr = get_expose_address(curproc);
1666 
1667 	if (namelen == 1 && name[0] == CTL_QUERY)
1668 		return (sysctl_query(SYSCTLFN_CALL(rnode)));
1669 
1670 	dp = where = oldp;
1671 	buflen = where != NULL ? *oldlenp : 0;
1672 	error = 0;
1673 	needed = 0;
1674 	type = rnode->sysctl_num;
1675 
1676 	if (type == KERN_PROC) {
1677 		if (namelen == 0)
1678 			return EINVAL;
1679 		switch (op = name[0]) {
1680 		case KERN_PROC_ALL:
1681 			if (namelen != 1)
1682 				return EINVAL;
1683 			arg = 0;
1684 			break;
1685 		default:
1686 			if (namelen != 2)
1687 				return EINVAL;
1688 			arg = name[1];
1689 			break;
1690 		}
1691 		elem_count = 0;	/* Hush little compiler, don't you cry */
1692 		kelem_size = elem_size = sizeof(kbuf->kproc);
1693 	} else {
1694 		if (namelen != 4)
1695 			return EINVAL;
1696 		op = name[0];
1697 		arg = name[1];
1698 		elem_size = name[2];
1699 		elem_count = name[3];
1700 		kelem_size = sizeof(kbuf->kproc2);
1701 	}
1702 
1703 	sysctl_unlock();
1704 
1705 	kbuf = kmem_zalloc(sizeof(*kbuf), KM_SLEEP);
1706 	marker = kmem_alloc(sizeof(*marker), KM_SLEEP);
1707 	marker->p_flag = PK_MARKER;
1708 
1709 	mutex_enter(proc_lock);
1710 	/*
1711 	 * Start with zombies to prevent reporting processes twice, in case they
1712 	 * are dying and being moved from the list of alive processes to zombies.
1713 	 */
1714 	mmmbrains = true;
1715 	for (p = LIST_FIRST(&zombproc);; p = next) {
1716 		if (p == NULL) {
1717 			if (mmmbrains) {
1718 				p = LIST_FIRST(&allproc);
1719 				mmmbrains = false;
1720 			}
1721 			if (p == NULL)
1722 				break;
1723 		}
1724 		next = LIST_NEXT(p, p_list);
1725 		if ((p->p_flag & PK_MARKER) != 0)
1726 			continue;
1727 
1728 		/*
1729 		 * Skip embryonic processes.
1730 		 */
1731 		if (p->p_stat == SIDL)
1732 			continue;
1733 
1734 		mutex_enter(p->p_lock);
1735 		error = kauth_authorize_process(l->l_cred,
1736 		    KAUTH_PROCESS_CANSEE, p,
1737 		    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_EPROC), NULL, NULL);
1738 		if (error != 0) {
1739 			mutex_exit(p->p_lock);
1740 			continue;
1741 		}
1742 
1743 		/*
1744 		 * Handling all the operations in one switch, at the cost of
1745 		 * algorithm complexity, is deliberate.  The win from splitting
1746 		 * this function into several similar copies would be negligible
1747 		 * in practice, while maintenance burden and code size would grow.
1748 		 */
1749 		switch (op) {
1750 		case KERN_PROC_PID:
1751 			match = (p->p_pid == (pid_t)arg);
1752 			break;
1753 
1754 		case KERN_PROC_PGRP:
1755 			match = (p->p_pgrp->pg_id == (pid_t)arg);
1756 			break;
1757 
1758 		case KERN_PROC_SESSION:
1759 			match = (p->p_session->s_sid == (pid_t)arg);
1760 			break;
1761 
1762 		case KERN_PROC_TTY:
1763 			match = true;
1764 			if (arg == (int) KERN_PROC_TTY_REVOKE) {
1765 				if ((p->p_lflag & PL_CONTROLT) == 0 ||
1766 				    p->p_session->s_ttyp == NULL ||
1767 				    p->p_session->s_ttyvp != NULL) {
1768 				    	match = false;
1769 				}
1770 			} else if ((p->p_lflag & PL_CONTROLT) == 0 ||
1771 			    p->p_session->s_ttyp == NULL) {
1772 				if ((dev_t)arg != KERN_PROC_TTY_NODEV) {
1773 					match = false;
1774 				}
1775 			} else if (p->p_session->s_ttyp->t_dev != (dev_t)arg) {
1776 				match = false;
1777 			}
1778 			break;
1779 
1780 		case KERN_PROC_UID:
1781 			match = (kauth_cred_geteuid(p->p_cred) == (uid_t)arg);
1782 			break;
1783 
1784 		case KERN_PROC_RUID:
1785 			match = (kauth_cred_getuid(p->p_cred) == (uid_t)arg);
1786 			break;
1787 
1788 		case KERN_PROC_GID:
1789 			match = (kauth_cred_getegid(p->p_cred) == (uid_t)arg);
1790 			break;
1791 
1792 		case KERN_PROC_RGID:
1793 			match = (kauth_cred_getgid(p->p_cred) == (uid_t)arg);
1794 			break;
1795 
1796 		case KERN_PROC_ALL:
1797 			match = true;
1798 			/* allow everything */
1799 			break;
1800 
1801 		default:
1802 			error = EINVAL;
1803 			mutex_exit(p->p_lock);
1804 			goto cleanup;
1805 		}
1806 		if (!match) {
1807 			mutex_exit(p->p_lock);
1808 			continue;
1809 		}
1810 
1811 		/*
1812 		 * Grab a hold on the process.
1813 		 */
1814 		if (mmmbrains) {
1815 			zombie = true;
1816 		} else {
1817 			zombie = !rw_tryenter(&p->p_reflock, RW_READER);
1818 		}
1819 		if (zombie) {
1820 			LIST_INSERT_AFTER(p, marker, p_list);
1821 		}
1822 
1823 		if (buflen >= elem_size &&
1824 		    (type == KERN_PROC || elem_count > 0)) {
1825 			if (type == KERN_PROC) {
1826 				fill_proc(p, &kbuf->kproc.kp_proc, allowaddr);
1827 				fill_eproc(p, &kbuf->kproc.kp_eproc, zombie,
1828 				    allowaddr);
1829 			} else {
1830 				fill_kproc2(p, &kbuf->kproc2, zombie,
1831 				    allowaddr);
1832 				elem_count--;
1833 			}
1834 			mutex_exit(p->p_lock);
1835 			mutex_exit(proc_lock);
1836 			/*
1837 			 * Copy out elem_size, but not larger than kelem_size
1838 			 */
1839 			error = sysctl_copyout(l, kbuf, dp,
1840 			    uimin(kelem_size, elem_size));
1841 			mutex_enter(proc_lock);
1842 			if (error) {
1843 				goto bah;
1844 			}
1845 			dp += elem_size;
1846 			buflen -= elem_size;
1847 		} else {
1848 			mutex_exit(p->p_lock);
1849 		}
1850 		needed += elem_size;
1851 
1852 		/*
1853 		 * Release reference to process.
1854 		 */
1855 	 	if (zombie) {
1856 			next = LIST_NEXT(marker, p_list);
1857  			LIST_REMOVE(marker, p_list);
1858 		} else {
1859 			rw_exit(&p->p_reflock);
1860 			next = LIST_NEXT(p, p_list);
1861 		}
1862 
1863 		/*
1864 		 * Short-circuit break quickly!
1865 		 */
1866 		if (op == KERN_PROC_PID)
1867                 	break;
1868 	}
1869 	mutex_exit(proc_lock);
1870 
1871 	if (where != NULL) {
1872 		*oldlenp = dp - where;
1873 		if (needed > *oldlenp) {
1874 			error = ENOMEM;
1875 			goto out;
1876 		}
1877 	} else {
1878 		needed += KERN_PROCSLOP;
1879 		*oldlenp = needed;
1880 	}
1881 	kmem_free(kbuf, sizeof(*kbuf));
1882 	kmem_free(marker, sizeof(*marker));
1883 	sysctl_relock();
1884 	return 0;
1885  bah:
1886  	if (zombie)
1887  		LIST_REMOVE(marker, p_list);
1888 	else
1889 		rw_exit(&p->p_reflock);
1890  cleanup:
1891 	mutex_exit(proc_lock);
1892  out:
1893 	kmem_free(kbuf, sizeof(*kbuf));
1894 	kmem_free(marker, sizeof(*marker));
1895 	sysctl_relock();
1896 	return error;
1897 }
1898 
1899 int
1900 copyin_psstrings(struct proc *p, struct ps_strings *arginfo)
1901 {
1902 
1903 #ifdef COMPAT_NETBSD32
1904 	if (p->p_flag & PK_32) {
1905 		struct ps_strings32 arginfo32;
1906 
1907 		int error = copyin_proc(p, (void *)p->p_psstrp, &arginfo32,
1908 		    sizeof(arginfo32));
1909 		if (error)
1910 			return error;
1911 		arginfo->ps_argvstr = (void *)(uintptr_t)arginfo32.ps_argvstr;
1912 		arginfo->ps_nargvstr = arginfo32.ps_nargvstr;
1913 		arginfo->ps_envstr = (void *)(uintptr_t)arginfo32.ps_envstr;
1914 		arginfo->ps_nenvstr = arginfo32.ps_nenvstr;
1915 		return 0;
1916 	}
1917 #endif
1918 	return copyin_proc(p, (void *)p->p_psstrp, arginfo, sizeof(*arginfo));
1919 }
1920 
1921 static int
1922 copy_procargs_sysctl_cb(void *cookie_, const void *src, size_t off, size_t len)
1923 {
1924 	void **cookie = cookie_;
1925 	struct lwp *l = cookie[0];
1926 	char *dst = cookie[1];
1927 
1928 	return sysctl_copyout(l, src, dst + off, len);
1929 }
1930 
1931 /*
1932  * sysctl helper routine for kern.proc_args pseudo-subtree.
1933  */
1934 static int
1935 sysctl_kern_proc_args(SYSCTLFN_ARGS)
1936 {
1937 	struct ps_strings pss;
1938 	struct proc *p;
1939 	pid_t pid;
1940 	int type, error;
1941 	void *cookie[2];
1942 
1943 	if (namelen == 1 && name[0] == CTL_QUERY)
1944 		return (sysctl_query(SYSCTLFN_CALL(rnode)));
1945 
1946 	if (newp != NULL || namelen != 2)
1947 		return (EINVAL);
1948 	pid = name[0];
1949 	type = name[1];
1950 
1951 	switch (type) {
1952 	case KERN_PROC_PATHNAME:
1953 		sysctl_unlock();
1954 		error = fill_pathname(l, pid, oldp, oldlenp);
1955 		sysctl_relock();
1956 		return error;
1957 
1958 	case KERN_PROC_ARGV:
1959 	case KERN_PROC_NARGV:
1960 	case KERN_PROC_ENV:
1961 	case KERN_PROC_NENV:
1962 		/* ok */
1963 		break;
1964 	default:
1965 		return (EINVAL);
1966 	}
1967 
1968 	sysctl_unlock();
1969 
1970 	/* check pid */
1971 	mutex_enter(proc_lock);
1972 	if ((p = proc_find(pid)) == NULL) {
1973 		error = EINVAL;
1974 		goto out_locked;
1975 	}
1976 	mutex_enter(p->p_lock);
1977 
1978 	/* Check permission. */
1979 	if (type == KERN_PROC_ARGV || type == KERN_PROC_NARGV)
1980 		error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE,
1981 		    p, KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ARGS), NULL, NULL);
1982 	else if (type == KERN_PROC_ENV || type == KERN_PROC_NENV)
1983 		error = kauth_authorize_process(l->l_cred, KAUTH_PROCESS_CANSEE,
1984 		    p, KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENV), NULL, NULL);
1985 	else
1986 		error = EINVAL; /* XXXGCC */
1987 	if (error) {
1988 		mutex_exit(p->p_lock);
1989 		goto out_locked;
1990 	}
1991 
1992 	if (oldp == NULL) {
1993 		if (type == KERN_PROC_NARGV || type == KERN_PROC_NENV)
1994 			*oldlenp = sizeof (int);
1995 		else
1996 			*oldlenp = ARG_MAX;	/* XXX XXX XXX */
1997 		error = 0;
1998 		mutex_exit(p->p_lock);
1999 		goto out_locked;
2000 	}
2001 
2002 	/*
2003 	 * Zombies don't have a stack, so we can't read their psstrings.
2004 	 * System processes also don't have a user stack.
2005 	 */
2006 	if (P_ZOMBIE(p) || (p->p_flag & PK_SYSTEM) != 0) {
2007 		error = EINVAL;
2008 		mutex_exit(p->p_lock);
2009 		goto out_locked;
2010 	}
2011 
2012 	error = rw_tryenter(&p->p_reflock, RW_READER) ? 0 : EBUSY;
2013 	mutex_exit(p->p_lock);
2014 	if (error) {
2015 		goto out_locked;
2016 	}
2017 	mutex_exit(proc_lock);
2018 
2019 	if (type == KERN_PROC_NARGV || type == KERN_PROC_NENV) {
2020 		int value;
2021 		if ((error = copyin_psstrings(p, &pss)) == 0) {
2022 			if (type == KERN_PROC_NARGV)
2023 				value = pss.ps_nargvstr;
2024 			else
2025 				value = pss.ps_nenvstr;
2026 			error = sysctl_copyout(l, &value, oldp, sizeof(value));
2027 			*oldlenp = sizeof(value);
2028 		}
2029 	} else {
2030 		cookie[0] = l;
2031 		cookie[1] = oldp;
2032 		error = copy_procargs(p, type, oldlenp,
2033 		    copy_procargs_sysctl_cb, cookie);
2034 	}
2035 	rw_exit(&p->p_reflock);
2036 	sysctl_relock();
2037 	return error;
2038 
2039 out_locked:
2040 	mutex_exit(proc_lock);
2041 	sysctl_relock();
2042 	return error;
2043 }
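
/*
 * Illustrative userland use of this pseudo-subtree (a sketch, not part
 * of this file): fetch a process's argument strings by probing for the
 * required size first and then copying the data.
 *
 *	int mib[4] = { CTL_KERN, KERN_PROC_ARGS, pid, KERN_PROC_ARGV };
 *	size_t len;
 *	char *buf;
 *
 *	if (sysctl(mib, 4, NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	if ((buf = malloc(len)) == NULL ||
 *	    sysctl(mib, 4, buf, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 *	(buf now holds len bytes of NUL-separated argument strings.)
 */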
2044 
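/*
 * Copy the argument or environment strings (oid KERN_PROC_ARGV or
 * KERN_PROC_ENV) of process p to the consumer through the cb callback,
 * one page at a time.  On entry *limit is the size of the destination;
 * on success it is updated to the number of bytes produced.
 */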
2045 int
2046 copy_procargs(struct proc *p, int oid, size_t *limit,
2047     int (*cb)(void *, const void *, size_t, size_t), void *cookie)
2048 {
2049 	struct ps_strings pss;
2050 	size_t len, i, loaded, entry_len;
2051 	struct uio auio;
2052 	struct iovec aiov;
2053 	int error, argvlen;
2054 	char *arg;
2055 	char **argv;
2056 	vaddr_t user_argv;
2057 	struct vmspace *vmspace;
2058 
2059 	/*
2060 	 * Allocate a temporary buffer to hold the argument vector and
2061 	 * the arguments themselves.
2062 	 */
2063 	arg = kmem_alloc(PAGE_SIZE, KM_SLEEP);
2064 	argv = kmem_alloc(PAGE_SIZE, KM_SLEEP);
2065 
2066 	/*
2067 	 * Hold a reference on the vmspace so it is not torn down under us.
2068 	 */
2069 	vmspace = p->p_vmspace;
2070 	uvmspace_addref(vmspace);
2071 
2072 	/*
2073 	 * Read in the ps_strings structure.
2074 	 */
2075 	if ((error = copyin_psstrings(p, &pss)) != 0)
2076 		goto done;
2077 
2078 	/*
2079 	 * Now read the address of the argument vector.
2080 	 */
2081 	switch (oid) {
2082 	case KERN_PROC_ARGV:
2083 		user_argv = (uintptr_t)pss.ps_argvstr;
2084 		argvlen = pss.ps_nargvstr;
2085 		break;
2086 	case KERN_PROC_ENV:
2087 		user_argv = (uintptr_t)pss.ps_envstr;
2088 		argvlen = pss.ps_nenvstr;
2089 		break;
2090 	default:
2091 		error = EINVAL;
2092 		goto done;
2093 	}
2094 
2095 	if (argvlen < 0) {
2096 		error = EIO;
2097 		goto done;
2098 	}
2099 
2100 
2101 	/*
2102 	 * Now copy each string.
2103 	 */
2104 	len = 0; /* bytes written to user buffer */
2105 	loaded = 0; /* bytes from argv already processed */
2106 	i = 0; /* To make compiler happy */
2107 	entry_len = PROC_PTRSZ(p);
2108 
2109 	for (; argvlen; --argvlen) {
2110 		int finished = 0;
2111 		vaddr_t base;
2112 		size_t xlen;
2113 		int j;
2114 
2115 		if (loaded == 0) {
2116 			size_t rem = entry_len * argvlen;
2117 			loaded = MIN(rem, PAGE_SIZE);
2118 			error = copyin_vmspace(vmspace,
2119 			    (const void *)user_argv, argv, loaded);
2120 			if (error)
2121 				break;
2122 			user_argv += loaded;
2123 			i = 0;
2124 		}
2125 
2126 #ifdef COMPAT_NETBSD32
2127 		if (p->p_flag & PK_32) {
2128 			netbsd32_charp *argv32;
2129 
2130 			argv32 = (netbsd32_charp *)argv;
2131 			base = (vaddr_t)NETBSD32PTR64(argv32[i++]);
2132 		} else
2133 #endif
2134 			base = (vaddr_t)argv[i++];
2135 		loaded -= entry_len;
2136 
2137 		/*
2138 		 * The program has messed around with its arguments,
2139 		 * possibly deleting some, and replacing them with
2140 		 * NULLs. Treat this as the last argument and not
2141 		 * a failure.
2142 		 */
2143 		if (base == 0)
2144 			break;
2145 
2146 		while (!finished) {
2147 			xlen = PAGE_SIZE - (base & PAGE_MASK);
2148 
2149 			aiov.iov_base = arg;
2150 			aiov.iov_len = PAGE_SIZE;
2151 			auio.uio_iov = &aiov;
2152 			auio.uio_iovcnt = 1;
2153 			auio.uio_offset = base;
2154 			auio.uio_resid = xlen;
2155 			auio.uio_rw = UIO_READ;
2156 			UIO_SETUP_SYSSPACE(&auio);
2157 			error = uvm_io(&vmspace->vm_map, &auio, 0);
2158 			if (error)
2159 				goto done;
2160 
2161 			/* Look for the end of the string */
2162 			for (j = 0; j < xlen; j++) {
2163 				if (arg[j] == '\0') {
2164 					xlen = j + 1;
2165 					finished = 1;
2166 					break;
2167 				}
2168 			}
2169 
2170 			/* Check for user buffer overflow */
2171 			if (len + xlen > *limit) {
2172 				finished = 1;
2173 				if (len > *limit)
2174 					xlen = 0;
2175 				else
2176 					xlen = *limit - len;
2177 			}
2178 
2179 			/* Copyout the page */
2180 			error = (*cb)(cookie, arg, len, xlen);
2181 			if (error)
2182 				goto done;
2183 
2184 			len += xlen;
2185 			base += xlen;
2186 		}
2187 	}
2188 	*limit = len;
2189 
2190 done:
2191 	kmem_free(argv, PAGE_SIZE);
2192 	kmem_free(arg, PAGE_SIZE);
2193 	uvmspace_free(vmspace);
2194 	return error;
2195 }
2196 
2197 /*
2198  * Fill in a proc structure for the specified process.
2199  */
2200 static void
2201 fill_proc(const struct proc *psrc, struct proc *p, bool allowaddr)
2202 {
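	/*
	 * COND_SET_VALUE(dst, src, allowaddr) assigns src to dst only
	 * when allowaddr is true; otherwise dst keeps its previously
	 * initialized (zeroed) value, so fields holding kernel
	 * addresses are not exposed to unprivileged consumers.
	 */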
2203 	COND_SET_VALUE(p->p_list, psrc->p_list, allowaddr);
2204 	COND_SET_VALUE(p->p_auxlock, psrc->p_auxlock, allowaddr);
2205 	COND_SET_VALUE(p->p_lock, psrc->p_lock, allowaddr);
2206 	COND_SET_VALUE(p->p_stmutex, psrc->p_stmutex, allowaddr);
2207 	COND_SET_VALUE(p->p_reflock, psrc->p_reflock, allowaddr);
2208 	COND_SET_VALUE(p->p_waitcv, psrc->p_waitcv, allowaddr);
2209 	COND_SET_VALUE(p->p_lwpcv, psrc->p_lwpcv, allowaddr);
2210 	COND_SET_VALUE(p->p_cred, psrc->p_cred, allowaddr);
2211 	COND_SET_VALUE(p->p_fd, psrc->p_fd, allowaddr);
2212 	COND_SET_VALUE(p->p_cwdi, psrc->p_cwdi, allowaddr);
2213 	COND_SET_VALUE(p->p_stats, psrc->p_stats, allowaddr);
2214 	COND_SET_VALUE(p->p_limit, psrc->p_limit, allowaddr);
2215 	COND_SET_VALUE(p->p_vmspace, psrc->p_vmspace, allowaddr);
2216 	COND_SET_VALUE(p->p_sigacts, psrc->p_sigacts, allowaddr);
2217 	COND_SET_VALUE(p->p_aio, psrc->p_aio, allowaddr);
2218 	p->p_mqueue_cnt = psrc->p_mqueue_cnt;
2219 	COND_SET_VALUE(p->p_specdataref, psrc->p_specdataref, allowaddr);
2220 	p->p_exitsig = psrc->p_exitsig;
2221 	p->p_flag = psrc->p_flag;
2222 	p->p_sflag = psrc->p_sflag;
2223 	p->p_slflag = psrc->p_slflag;
2224 	p->p_lflag = psrc->p_lflag;
2225 	p->p_stflag = psrc->p_stflag;
2226 	p->p_stat = psrc->p_stat;
2227 	p->p_trace_enabled = psrc->p_trace_enabled;
2228 	p->p_pid = psrc->p_pid;
2229 	COND_SET_VALUE(p->p_pglist, psrc->p_pglist, allowaddr);
2230 	COND_SET_VALUE(p->p_pptr, psrc->p_pptr, allowaddr);
2231 	COND_SET_VALUE(p->p_sibling, psrc->p_sibling, allowaddr);
2232 	COND_SET_VALUE(p->p_children, psrc->p_children, allowaddr);
2233 	COND_SET_VALUE(p->p_lwps, psrc->p_lwps, allowaddr);
2234 	COND_SET_VALUE(p->p_raslist, psrc->p_raslist, allowaddr);
2235 	p->p_nlwps = psrc->p_nlwps;
2236 	p->p_nzlwps = psrc->p_nzlwps;
2237 	p->p_nrlwps = psrc->p_nrlwps;
2238 	p->p_nlwpwait = psrc->p_nlwpwait;
2239 	p->p_ndlwps = psrc->p_ndlwps;
2240 	p->p_nlwpid = psrc->p_nlwpid;
2241 	p->p_nstopchild = psrc->p_nstopchild;
2242 	p->p_waited = psrc->p_waited;
2243 	COND_SET_VALUE(p->p_zomblwp, psrc->p_zomblwp, allowaddr);
2244 	COND_SET_VALUE(p->p_vforklwp, psrc->p_vforklwp, allowaddr);
2245 	COND_SET_VALUE(p->p_sched_info, psrc->p_sched_info, allowaddr);
2246 	p->p_estcpu = psrc->p_estcpu;
2247 	p->p_estcpu_inherited = psrc->p_estcpu_inherited;
2248 	p->p_forktime = psrc->p_forktime;
2249 	p->p_pctcpu = psrc->p_pctcpu;
2250 	COND_SET_VALUE(p->p_opptr, psrc->p_opptr, allowaddr);
2251 	COND_SET_VALUE(p->p_timers, psrc->p_timers, allowaddr);
2252 	p->p_rtime = psrc->p_rtime;
2253 	p->p_uticks = psrc->p_uticks;
2254 	p->p_sticks = psrc->p_sticks;
2255 	p->p_iticks = psrc->p_iticks;
2256 	p->p_xutime = psrc->p_xutime;
2257 	p->p_xstime = psrc->p_xstime;
2258 	p->p_traceflag = psrc->p_traceflag;
2259 	COND_SET_VALUE(p->p_tracep, psrc->p_tracep, allowaddr);
2260 	COND_SET_VALUE(p->p_textvp, psrc->p_textvp, allowaddr);
2261 	COND_SET_VALUE(p->p_emul, psrc->p_emul, allowaddr);
2262 	COND_SET_VALUE(p->p_emuldata, psrc->p_emuldata, allowaddr);
2263 	COND_SET_VALUE(p->p_execsw, psrc->p_execsw, allowaddr);
2264 	COND_SET_VALUE(p->p_klist, psrc->p_klist, allowaddr);
2265 	COND_SET_VALUE(p->p_sigwaiters, psrc->p_sigwaiters, allowaddr);
2266 	COND_SET_VALUE(p->p_sigpend, psrc->p_sigpend, allowaddr);
2267 	COND_SET_VALUE(p->p_lwpctl, psrc->p_lwpctl, allowaddr);
2268 	p->p_ppid = psrc->p_ppid;
2269 	p->p_fpid = psrc->p_fpid;
2270 	p->p_vfpid = psrc->p_vfpid;
2271 	p->p_vfpid_done = psrc->p_vfpid_done;
2272 	p->p_lwp_created = psrc->p_lwp_created;
2273 	p->p_lwp_exited = psrc->p_lwp_exited;
2274 	p->p_nsems = psrc->p_nsems;
2275 	COND_SET_VALUE(p->p_path, psrc->p_path, allowaddr);
2276 	COND_SET_VALUE(p->p_sigctx, psrc->p_sigctx, allowaddr);
2277 	p->p_nice = psrc->p_nice;
2278 	memcpy(p->p_comm, psrc->p_comm, sizeof(p->p_comm));
2279 	COND_SET_VALUE(p->p_pgrp, psrc->p_pgrp, allowaddr);
2280 	COND_SET_VALUE(p->p_psstrp, psrc->p_psstrp, allowaddr);
2281 	p->p_pax = psrc->p_pax;
2282 	p->p_xexit = psrc->p_xexit;
2283 	p->p_xsig = psrc->p_xsig;
2284 	p->p_acflag = psrc->p_acflag;
2285 	COND_SET_VALUE(p->p_md, psrc->p_md, allowaddr);
2286 	p->p_stackbase = psrc->p_stackbase;
2287 	COND_SET_VALUE(p->p_dtrace, psrc->p_dtrace, allowaddr);
2288 }
2289 
2290 /*
2291  * Fill in an eproc structure for the specified process.
2292  */
2293 void
2294 fill_eproc(struct proc *p, struct eproc *ep, bool zombie, bool allowaddr)
2295 {
2296 	struct tty *tp;
2297 	struct lwp *l;
2298 
2299 	KASSERT(mutex_owned(proc_lock));
2300 	KASSERT(mutex_owned(p->p_lock));
2301 
2302 	COND_SET_VALUE(ep->e_paddr, p, allowaddr);
2303 	COND_SET_VALUE(ep->e_sess, p->p_session, allowaddr);
2304 	if (p->p_cred) {
2305 		kauth_cred_topcred(p->p_cred, &ep->e_pcred);
2306 		kauth_cred_toucred(p->p_cred, &ep->e_ucred);
2307 	}
2308 	if (p->p_stat != SIDL && !P_ZOMBIE(p) && !zombie) {
2309 		struct vmspace *vm = p->p_vmspace;
2310 
2311 		ep->e_vm.vm_rssize = vm_resident_count(vm);
2312 		ep->e_vm.vm_tsize = vm->vm_tsize;
2313 		ep->e_vm.vm_dsize = vm->vm_dsize;
2314 		ep->e_vm.vm_ssize = vm->vm_ssize;
2315 		ep->e_vm.vm_map.size = vm->vm_map.size;
2316 
2317 		/* Pick the primary (first) LWP */
2318 		l = proc_active_lwp(p);
2319 		KASSERT(l != NULL);
2320 		lwp_lock(l);
2321 		if (l->l_wchan)
2322 			strncpy(ep->e_wmesg, l->l_wmesg, WMESGLEN);
2323 		lwp_unlock(l);
2324 	}
2325 	ep->e_ppid = p->p_ppid;
2326 	if (p->p_pgrp && p->p_session) {
2327 		ep->e_pgid = p->p_pgrp->pg_id;
2328 		ep->e_jobc = p->p_pgrp->pg_jobc;
2329 		ep->e_sid = p->p_session->s_sid;
2330 		if ((p->p_lflag & PL_CONTROLT) &&
2331 		    (tp = p->p_session->s_ttyp)) {
2332 			ep->e_tdev = tp->t_dev;
2333 			ep->e_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PGID;
2334 			COND_SET_VALUE(ep->e_tsess, tp->t_session, allowaddr);
2335 		} else
2336 			ep->e_tdev = (uint32_t)NODEV;
2337 		ep->e_flag = p->p_session->s_ttyvp ? EPROC_CTTY : 0;
2338 		if (SESS_LEADER(p))
2339 			ep->e_flag |= EPROC_SLEADER;
2340 		strncpy(ep->e_login, p->p_session->s_login, MAXLOGNAME);
2341 	}
2342 	ep->e_xsize = ep->e_xrssize = 0;
2343 	ep->e_xccount = ep->e_xswrss = 0;
2344 }
2345 
2346 /*
2347  * Fill in a kinfo_proc2 structure for the specified process.
2348  */
2349 void
2350 fill_kproc2(struct proc *p, struct kinfo_proc2 *ki, bool zombie, bool allowaddr)
2351 {
2352 	struct tty *tp;
2353 	struct lwp *l, *l2;
2354 	struct timeval ut, st, rt;
2355 	sigset_t ss1, ss2;
2356 	struct rusage ru;
2357 	struct vmspace *vm;
2358 
2359 	KASSERT(mutex_owned(proc_lock));
2360 	KASSERT(mutex_owned(p->p_lock));
2361 
2362 	sigemptyset(&ss1);
2363 	sigemptyset(&ss2);
2364 
2365 	COND_SET_VALUE(ki->p_paddr, PTRTOUINT64(p), allowaddr);
2366 	COND_SET_VALUE(ki->p_fd, PTRTOUINT64(p->p_fd), allowaddr);
2367 	COND_SET_VALUE(ki->p_cwdi, PTRTOUINT64(p->p_cwdi), allowaddr);
2368 	COND_SET_VALUE(ki->p_stats, PTRTOUINT64(p->p_stats), allowaddr);
2369 	COND_SET_VALUE(ki->p_limit, PTRTOUINT64(p->p_limit), allowaddr);
2370 	COND_SET_VALUE(ki->p_vmspace, PTRTOUINT64(p->p_vmspace), allowaddr);
2371 	COND_SET_VALUE(ki->p_sigacts, PTRTOUINT64(p->p_sigacts), allowaddr);
2372 	COND_SET_VALUE(ki->p_sess, PTRTOUINT64(p->p_session), allowaddr);
2373 	ki->p_tsess = 0;	/* may be changed if controlling tty below */
2374 	COND_SET_VALUE(ki->p_ru, PTRTOUINT64(&p->p_stats->p_ru), allowaddr);
2375 	ki->p_eflag = 0;
2376 	ki->p_exitsig = p->p_exitsig;
2377 	ki->p_flag = L_INMEM;   /* Process never swapped out */
2378 	ki->p_flag |= sysctl_map_flags(sysctl_flagmap, p->p_flag);
2379 	ki->p_flag |= sysctl_map_flags(sysctl_sflagmap, p->p_sflag);
2380 	ki->p_flag |= sysctl_map_flags(sysctl_slflagmap, p->p_slflag);
2381 	ki->p_flag |= sysctl_map_flags(sysctl_lflagmap, p->p_lflag);
2382 	ki->p_flag |= sysctl_map_flags(sysctl_stflagmap, p->p_stflag);
2383 	ki->p_pid = p->p_pid;
2384 	ki->p_ppid = p->p_ppid;
2385 	ki->p_uid = kauth_cred_geteuid(p->p_cred);
2386 	ki->p_ruid = kauth_cred_getuid(p->p_cred);
2387 	ki->p_gid = kauth_cred_getegid(p->p_cred);
2388 	ki->p_rgid = kauth_cred_getgid(p->p_cred);
2389 	ki->p_svuid = kauth_cred_getsvuid(p->p_cred);
2390 	ki->p_svgid = kauth_cred_getsvgid(p->p_cred);
2391 	ki->p_ngroups = kauth_cred_ngroups(p->p_cred);
2392 	kauth_cred_getgroups(p->p_cred, ki->p_groups,
2393 	    uimin(ki->p_ngroups, sizeof(ki->p_groups) / sizeof(ki->p_groups[0])),
2394 	    UIO_SYSSPACE);
2395 
2396 	ki->p_uticks = p->p_uticks;
2397 	ki->p_sticks = p->p_sticks;
2398 	ki->p_iticks = p->p_iticks;
2399 	ki->p_tpgid = NO_PGID;	/* may be changed if controlling tty below */
2400 	COND_SET_VALUE(ki->p_tracep, PTRTOUINT64(p->p_tracep), allowaddr);
2401 	ki->p_traceflag = p->p_traceflag;
2402 
2403 	memcpy(&ki->p_sigignore, &p->p_sigctx.ps_sigignore,sizeof(ki_sigset_t));
2404 	memcpy(&ki->p_sigcatch, &p->p_sigctx.ps_sigcatch, sizeof(ki_sigset_t));
2405 
2406 	ki->p_cpticks = 0;
2407 	ki->p_pctcpu = p->p_pctcpu;
2408 	ki->p_estcpu = 0;
2409 	ki->p_stat = p->p_stat; /* Will likely be overridden by LWP status */
2410 	ki->p_realstat = p->p_stat;
2411 	ki->p_nice = p->p_nice;
2412 	ki->p_xstat = P_WAITSTATUS(p);
2413 	ki->p_acflag = p->p_acflag;
2414 
2415 	strncpy(ki->p_comm, p->p_comm,
2416 	    uimin(sizeof(ki->p_comm), sizeof(p->p_comm)));
2417 	strncpy(ki->p_ename, p->p_emul->e_name, sizeof(ki->p_ename));
2418 
2419 	ki->p_nlwps = p->p_nlwps;
2420 	ki->p_realflag = ki->p_flag;
2421 
2422 	if (p->p_stat != SIDL && !P_ZOMBIE(p) && !zombie) {
2423 		vm = p->p_vmspace;
2424 		ki->p_vm_rssize = vm_resident_count(vm);
2425 		ki->p_vm_tsize = vm->vm_tsize;
2426 		ki->p_vm_dsize = vm->vm_dsize;
2427 		ki->p_vm_ssize = vm->vm_ssize;
2428 		ki->p_vm_vsize = atop(vm->vm_map.size);
2429 		/*
2430 		 * Since the stack is initially mapped mostly with
2431 		 * PROT_NONE and grown as needed, adjust the "mapped size"
2432 		 * to skip the unused stack portion.
2433 		 */
2434 		ki->p_vm_msize =
2435 		    atop(vm->vm_map.size) - vm->vm_issize + vm->vm_ssize;
2436 
2437 		/* Pick the primary (first) LWP */
2438 		l = proc_active_lwp(p);
2439 		KASSERT(l != NULL);
2440 		lwp_lock(l);
2441 		ki->p_nrlwps = p->p_nrlwps;
2442 		ki->p_forw = 0;
2443 		ki->p_back = 0;
2444 		COND_SET_VALUE(ki->p_addr, PTRTOUINT64(l->l_addr), allowaddr);
2445 		ki->p_stat = l->l_stat;
2446 		ki->p_flag |= sysctl_map_flags(sysctl_lwpflagmap, l->l_flag);
2447 		ki->p_swtime = l->l_swtime;
2448 		ki->p_slptime = l->l_slptime;
2449 		if (l->l_stat == LSONPROC)
2450 			ki->p_schedflags = l->l_cpu->ci_schedstate.spc_flags;
2451 		else
2452 			ki->p_schedflags = 0;
2453 		ki->p_priority = lwp_eprio(l);
2454 		ki->p_usrpri = l->l_priority;
2455 		if (l->l_wchan)
2456 			strncpy(ki->p_wmesg, l->l_wmesg, sizeof(ki->p_wmesg));
2457 		COND_SET_VALUE(ki->p_wchan, PTRTOUINT64(l->l_wchan), allowaddr);
2458 		ki->p_cpuid = cpu_index(l->l_cpu);
2459 		lwp_unlock(l);
2460 		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
2461 			/* Approximation: merge each LWP's pending signals and mask. */
2462 			sigplusset(&l->l_sigpend.sp_set, &ss1);
2463 			sigplusset(&l->l_sigmask, &ss2);
2464 			ki->p_cpticks += l->l_cpticks;
2465 			ki->p_pctcpu += l->l_pctcpu;
2466 			ki->p_estcpu += l->l_estcpu;
2467 		}
2468 	}
2469 	sigplusset(&p->p_sigpend.sp_set, &ss2);
2470 	memcpy(&ki->p_siglist, &ss1, sizeof(ki_sigset_t));
2471 	memcpy(&ki->p_sigmask, &ss2, sizeof(ki_sigset_t));
2472 
2473 	if (p->p_session != NULL) {
2474 		ki->p_sid = p->p_session->s_sid;
2475 		ki->p__pgid = p->p_pgrp->pg_id;
2476 		if (p->p_session->s_ttyvp)
2477 			ki->p_eflag |= EPROC_CTTY;
2478 		if (SESS_LEADER(p))
2479 			ki->p_eflag |= EPROC_SLEADER;
2480 		strncpy(ki->p_login, p->p_session->s_login,
2481 		    uimin(sizeof ki->p_login - 1, sizeof p->p_session->s_login));
2482 		ki->p_jobc = p->p_pgrp->pg_jobc;
2483 		if ((p->p_lflag & PL_CONTROLT) && (tp = p->p_session->s_ttyp)) {
2484 			ki->p_tdev = tp->t_dev;
2485 			ki->p_tpgid = tp->t_pgrp ? tp->t_pgrp->pg_id : NO_PGID;
2486 			COND_SET_VALUE(ki->p_tsess, PTRTOUINT64(tp->t_session),
2487 			    allowaddr);
2488 		} else {
2489 			ki->p_tdev = (int32_t)NODEV;
2490 		}
2491 	}
2492 
2493 	if (!P_ZOMBIE(p) && !zombie) {
2494 		ki->p_uvalid = 1;
2495 		ki->p_ustart_sec = p->p_stats->p_start.tv_sec;
2496 		ki->p_ustart_usec = p->p_stats->p_start.tv_usec;
2497 
2498 		calcru(p, &ut, &st, NULL, &rt);
2499 		ki->p_rtime_sec = rt.tv_sec;
2500 		ki->p_rtime_usec = rt.tv_usec;
2501 		ki->p_uutime_sec = ut.tv_sec;
2502 		ki->p_uutime_usec = ut.tv_usec;
2503 		ki->p_ustime_sec = st.tv_sec;
2504 		ki->p_ustime_usec = st.tv_usec;
2505 
2506 		memcpy(&ru, &p->p_stats->p_ru, sizeof(ru));
2507 		ki->p_uru_nvcsw = 0;
2508 		ki->p_uru_nivcsw = 0;
2509 		LIST_FOREACH(l2, &p->p_lwps, l_sibling) {
2510 			ki->p_uru_nvcsw += (l2->l_ncsw - l2->l_nivcsw);
2511 			ki->p_uru_nivcsw += l2->l_nivcsw;
2512 			ruadd(&ru, &l2->l_ru);
2513 		}
2514 		ki->p_uru_maxrss = ru.ru_maxrss;
2515 		ki->p_uru_ixrss = ru.ru_ixrss;
2516 		ki->p_uru_idrss = ru.ru_idrss;
2517 		ki->p_uru_isrss = ru.ru_isrss;
2518 		ki->p_uru_minflt = ru.ru_minflt;
2519 		ki->p_uru_majflt = ru.ru_majflt;
2520 		ki->p_uru_nswap = ru.ru_nswap;
2521 		ki->p_uru_inblock = ru.ru_inblock;
2522 		ki->p_uru_oublock = ru.ru_oublock;
2523 		ki->p_uru_msgsnd = ru.ru_msgsnd;
2524 		ki->p_uru_msgrcv = ru.ru_msgrcv;
2525 		ki->p_uru_nsignals = ru.ru_nsignals;
2526 
2527 		timeradd(&p->p_stats->p_cru.ru_utime,
2528 			 &p->p_stats->p_cru.ru_stime, &ut);
2529 		ki->p_uctime_sec = ut.tv_sec;
2530 		ki->p_uctime_usec = ut.tv_usec;
2531 	}
2532 }
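
/*
 * Illustrative userland consumer (a sketch, not part of this file):
 * fetch the kinfo_proc2 record of a single process via kern.proc2.
 *
 *	struct kinfo_proc2 kp;
 *	size_t len = sizeof(kp);
 *	int mib[6] = { CTL_KERN, KERN_PROC2, KERN_PROC_PID, pid,
 *	    (int)sizeof(kp), 1 };
 *
 *	if (sysctl(mib, 6, &kp, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */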
2533 
2534 
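/*
 * Find the process with the given pid (or the caller's own process if
 * pid is -1) and check that the caller is allowed to see it.  On
 * success *p is valid and, unless pid was -1, p->p_lock is held and
 * must be released by the caller.
 */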
2535 int
2536 proc_find_locked(struct lwp *l, struct proc **p, pid_t pid)
2537 {
2538 	int error;
2539 
2540 	mutex_enter(proc_lock);
2541 	if (pid == -1)
2542 		*p = l->l_proc;
2543 	else
2544 		*p = proc_find(pid);
2545 
2546 	if (*p == NULL) {
2547 		if (pid != -1)
2548 			mutex_exit(proc_lock);
2549 		return ESRCH;
2550 	}
2551 	if (pid != -1)
2552 		mutex_enter((*p)->p_lock);
2553 	mutex_exit(proc_lock);
2554 
2555 	error = kauth_authorize_process(l->l_cred,
2556 	    KAUTH_PROCESS_CANSEE, *p,
2557 	    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
2558 	if (error) {
2559 		if (pid != -1)
2560 			mutex_exit((*p)->p_lock);
2561 	}
2562 	return error;
2563 }
2564 
2565 static int
2566 fill_pathname(struct lwp *l, pid_t pid, void *oldp, size_t *oldlenp)
2567 {
2568 	int error;
2569 	struct proc *p;
2570 
2571 	if ((error = proc_find_locked(l, &p, pid)) != 0)
2572 		return error;
2573 
2574 	if (p->p_path == NULL) {
2575 		if (pid != -1)
2576 			mutex_exit(p->p_lock);
2577 		return ENOENT;
2578 	}
2579 
2580 	size_t len = strlen(p->p_path) + 1;
2581 	if (oldp != NULL) {
2582 		size_t copylen = uimin(len, *oldlenp);
2583 		error = sysctl_copyout(l, p->p_path, oldp, copylen);
2584 		if (error == 0 && *oldlenp < len)
2585 			error = ENOSPC;
2586 	}
2587 	*oldlenp = len;
2588 	if (pid != -1)
2589 		mutex_exit(p->p_lock);
2590 	return error;
2591 }
2592 
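/*
 * Fetch a copy of process p's ELF auxiliary vector.  On success *buf
 * points to a buffer of *len bytes that the caller must release with
 * kmem_free(*buf, *len).
 */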
2593 int
2594 proc_getauxv(struct proc *p, void **buf, size_t *len)
2595 {
2596 	struct ps_strings pss;
2597 	int error;
2598 	void *uauxv, *kauxv;
2599 	size_t size;
2600 
2601 	if ((error = copyin_psstrings(p, &pss)) != 0)
2602 		return error;
2603 	if (pss.ps_envstr == NULL)
2604 		return EIO;
2605 
2606 	size = p->p_execsw->es_arglen;
2607 	if (size == 0)
2608 		return EIO;
2609 
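	/*
	 * The auxiliary vector begins right after the NULL terminator
	 * of the environment pointer array on the user stack, hence
	 * the (ps_nenvstr + 1) entries skipped below.
	 */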
2610 	size_t ptrsz = PROC_PTRSZ(p);
2611 	uauxv = (void *)((char *)pss.ps_envstr + (pss.ps_nenvstr + 1) * ptrsz);
2612 
2613 	kauxv = kmem_alloc(size, KM_SLEEP);
2614 
2615 	error = copyin_proc(p, uauxv, kauxv, size);
2616 	if (error) {
2617 		kmem_free(kauxv, size);
2618 		return error;
2619 	}
2620 
2621 	*buf = kauxv;
2622 	*len = size;
2623 
2624 	return 0;
2625 }
2626 
2627 
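/*
 * sysctl helper for security.expose_address: the value may only be
 * changed by callers holding KAUTH_SYSTEM_KERNADDR privilege, and only
 * to one of the recognized settings 0, 1 or 2.
 */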
2628 static int
2629 sysctl_security_expose_address(SYSCTLFN_ARGS)
2630 {
2631 	int expose_address, error;
2632 	struct sysctlnode node;
2633 
2634 	node = *rnode;
2635 	node.sysctl_data = &expose_address;
2636 	expose_address = *(int *)rnode->sysctl_data;
2637 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
2638 	if (error || newp == NULL)
2639 		return error;
2640 
2641 	if (kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_KERNADDR,
2642 	    0, NULL, NULL, NULL))
2643 		return EPERM;
2644 
2645 	switch (expose_address) {
2646 	case 0:
2647 	case 1:
2648 	case 2:
2649 		break;
2650 	default:
2651 		return EINVAL;
2652 	}
2653 
2654 	*(int *)rnode->sysctl_data = expose_address;
2655 
2656 	return 0;
2657 }
2658 
2659 bool
2660 get_expose_address(struct proc *p)
2661 {
2662 	/* Allowed only when expose_address permits it or caller is privileged. */
2663 	return kauth_authorize_process(kauth_cred_get(), KAUTH_PROCESS_CANSEE,
2664 	    p, KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_KPTR), NULL, NULL) == 0;
2665 }
2666