1 /*	$OpenBSD: kvm_proc.c,v 1.5 1999/06/22 12:54:45 art Exp $	*/
2 /*	$NetBSD: kvm_proc.c,v 1.30 1999/03/24 05:50:50 mrg Exp $	*/
3 /*-
4  * Copyright (c) 1998 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 /*-
39  * Copyright (c) 1994, 1995 Charles M. Hannum.  All rights reserved.
40  * Copyright (c) 1989, 1992, 1993
41  *	The Regents of the University of California.  All rights reserved.
42  *
43  * This code is derived from software developed by the Computer Systems
44  * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
45  * BG 91-66 and contributed to Berkeley.
46  *
47  * Redistribution and use in source and binary forms, with or without
48  * modification, are permitted provided that the following conditions
49  * are met:
50  * 1. Redistributions of source code must retain the above copyright
51  *    notice, this list of conditions and the following disclaimer.
52  * 2. Redistributions in binary form must reproduce the above copyright
53  *    notice, this list of conditions and the following disclaimer in the
54  *    documentation and/or other materials provided with the distribution.
55  * 3. All advertising materials mentioning features or use of this software
56  *    must display the following acknowledgement:
57  *	This product includes software developed by the University of
58  *	California, Berkeley and its contributors.
59  * 4. Neither the name of the University nor the names of its contributors
60  *    may be used to endorse or promote products derived from this software
61  *    without specific prior written permission.
62  *
63  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
64  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
65  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
66  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
67  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
68  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
69  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
70  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
71  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
72  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
73  * SUCH DAMAGE.
74  */
75 
76 #if defined(LIBC_SCCS) && !defined(lint)
77 #if 0
78 static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
79 #else
80 static char *rcsid = "$OpenBSD: kvm_proc.c,v 1.5 1999/06/22 12:54:45 art Exp $";
81 #endif
82 #endif /* LIBC_SCCS and not lint */
83 
84 /*
85  * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
86  * users of this code, so we've factored it out into a separate module.
87  * Thus, we keep this grunge out of the other kvm applications (i.e.,
88  * most other applications are interested only in open/close/read/nlist).
89  */
90 
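/*
 * Editorial sketch (not part of the original source): a minimal example of
 * how a ps-style consumer might drive this module, assuming the usual
 * kvm(3) entry points and the KERN_PROC_ALL sysctl selector.  The function
 * name is illustrative only, and the block is disabled so it cannot affect
 * the build.
 */
#if 0
#include <sys/param.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <stdio.h>

static void
example_ps(void)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct kinfo_proc *kp;
	kvm_t *kd;
	int i, nentries;

	/* NULLs select the running kernel, /dev/mem and the default swap. */
	kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
	if (kd == NULL)
		return;
	kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &nentries);
	if (kp != NULL)
		for (i = 0; i < nentries; i++)
			printf("%5d %s\n", kp[i].kp_proc.p_pid,
			    kp[i].kp_proc.p_comm);
	kvm_close(kd);
}
#endif
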
91 #include <sys/param.h>
92 #include <sys/user.h>
93 #include <sys/proc.h>
94 #include <sys/exec.h>
95 #include <sys/stat.h>
96 #include <sys/ioctl.h>
97 #include <sys/tty.h>
#include <stdio.h>
98 #include <stdlib.h>
99 #include <string.h>
100 #include <unistd.h>
101 #include <nlist.h>
102 #include <kvm.h>
103 
104 #include <vm/vm.h>
105 #include <vm/vm_param.h>
106 
107 #ifdef UVM
108 #include <uvm/uvm_extern.h>
109 #include <uvm/uvm_amap.h>
110 #else
111 #include <vm/swap_pager.h>
112 #endif
113 
114 #include <sys/sysctl.h>
115 
116 #include <limits.h>
117 #include <db.h>
118 #include <paths.h>
119 
120 #include "kvm_private.h"
121 
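/*
 * KREAD() evaluates to non-zero when the kernel read fails, i.e. when
 * fewer than sizeof(*obj) bytes could be copied out of the kernel image
 * at addr.
 */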
122 #define KREAD(kd, addr, obj) \
123 	(kvm_read(kd, addr, (void *)(obj), sizeof(*obj)) != sizeof(*obj))
124 
125 #ifndef UVM
126 int		_kvm_readfromcore __P((kvm_t *, u_long, u_long));
127 int		_kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long));
128 #endif
129 ssize_t		kvm_uread __P((kvm_t *, const struct proc *, u_long, char *,
130 		    size_t));
131 
132 static char	**kvm_argv __P((kvm_t *, const struct proc *, u_long, int,
133 		    int));
134 static int	kvm_deadprocs __P((kvm_t *, int, int, u_long, u_long, int));
135 static char	**kvm_doargv __P((kvm_t *, const struct kinfo_proc *, int,
136 		    void (*)(struct ps_strings *, u_long *, int *)));
137 static int	kvm_proclist __P((kvm_t *, int, int, struct proc *,
138 		    struct kinfo_proc *, int));
139 static int	proc_verify __P((kvm_t *, u_long, const struct proc *));
140 static void	ps_str_a __P((struct ps_strings *, u_long *, int *));
141 static void	ps_str_e __P((struct ps_strings *, u_long *, int *));
142 
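/*
 * Map the page of p's user address space containing va into the
 * per-handle buffer kd->swapspc and return a pointer to the byte at va
 * within that buffer.  On success, *cnt is set to the number of valid
 * bytes from va to the end of the page; on failure, NULL is returned.
 */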
143 char *
144 _kvm_uread(kd, p, va, cnt)
145 	kvm_t *kd;
146 	const struct proc *p;
147 	u_long va;
148 	u_long *cnt;
149 {
150 	u_long addr, head;
151 	u_long offset;
152 	struct vm_map_entry vme;
153 #ifdef UVM
154 	struct vm_amap amap;
155 	struct vm_anon *anonp, anon;
156 	struct vm_page pg;
157 	u_long slot;
158 #else
159 	struct vm_object vmo;
160 	int rv;
161 #endif
162 
163 	if (kd->swapspc == 0) {
164 		kd->swapspc = (char *)_kvm_malloc(kd, kd->nbpg);
165 		if (kd->swapspc == 0)
166 			return (0);
167 	}
168 
169 	/*
170 	 * Look through the address map for the memory object
171 	 * that corresponds to the given virtual address.
172 	 * The header just has the entire valid range.
173 	 */
174 	head = (u_long)&p->p_vmspace->vm_map.header;
175 	addr = head;
176 	while (1) {
177 		if (KREAD(kd, addr, &vme))
178 			return (0);
179 
180 #ifdef UVM
181 		if (va >= vme.start && va < vme.end &&
182 		    vme.aref.ar_amap != NULL)
183 			break;
184 #else
185 		if (va >= vme.start && va < vme.end &&
186 		    vme.object.vm_object != 0)
187 			break;
188 #endif
189 
190 		addr = (u_long)vme.next;
191 		if (addr == head)
192 			return (0);
193 	}
194 
195 #ifdef UVM
196 	/*
197 	 * We found the map entry; now chase its amap to the anon and page.
198 	 */
199 	if (vme.aref.ar_amap == NULL)
200 		return NULL;
201 
202 	addr = (u_long)vme.aref.ar_amap;
203 	if (KREAD(kd, addr, &amap))
204 		return NULL;
205 
206 	offset = va - vme.start;
207 	slot = offset / kd->nbpg + vme.aref.ar_pageoff;
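	/*
	 * Worked example (editorial note, assuming 4096-byte pages): for a
	 * map entry starting at 0x1000 with ar_pageoff 2 and va 0x3456,
	 * offset is 0x2456 and slot is 0x2456/4096 + 2 = 4, so the anon
	 * pointer is fetched from am_anon[4] below; the low bits 0x456
	 * later index into the page once it is in kd->swapspc.
	 */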
208 	/* sanity-check slot number */
209 	if (slot >= amap.am_nslot)
210 		return NULL;
211 
212 	addr = (u_long)amap.am_anon + (slot * sizeof(anonp));
213 	if (KREAD(kd, addr, &anonp))
214 		return NULL;
215 
216 	addr = (u_long)anonp;
217 	if (KREAD(kd, addr, &anon))
218 		return NULL;
219 
220 	addr = (u_long)anon.u.an_page;
221 	if (addr) {
222 		if (KREAD(kd, addr, &pg))
223 			return NULL;
224 
225 		if (lseek(kd->pmfd, pg.phys_addr, SEEK_SET) < 0)
226 			return NULL;
227 
228 		if (read(kd->pmfd, (void *)kd->swapspc, (size_t)kd->nbpg)
229 		    != kd->nbpg)
230 			return NULL;
231 	} else {
232 		if (lseek(kd->swfd, (off_t)(anon.an_swslot * kd->nbpg),
233 			  SEEK_SET) < 0)
234 			return NULL;
235 		if (read(kd->swfd, (void *)kd->swapspc, (size_t)kd->nbpg)
236 		    != kd->nbpg)
237 			return NULL;
238 	}
239 
240 #else
241 	/*
242 	 * We found the right object -- follow shadow links.
243 	 */
244 	offset = va - vme.start + vme.offset;
245 	addr = (u_long)vme.object.vm_object;
246 
247 	while (1) {
248 		/* Try reading the page from core first. */
249 		if ((rv = _kvm_readfromcore(kd, addr, offset)))
250 			break;
251 
252 		if (KREAD(kd, addr, &vmo))
253 			return (0);
254 
255 		/* If there is a pager here, see if it has the page. */
256 		if (vmo.pager != 0 &&
257 		    (rv = _kvm_readfrompager(kd, &vmo, offset)))
258 			break;
259 
260 		/* Move down the shadow chain. */
261 		addr = (u_long)vmo.shadow;
262 		if (addr == 0)
263 			return (0);
264 		offset += vmo.shadow_offset;
265 	}
266 
267 	if (rv == -1)
268 		return (0);
269 #endif
270 
271 	/* Found the page. */
272 	offset %= kd->nbpg;
273 	*cnt = kd->nbpg - offset;
274 	return (&kd->swapspc[offset]);
275 }
276 
277 #ifndef UVM
278 
279 #define	vm_page_hash(kd, object, offset) \
280 	(((u_long)object + (u_long)(offset / kd->nbpg)) & kd->vm_page_hash_mask)
281 
282 int
283 _kvm_coreinit(kd)
284 	kvm_t *kd;
285 {
286 	struct nlist nlist[3];
287 
288 	nlist[0].n_name = "_vm_page_buckets";
289 	nlist[1].n_name = "_vm_page_hash_mask";
290 	nlist[2].n_name = 0;
291 	if (kvm_nlist(kd, nlist) != 0)
292 		return (-1);
293 
294 	if (KREAD(kd, nlist[0].n_value, &kd->vm_page_buckets) ||
295 	    KREAD(kd, nlist[1].n_value, &kd->vm_page_hash_mask))
296 		return (-1);
297 
298 	return (0);
299 }
300 
301 int
302 _kvm_readfromcore(kd, object, offset)
303 	kvm_t *kd;
304 	u_long object, offset;
305 {
306 	u_long addr;
307 	struct pglist bucket;
308 	struct vm_page mem;
309 	off_t seekpoint;
310 
311 	if (kd->vm_page_buckets == 0 &&
312 	    _kvm_coreinit(kd))
313 		return (-1);
314 
315 	addr = (u_long)&kd->vm_page_buckets[vm_page_hash(kd, object, offset)];
316 	if (KREAD(kd, addr, &bucket))
317 		return (-1);
318 
319 	addr = (u_long)bucket.tqh_first;
320 	offset &= ~(kd->nbpg - 1);
321 	while (1) {
322 		if (addr == 0)
323 			return (0);
324 
325 		if (KREAD(kd, addr, &mem))
326 			return (-1);
327 
328 		if ((u_long)mem.object == object &&
329 		    (u_long)mem.offset == offset)
330 			break;
331 
332 		addr = (u_long)mem.hashq.tqe_next;
333 	}
334 
335 	seekpoint = mem.phys_addr;
336 
337 	if (lseek(kd->pmfd, seekpoint, SEEK_SET) == -1)
338 		return (-1);
339 	if (read(kd->pmfd, kd->swapspc, kd->nbpg) != kd->nbpg)
340 		return (-1);
341 
342 	return (1);
343 }
344 
345 int
346 _kvm_readfrompager(kd, vmop, offset)
347 	kvm_t *kd;
348 	struct vm_object *vmop;
349 	u_long offset;
350 {
351 	u_long addr;
352 	struct pager_struct pager;
353 	struct swpager swap;
354 	int ix;
355 	struct swblock swb;
356 	off_t seekpoint;
357 
358 	/* Read in the pager info and make sure it's a swap device. */
359 	addr = (u_long)vmop->pager;
360 	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
361 		return (-1);
362 
363 	/* Read in the swap_pager private data. */
364 	addr = (u_long)pager.pg_data;
365 	if (KREAD(kd, addr, &swap))
366 		return (-1);
367 
368 	/*
369 	 * Calculate the paging offset, and make sure it's within the
370 	 * bounds of the pager.
371 	 */
372 	offset += vmop->paging_offset;
373 	ix = offset / dbtob(swap.sw_bsize);
374 #if 0
375 	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
376 		return (-1);
377 #else
378 	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
379 		int i;
380 		printf("BUG BUG BUG BUG:\n");
381 		printf("object %x offset %x pgoffset %x pager %x swpager %x\n",
382 		    vmop, offset - vmop->paging_offset, vmop->paging_offset,
383 		    vmop->pager, pager.pg_data);
384 		printf("osize %x bsize %x blocks %x nblocks %x\n",
385 		    swap.sw_osize, swap.sw_bsize, swap.sw_blocks,
386 		    swap.sw_nblocks);
387 		for (ix = 0; ix < swap.sw_nblocks; ix++) {
388 			addr = (u_long)&swap.sw_blocks[ix];
389 			if (KREAD(kd, addr, &swb))
390 				return (0);
391 			printf("sw_blocks[%d]: block %x mask %x\n", ix,
392 			    swb.swb_block, swb.swb_mask);
393 		}
394 		return (-1);
395 	}
396 #endif
397 
398 	/* Read in the swap records. */
399 	addr = (u_long)&swap.sw_blocks[ix];
400 	if (KREAD(kd, addr, &swb))
401 		return (-1);
402 
403 	/* Calculate offset within pager. */
404 	offset %= dbtob(swap.sw_bsize);
405 
406 	/* Check that the page is actually present. */
407 	if ((swb.swb_mask & (1 << (offset / kd->nbpg))) == 0)
408 		return (0);
409 
410 	if (!ISALIVE(kd))
411 		return (-1);
412 
413 	/* Calculate the offset on the swap device and read the page. */
414 	seekpoint = dbtob(swb.swb_block) + (offset & ~(kd->nbpg - 1));
415 
416 	if (lseek(kd->swfd, seekpoint, SEEK_SET) == -1)
417 		return (-1);
418 	if (read(kd->swfd, kd->swapspc, kd->nbpg) != kd->nbpg)
419 		return (-1);
420 
421 	return (1);
422 }
423 #endif /* UVM */
424 
425 /*
426  * Read procs from the memory file into buffer bp, which has space to hold
427  * at most maxcnt procs.
428  */
429 static int
430 kvm_proclist(kd, what, arg, p, bp, maxcnt)
431 	kvm_t *kd;
432 	int what, arg;
433 	struct proc *p;
434 	struct kinfo_proc *bp;
435 	int maxcnt;
436 {
437 	int cnt = 0;
438 	struct eproc eproc;
439 	struct pgrp pgrp;
440 	struct session sess;
441 	struct tty tty;
442 	struct proc proc;
443 
444 	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
445 		if (KREAD(kd, (u_long)p, &proc)) {
446 			_kvm_err(kd, kd->program, "can't read proc at %x", p);
447 			return (-1);
448 		}
449 		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
450 			KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
451 			      &eproc.e_ucred);
452 
453 		switch(what) {
454 
455 		case KERN_PROC_PID:
456 			if (proc.p_pid != (pid_t)arg)
457 				continue;
458 			break;
459 
460 		case KERN_PROC_UID:
461 			if (eproc.e_ucred.cr_uid != (uid_t)arg)
462 				continue;
463 			break;
464 
465 		case KERN_PROC_RUID:
466 			if (eproc.e_pcred.p_ruid != (uid_t)arg)
467 				continue;
468 			break;
469 		}
470 		/*
471 		 * We're going to add another proc to the set.  If this
472 		 * will overflow the buffer, assume the reason is because
473 		 * nprocs (or the proc list) is corrupt and declare an error.
474 		 */
475 		if (cnt >= maxcnt) {
476 			_kvm_err(kd, kd->program, "nprocs corrupt");
477 			return (-1);
478 		}
479 		/*
480 		 * gather eproc
481 		 */
482 		eproc.e_paddr = p;
483 		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
484 			_kvm_err(kd, kd->program, "can't read pgrp at %x",
485 				 proc.p_pgrp);
486 			return (-1);
487 		}
488 		eproc.e_sess = pgrp.pg_session;
489 		eproc.e_pgid = pgrp.pg_id;
490 		eproc.e_jobc = pgrp.pg_jobc;
491 		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
492 			_kvm_err(kd, kd->program, "can't read session at %x",
493 				pgrp.pg_session);
494 			return (-1);
495 		}
496 		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
497 			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
498 				_kvm_err(kd, kd->program,
499 					 "can't read tty at %x", sess.s_ttyp);
500 				return (-1);
501 			}
502 			eproc.e_tdev = tty.t_dev;
503 			eproc.e_tsess = tty.t_session;
504 			if (tty.t_pgrp != NULL) {
505 				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
506 					_kvm_err(kd, kd->program,
507 						 "can't read tpgrp at %x",
508 						tty.t_pgrp);
509 					return (-1);
510 				}
511 				eproc.e_tpgid = pgrp.pg_id;
512 			} else
513 				eproc.e_tpgid = -1;
514 		} else
515 			eproc.e_tdev = NODEV;
516 		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
517 		if (sess.s_leader == p)
518 			eproc.e_flag |= EPROC_SLEADER;
519 		if (proc.p_wmesg)
520 			(void)kvm_read(kd, (u_long)proc.p_wmesg,
521 			    eproc.e_wmesg, WMESGLEN);
522 
523 		(void)kvm_read(kd, (u_long)proc.p_vmspace,
524 		    &eproc.e_vm, sizeof(eproc.e_vm));
525 
526 		eproc.e_xsize = eproc.e_xrssize = 0;
527 		eproc.e_xccount = eproc.e_xswrss = 0;
528 
529 		switch (what) {
530 
531 		case KERN_PROC_PGRP:
532 			if (eproc.e_pgid != (pid_t)arg)
533 				continue;
534 			break;
535 
536 		case KERN_PROC_TTY:
537 			if ((proc.p_flag & P_CONTROLT) == 0 ||
538 			     eproc.e_tdev != (dev_t)arg)
539 				continue;
540 			break;
541 		}
542 		bcopy(&proc, &bp->kp_proc, sizeof(proc));
543 		bcopy(&eproc, &bp->kp_eproc, sizeof(eproc));
544 		++bp;
545 		++cnt;
546 	}
547 	return (cnt);
548 }
549 
550 /*
551  * Build proc info array by reading in proc list from a crash dump.
552  * Return number of procs read.  maxcnt is the max we will read.
553  */
554 static int
555 kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
556 	kvm_t *kd;
557 	int what, arg;
558 	u_long a_allproc;
559 	u_long a_zombproc;
560 	int maxcnt;
561 {
562 	struct kinfo_proc *bp = kd->procbase;
563 	int acnt, zcnt;
564 	struct proc *p;
565 
566 	if (KREAD(kd, a_allproc, &p)) {
567 		_kvm_err(kd, kd->program, "cannot read allproc");
568 		return (-1);
569 	}
570 	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
571 	if (acnt < 0)
572 		return (acnt);
573 
574 	if (KREAD(kd, a_zombproc, &p)) {
575 		_kvm_err(kd, kd->program, "cannot read zombproc");
576 		return (-1);
577 	}
578 	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
579 	if (zcnt < 0)
580 		zcnt = 0;
581 
582 	return (acnt + zcnt);
583 }
584 
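/*
 * Return an array of kinfo_proc structures for the processes selected by
 * op and arg, storing the number of entries in *cnt.  On a live system
 * the data comes from the kern.proc sysctl; for a crash dump it is
 * rebuilt by walking the allproc and zombproc lists.  The array is owned
 * by the kvm handle and is freed by the next call or by kvm_close().
 */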
585 struct kinfo_proc *
586 kvm_getprocs(kd, op, arg, cnt)
587 	kvm_t *kd;
588 	int op, arg;
589 	int *cnt;
590 {
591 	size_t size;
592 	int mib[4], st, nprocs;
593 
594 	if (kd->procbase != 0) {
595 		free((void *)kd->procbase);
596 		/*
597 		 * Clear this pointer in case this call fails.  Otherwise,
598 		 * kvm_close() will free it again.
599 		 */
600 		kd->procbase = 0;
601 	}
602 	if (ISALIVE(kd)) {
603 		size = 0;
604 		mib[0] = CTL_KERN;
605 		mib[1] = KERN_PROC;
606 		mib[2] = op;
607 		mib[3] = arg;
608 		st = sysctl(mib, 4, NULL, &size, NULL, 0);
609 		if (st == -1) {
610 			_kvm_syserr(kd, kd->program, "kvm_getprocs");
611 			return (0);
612 		}
613 		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
614 		if (kd->procbase == 0)
615 			return (0);
616 		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
617 		if (st == -1) {
618 			_kvm_syserr(kd, kd->program, "kvm_getprocs");
619 			return (0);
620 		}
621 		if (size % sizeof(struct kinfo_proc) != 0) {
622 			_kvm_err(kd, kd->program,
623 				"proc size mismatch (%lu total, %lu chunks)",
624 				(u_long)size, (u_long)sizeof(struct kinfo_proc));
625 			return (0);
626 		}
627 		nprocs = size / sizeof(struct kinfo_proc);
628 	} else {
629 		struct nlist nl[4], *p;
630 
631 		nl[0].n_name = "_nprocs";
632 		nl[1].n_name = "_allproc";
633 		nl[2].n_name = "_zombproc";
634 		nl[3].n_name = 0;
635 
636 		if (kvm_nlist(kd, nl) != 0) {
637 			for (p = nl; p->n_type != 0; ++p)
638 				;
639 			_kvm_err(kd, kd->program,
640 				 "%s: no such symbol", p->n_name);
641 			return (0);
642 		}
643 		if (KREAD(kd, nl[0].n_value, &nprocs)) {
644 			_kvm_err(kd, kd->program, "can't read nprocs");
645 			return (0);
646 		}
647 		size = nprocs * sizeof(struct kinfo_proc);
648 		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
649 		if (kd->procbase == 0)
650 			return (0);
651 
652 		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
653 				      nl[2].n_value, nprocs);
654 #ifdef notdef
655 		size = nprocs * sizeof(struct kinfo_proc);
656 		(void)realloc(kd->procbase, size);
657 #endif
658 	}
659 	*cnt = nprocs;
660 	return (kd->procbase);
661 }
662 
663 void
664 _kvm_freeprocs(kd)
665 	kvm_t *kd;
666 {
667 	if (kd->procbase) {
668 		free(kd->procbase);
669 		kd->procbase = 0;
670 	}
671 }
672 
673 void *
674 _kvm_realloc(kd, p, n)
675 	kvm_t *kd;
676 	void *p;
677 	size_t n;
678 {
679 	void *np = (void *)realloc(p, n);
680 
681 	if (np == 0)
682 		_kvm_err(kd, kd->program, "out of memory");
683 	return (np);
684 }
685 
686 #ifndef MAX
687 #define MAX(a, b) ((a) > (b) ? (a) : (b))
688 #endif
689 
690 /*
691  * Read in an argument vector from the user address space of process p.
692  * addr is the user-space base address of narg null-terminated contiguous
693  * strings.  This is used to read in both the command arguments and
694  * environment strings.  Read at most maxcnt characters of strings.
695  */
696 static char **
697 kvm_argv(kd, p, addr, narg, maxcnt)
698 	kvm_t *kd;
699 	const struct proc *p;
700 	u_long addr;
701 	int narg;
702 	int maxcnt;
703 {
704 	char *np, *cp, *ep, *ap;
705 	u_long oaddr = -1;
706 	int len, cc;
707 	char **argv;
708 
709 	/*
710 	 * Check that there isn't an unreasonable number of arguments,
711 	 * and that the address is in user space.
712 	 */
713 	if (narg > ARG_MAX || addr < VM_MIN_ADDRESS || addr >= VM_MAXUSER_ADDRESS)
714 		return (0);
715 
716 	if (kd->argv == 0) {
717 		/*
718 		 * Try to avoid reallocs.
719 		 */
720 		kd->argc = MAX(narg + 1, 32);
721 		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
722 						sizeof(*kd->argv));
723 		if (kd->argv == 0)
724 			return (0);
725 	} else if (narg + 1 > kd->argc) {
726 		kd->argc = MAX(2 * kd->argc, narg + 1);
727 		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
728 						sizeof(*kd->argv));
729 		if (kd->argv == 0)
730 			return (0);
731 	}
732 	if (kd->argspc == 0) {
733 		kd->argspc = (char *)_kvm_malloc(kd, kd->nbpg);
734 		if (kd->argspc == 0)
735 			return (0);
736 		kd->arglen = kd->nbpg;
737 	}
738 	if (kd->argbuf == 0) {
739 		kd->argbuf = (char *)_kvm_malloc(kd, kd->nbpg);
740 		if (kd->argbuf == 0)
741 			return (0);
742 	}
743 	cc = sizeof(char *) * narg;
744 	if (kvm_uread(kd, p, addr, (char *)kd->argv, cc) != cc)
745 		return (0);
746 	ap = np = kd->argspc;
747 	argv = kd->argv;
748 	len = 0;
749 	/*
750 	 * Loop over pages, filling in the argument vector.
751 	 */
752 	while (argv < kd->argv + narg && *argv != 0) {
753 		addr = (u_long)*argv & ~(kd->nbpg - 1);
754 		if (addr != oaddr) {
755 			if (kvm_uread(kd, p, addr, kd->argbuf, kd->nbpg) !=
756 			    kd->nbpg)
757 				return (0);
758 			oaddr = addr;
759 		}
760 		addr = (u_long)*argv & (kd->nbpg - 1);
761 		cp = kd->argbuf + addr;
762 		cc = kd->nbpg - addr;
763 		if (maxcnt > 0 && cc > maxcnt - len)
764 			cc = maxcnt - len;
765 		ep = memchr(cp, '\0', cc);
766 		if (ep != 0)
767 			cc = ep - cp + 1;
768 		if (len + cc > kd->arglen) {
769 			int off;
770 			char **pp;
771 			char *op = kd->argspc;
772 
773 			kd->arglen *= 2;
774 			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
775 							  kd->arglen);
776 			if (kd->argspc == 0)
777 				return (0);
778 			/*
779 			 * Adjust argv pointers in case realloc moved
780 			 * the string space.
781 			 */
782 			off = kd->argspc - op;
783 			for (pp = kd->argv; pp < argv; pp++)
784 				*pp += off;
785 			ap += off;
786 			np += off;
787 		}
788 		memcpy(np, cp, cc);
789 		np += cc;
790 		len += cc;
791 		if (ep != 0) {
792 			*argv++ = ap;
793 			ap = np;
794 		} else
795 			*argv += cc;
796 		if (maxcnt > 0 && len >= maxcnt) {
797 			/*
798 			 * We're stopping prematurely.  Terminate the
799 			 * current string.
800 			 */
801 			if (ep == 0) {
802 				*np = '\0';
803 				*argv++ = ap;
804 			}
805 			break;
806 		}
807 	}
808 	/* Make sure argv is terminated. */
809 	*argv = 0;
810 	return (kd->argv);
811 }
812 
813 static void
814 ps_str_a(p, addr, n)
815 	struct ps_strings *p;
816 	u_long *addr;
817 	int *n;
818 {
819 	*addr = (u_long)p->ps_argvstr;
820 	*n = p->ps_nargvstr;
821 }
822 
823 static void
824 ps_str_e(p, addr, n)
825 	struct ps_strings *p;
826 	u_long *addr;
827 	int *n;
828 {
829 	*addr = (u_long)p->ps_envstr;
830 	*n = p->ps_nenvstr;
831 }
832 
833 /*
834  * Determine if the proc indicated by p is still active.
835  * This test is not 100% foolproof in theory, but chances of
836  * being wrong are very low.
837  */
838 static int
839 proc_verify(kd, kernp, p)
840 	kvm_t *kd;
841 	u_long kernp;
842 	const struct proc *p;
843 {
844 	struct proc kernproc;
845 
846 	/*
847 	 * Just read in the whole proc.  It's not that big relative
848 	 * to the cost of the read system call.
849 	 */
850 	if (kvm_read(kd, kernp, &kernproc, sizeof(kernproc)) !=
851 	    sizeof(kernproc))
852 		return (0);
853 	return (p->p_pid == kernproc.p_pid &&
854 		(kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
855 }
856 
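/*
 * Common helper for kvm_getargv() and kvm_getenvv(): locate the process's
 * ps_strings structure at the top of its user stack, then use the info
 * callback to select either the argument or the environment vector and
 * hand it to kvm_argv().
 */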
857 static char **
858 kvm_doargv(kd, kp, nchr, info)
859 	kvm_t *kd;
860 	const struct kinfo_proc *kp;
861 	int nchr;
862 	void (*info)(struct ps_strings *, u_long *, int *);
863 {
864 	const struct proc *p = &kp->kp_proc;
865 	char **ap;
866 	u_long addr;
867 	int cnt;
868 	struct ps_strings arginfo;
869 	static struct ps_strings *ps;
870 
871 	if (ps == NULL) {
872 		struct _ps_strings _ps;
873 		int mib[2];
874 		size_t len;
875 
876 		mib[0] = CTL_VM;
877 		mib[1] = VM_PSSTRINGS;
878 		len = sizeof(_ps);
879 		sysctl(mib, 2, &_ps, &len, NULL, 0);
880 		ps = (struct ps_strings *)_ps.val;
881 	}
882 
883 	/*
884 	 * Pointers are stored at the top of the user stack.
885 	 */
886 	if (p->p_stat == SZOMB ||
887 	    kvm_uread(kd, p, (u_long)ps, (char *)&arginfo,
888 		      sizeof(arginfo)) != sizeof(arginfo))
889 		return (0);
890 
891 	(*info)(&arginfo, &addr, &cnt);
892 	if (cnt == 0)
893 		return (0);
894 	ap = kvm_argv(kd, p, addr, cnt, nchr);
895 	/*
896 	 * For live kernels, make sure this process didn't go away.
897 	 */
898 	if (ap != 0 && ISALIVE(kd) &&
899 	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
900 		ap = 0;
901 	return (ap);
902 }
903 
904 /*
905  * Get the command args.  This code is now machine independent.
906  */
907 char **
908 kvm_getargv(kd, kp, nchr)
909 	kvm_t *kd;
910 	const struct kinfo_proc *kp;
911 	int nchr;
912 {
913 	return (kvm_doargv(kd, kp, nchr, ps_str_a));
914 }
915 
916 char **
917 kvm_getenvv(kd, kp, nchr)
918 	kvm_t *kd;
919 	const struct kinfo_proc *kp;
920 	int nchr;
921 {
922 	return (kvm_doargv(kd, kp, nchr, ps_str_e));
923 }
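
/*
 * Editorial sketch (not part of the original source): given the array
 * returned by kvm_getprocs(), a caller such as ps(1) might fetch and print
 * each argument vector as below.  The function name is illustrative only;
 * the block is disabled so it cannot affect the build.  An nchr of 0 means
 * "no limit", since kvm_argv() only enforces maxcnt when it is positive.
 */
#if 0
static void
example_print_args(kvm_t *kd, struct kinfo_proc *kp, int nentries)
{
	char **argv;
	int i;

	for (i = 0; i < nentries; i++) {
		printf("%5d", kp[i].kp_proc.p_pid);
		argv = kvm_getargv(kd, &kp[i], 0);
		if (argv == NULL) {
			/* e.g. zombies or processes that exited meanwhile */
			printf(" (%s)\n", kp[i].kp_proc.p_comm);
			continue;
		}
		while (*argv != NULL)
			printf(" %s", *argv++);
		printf("\n");
	}
}
#endif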
924 
925 /*
926  * Read from user space.  The user context is given by p.
927  */
928 ssize_t
929 kvm_uread(kd, p, uva, buf, len)
930 	kvm_t *kd;
931 	const struct proc *p;
932 	u_long uva;
933 	char *buf;
934 	size_t len;
935 {
936 	char *cp;
937 
938 	cp = buf;
939 	while (len > 0) {
940 		int cc;
941 		char *dp;
942 		u_long cnt;
943 
944 		dp = _kvm_uread(kd, p, uva, &cnt);
945 		if (dp == 0) {
946 			_kvm_err(kd, 0, "invalid address (%lx)", uva);
947 			return (0);
948 		}
949 		cc = MIN(cnt, len);
950 		bcopy(dp, cp, cc);
951 
952 		cp += cc;
953 		uva += cc;
954 		len -= cc;
955 	}
956 	return (ssize_t)(cp - buf);
957 }
958