xref: /openbsd-src/lib/libkvm/kvm_proc.c (revision 62a742911104f98b9185b2c6b6007d9b1c36396c)
1 /*	$OpenBSD: kvm_proc.c,v 1.4 1997/02/26 16:46:33 niklas Exp $ */
2 /*	$NetBSD: kvm_proc.c,v 1.16 1996/03/18 22:33:57 thorpej Exp $	*/
3 
4 /*-
5  * Copyright (c) 1994, 1995 Charles M. Hannum.  All rights reserved.
6  * Copyright (c) 1989, 1992, 1993
7  *	The Regents of the University of California.  All rights reserved.
8  *
9  * This code is derived from software developed by the Computer Systems
10  * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
11  * BG 91-66 and contributed to Berkeley.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. All advertising materials mentioning features or use of this software
22  *    must display the following acknowledgement:
23  *	This product includes software developed by the University of
24  *	California, Berkeley and its contributors.
25  * 4. Neither the name of the University nor the names of its contributors
26  *    may be used to endorse or promote products derived from this software
27  *    without specific prior written permission.
28  *
29  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39  * SUCH DAMAGE.
40  */
41 
42 #if defined(LIBC_SCCS) && !defined(lint)
43 #if 0
44 static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
45 #else
46 static char *rcsid = "$OpenBSD: kvm_proc.c,v 1.4 1997/02/26 16:46:33 niklas Exp $";
47 #endif
48 #endif /* LIBC_SCCS and not lint */
49 
50 /*
51  * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
52  * users of this code, so we've factored it out into a separate module.
53  * Thus, we keep this grunge out of the other kvm applications (i.e.,
54  * most other applications are interested only in open/close/read/nlist).
55  */
56 
57 #include <sys/param.h>
58 #include <sys/user.h>
59 #include <sys/proc.h>
60 #include <sys/exec.h>
61 #include <sys/stat.h>
62 #include <sys/ioctl.h>
63 #include <sys/tty.h>
64 #include <stdlib.h>
65 #include <string.h>
66 #include <unistd.h>
67 #include <nlist.h>
68 #include <kvm.h>
69 
70 #include <vm/vm.h>
71 #include <vm/vm_param.h>
72 #include <vm/swap_pager.h>
73 
74 #include <sys/sysctl.h>
75 
76 #include <limits.h>
77 #include <db.h>
78 #include <paths.h>
79 
80 #include "kvm_private.h"
81 
82 #define KREAD(kd, addr, obj) \
83 	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))
84 
85 int		_kvm_readfromcore __P((kvm_t *, u_long, u_long));
86 int		_kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long));
87 ssize_t		kvm_uread __P((kvm_t *, const struct proc *, u_long, char *,
88 		    size_t));
89 
90 static char	**kvm_argv __P((kvm_t *, const struct proc *, u_long, int,
91 		    int));
92 static int	kvm_deadprocs __P((kvm_t *, int, int, u_long, u_long, int));
93 static char	**kvm_doargv __P((kvm_t *, const struct kinfo_proc *, int,
94 		    void (*)(struct ps_strings *, u_long *, int *)));
95 static int	kvm_proclist __P((kvm_t *, int, int, struct proc *,
96 		    struct kinfo_proc *, int));
97 static int	proc_verify __P((kvm_t *, u_long, const struct proc *));
98 static void	ps_str_a __P((struct ps_strings *, u_long *, int *));
99 static void	ps_str_e __P((struct ps_strings *, u_long *, int *));
100 
/*
 * Translate the user virtual address va of process p into a pointer
 * into kd->swapspc holding that page's contents, fetching the page
 * from the core file or from the swap pager as needed.  On success,
 * *cnt is set to the number of valid bytes from the returned pointer
 * to the end of the page.  Returns 0 (NULL) on any failure.
 */
char *
_kvm_uread(kd, p, va, cnt)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *cnt;
{
	register u_long addr, head;
	register u_long offset;
	struct vm_map_entry vme;
	struct vm_object vmo;
	int rv;

	/* Lazily allocate the one-page staging buffer that the
	 * page-reading helpers below fill in. */
	if (kd->swapspc == 0) {
		kd->swapspc = (char *)_kvm_malloc(kd, kd->nbpg);
		if (kd->swapspc == 0)
			return (0);
	}

	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	head = (u_long)&p->p_vmspace->vm_map.header;
	addr = head;
	while (1) {
		if (KREAD(kd, addr, &vme))
			return (0);

		if (va >= vme.start && va < vme.end &&
		    vme.object.vm_object != 0)
			break;

		/* Circular entry list: arriving back at the header
		 * means va is not mapped. */
		addr = (u_long)vme.next;
		if (addr == head)
			return (0);
	}

	/*
	 * We found the right object -- follow shadow links.
	 */
	offset = va - vme.start + vme.offset;
	addr = (u_long)vme.object.vm_object;

	while (1) {
		/* Try reading the page from core first. */
		if ((rv = _kvm_readfromcore(kd, addr, offset)))
			break;

		if (KREAD(kd, addr, &vmo))
			return (0);

		/* If there is a pager here, see if it has the page. */
		if (vmo.pager != 0 &&
		    (rv = _kvm_readfrompager(kd, &vmo, offset)))
			break;

		/* Move down the shadow chain. */
		addr = (u_long)vmo.shadow;
		if (addr == 0)
			return (0);
		offset += vmo.shadow_offset;
	}

	/* rv == -1 means one of the read helpers hit a hard error. */
	if (rv == -1)
		return (0);

	/* Found the page; kd->swapspc now holds it. */
	offset %= kd->nbpg;
	*cnt = kd->nbpg - offset;
	return (&kd->swapspc[offset]);
}
174 
175 #define	vm_page_hash(kd, object, offset) \
176 	(((u_long)object + (u_long)(offset / kd->nbpg)) & kd->vm_page_hash_mask)
177 
178 int
179 _kvm_coreinit(kd)
180 	kvm_t *kd;
181 {
182 	struct nlist nlist[3];
183 
184 	nlist[0].n_name = "_vm_page_buckets";
185 	nlist[1].n_name = "_vm_page_hash_mask";
186 	nlist[2].n_name = 0;
187 	if (kvm_nlist(kd, nlist) != 0)
188 		return (-1);
189 
190 	if (KREAD(kd, nlist[0].n_value, &kd->vm_page_buckets) ||
191 	    KREAD(kd, nlist[1].n_value, &kd->vm_page_hash_mask))
192 		return (-1);
193 
194 	return (0);
195 }
196 
/*
 * Try to find the page of the given vm_object/offset pair in physical
 * memory by walking the kernel's vm_page hash chain for it.  On a hit
 * the page is read from the physical-memory file into kd->swapspc.
 * Returns 1 on success, 0 if the page is not resident, -1 on error.
 */
int
_kvm_readfromcore(kd, object, offset)
	kvm_t *kd;
	u_long object, offset;
{
	u_long addr;
	struct pglist bucket;
	struct vm_page mem;
	off_t seekpoint;

	/* First call: fetch the page-hash symbols from the kernel. */
	if (kd->vm_page_buckets == 0 &&
	    _kvm_coreinit(kd))
		return (-1);

	addr = (u_long)&kd->vm_page_buckets[vm_page_hash(kd, object, offset)];
	if (KREAD(kd, addr, &bucket))
		return (-1);

	/* Walk the hash chain for an exact object/offset match; the
	 * comparison is done on the page-aligned offset. */
	addr = (u_long)bucket.tqh_first;
	offset &= ~(kd->nbpg -1);
	while (1) {
		if (addr == 0)
			return (0);

		if (KREAD(kd, addr, &mem))
			return (-1);

		if ((u_long)mem.object == object &&
		    (u_long)mem.offset == offset)
			break;

		addr = (u_long)mem.hashq.tqe_next;
	}

	/* The page's physical address is its offset in the memory file. */
	seekpoint = mem.phys_addr;

	if (lseek(kd->pmfd, seekpoint, 0) == -1)
		return (-1);
	if (read(kd->pmfd, kd->swapspc, kd->nbpg) != kd->nbpg)
		return (-1);

	return (1);
}
240 
/*
 * Try to read the page at the given offset within vm_object *vmop from
 * its pager (which must be a swap pager) into kd->swapspc.  Returns 1
 * on success, 0 if the page is not present in swap, -1 on error.
 *
 * NOTE(review): printf() is used in the debug block below but
 * <stdio.h> is not included by this file -- this relies on an
 * implicit declaration; confirm against the build.
 */
int
_kvm_readfrompager(kd, vmop, offset)
	kvm_t *kd;
	struct vm_object *vmop;
	u_long offset;
{
	u_long addr;
	struct pager_struct pager;
	struct swpager swap;
	int ix;
	struct swblock swb;
	off_t seekpoint;

	/* Read in the pager info and make sure it's a swap device. */
	addr = (u_long)vmop->pager;
	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
		return (-1);

	/* Read in the swap_pager private data. */
	addr = (u_long)pager.pg_data;
	if (KREAD(kd, addr, &swap))
		return (-1);

	/*
	 * Calculate the paging offset, and make sure it's within the
	 * bounds of the pager.
	 */
	offset += vmop->paging_offset;
	ix = offset / dbtob(swap.sw_bsize);
#if 0
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
		return (-1);
#else
	/* Debugging variant of the bounds check above: dump the whole
	 * swap block table when the offset is out of range. */
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
		int i;
		printf("BUG BUG BUG BUG:\n");
		printf("object %x offset %x pgoffset %x pager %x swpager %x\n",
		    vmop, offset - vmop->paging_offset, vmop->paging_offset,
		    vmop->pager, pager.pg_data);
		printf("osize %x bsize %x blocks %x nblocks %x\n",
		    swap.sw_osize, swap.sw_bsize, swap.sw_blocks,
		    swap.sw_nblocks);
		for (ix = 0; ix < swap.sw_nblocks; ix++) {
			addr = (u_long)&swap.sw_blocks[ix];
			if (KREAD(kd, addr, &swb))
				return (0);
			printf("sw_blocks[%d]: block %x mask %x\n", ix,
			    swb.swb_block, swb.swb_mask);
		}
		return (-1);
	}
#endif

	/* Read in the swap records. */
	addr = (u_long)&swap.sw_blocks[ix];
	if (KREAD(kd, addr, &swb))
		return (-1);

	/* Calculate offset within pager. */
	offset %= dbtob(swap.sw_bsize);

	/* Check that the page is actually present. */
	if ((swb.swb_mask & (1 << (offset / kd->nbpg))) == 0)
		return (0);

	/* A dead kernel has no live swap device to read from. */
	if (!ISALIVE(kd))
		return (-1);

	/* Calculate the physical address and read the page. */
	seekpoint = dbtob(swb.swb_block) + (offset & ~(kd->nbpg -1));

	if (lseek(kd->swfd, seekpoint, 0) == -1)
		return (-1);
	if (read(kd->swfd, kd->swapspc, kd->nbpg) != kd->nbpg)
		return (-1);

	return (1);
}
319 
320 /*
321  * Read procs from the memory file into buffer bp, which has space to hold
322  * at most maxcnt procs.
323  */
324 static int
325 kvm_proclist(kd, what, arg, p, bp, maxcnt)
326 	kvm_t *kd;
327 	int what, arg;
328 	struct proc *p;
329 	struct kinfo_proc *bp;
330 	int maxcnt;
331 {
332 	register int cnt = 0;
333 	struct eproc eproc;
334 	struct pgrp pgrp;
335 	struct session sess;
336 	struct tty tty;
337 	struct proc proc;
338 
339 	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
340 		if (KREAD(kd, (u_long)p, &proc)) {
341 			_kvm_err(kd, kd->program, "can't read proc at %x", p);
342 			return (-1);
343 		}
344 		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
345 			KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
346 			      &eproc.e_ucred);
347 
348 		switch(what) {
349 
350 		case KERN_PROC_PID:
351 			if (proc.p_pid != (pid_t)arg)
352 				continue;
353 			break;
354 
355 		case KERN_PROC_UID:
356 			if (eproc.e_ucred.cr_uid != (uid_t)arg)
357 				continue;
358 			break;
359 
360 		case KERN_PROC_RUID:
361 			if (eproc.e_pcred.p_ruid != (uid_t)arg)
362 				continue;
363 			break;
364 		}
365 		/*
366 		 * We're going to add another proc to the set.  If this
367 		 * will overflow the buffer, assume the reason is because
368 		 * nprocs (or the proc list) is corrupt and declare an error.
369 		 */
370 		if (cnt >= maxcnt) {
371 			_kvm_err(kd, kd->program, "nprocs corrupt");
372 			return (-1);
373 		}
374 		/*
375 		 * gather eproc
376 		 */
377 		eproc.e_paddr = p;
378 		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
379 			_kvm_err(kd, kd->program, "can't read pgrp at %x",
380 				 proc.p_pgrp);
381 			return (-1);
382 		}
383 		eproc.e_sess = pgrp.pg_session;
384 		eproc.e_pgid = pgrp.pg_id;
385 		eproc.e_jobc = pgrp.pg_jobc;
386 		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
387 			_kvm_err(kd, kd->program, "can't read session at %x",
388 				pgrp.pg_session);
389 			return (-1);
390 		}
391 		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
392 			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
393 				_kvm_err(kd, kd->program,
394 					 "can't read tty at %x", sess.s_ttyp);
395 				return (-1);
396 			}
397 			eproc.e_tdev = tty.t_dev;
398 			eproc.e_tsess = tty.t_session;
399 			if (tty.t_pgrp != NULL) {
400 				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
401 					_kvm_err(kd, kd->program,
402 						 "can't read tpgrp at &x",
403 						tty.t_pgrp);
404 					return (-1);
405 				}
406 				eproc.e_tpgid = pgrp.pg_id;
407 			} else
408 				eproc.e_tpgid = -1;
409 		} else
410 			eproc.e_tdev = NODEV;
411 		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
412 		if (sess.s_leader == p)
413 			eproc.e_flag |= EPROC_SLEADER;
414 		if (proc.p_wmesg)
415 			(void)kvm_read(kd, (u_long)proc.p_wmesg,
416 			    eproc.e_wmesg, WMESGLEN);
417 
418 		(void)kvm_read(kd, (u_long)proc.p_vmspace,
419 		    (char *)&eproc.e_vm, sizeof(eproc.e_vm));
420 
421 		eproc.e_xsize = eproc.e_xrssize = 0;
422 		eproc.e_xccount = eproc.e_xswrss = 0;
423 
424 		switch (what) {
425 
426 		case KERN_PROC_PGRP:
427 			if (eproc.e_pgid != (pid_t)arg)
428 				continue;
429 			break;
430 
431 		case KERN_PROC_TTY:
432 			if ((proc.p_flag & P_CONTROLT) == 0 ||
433 			     eproc.e_tdev != (dev_t)arg)
434 				continue;
435 			break;
436 		}
437 		bcopy(&proc, &bp->kp_proc, sizeof(proc));
438 		bcopy(&eproc, &bp->kp_eproc, sizeof(eproc));
439 		++bp;
440 		++cnt;
441 	}
442 	return (cnt);
443 }
444 
445 /*
446  * Build proc info array by reading in proc list from a crash dump.
447  * Return number of procs read.  maxcnt is the max we will read.
448  */
449 static int
450 kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
451 	kvm_t *kd;
452 	int what, arg;
453 	u_long a_allproc;
454 	u_long a_zombproc;
455 	int maxcnt;
456 {
457 	register struct kinfo_proc *bp = kd->procbase;
458 	register int acnt, zcnt;
459 	struct proc *p;
460 
461 	if (KREAD(kd, a_allproc, &p)) {
462 		_kvm_err(kd, kd->program, "cannot read allproc");
463 		return (-1);
464 	}
465 	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
466 	if (acnt < 0)
467 		return (acnt);
468 
469 	if (KREAD(kd, a_zombproc, &p)) {
470 		_kvm_err(kd, kd->program, "cannot read zombproc");
471 		return (-1);
472 	}
473 	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
474 	if (zcnt < 0)
475 		zcnt = 0;
476 
477 	return (acnt + zcnt);
478 }
479 
480 struct kinfo_proc *
481 kvm_getprocs(kd, op, arg, cnt)
482 	kvm_t *kd;
483 	int op, arg;
484 	int *cnt;
485 {
486 	size_t size;
487 	int mib[4], st, nprocs;
488 
489 	if (kd->procbase != 0) {
490 		free((void *)kd->procbase);
491 		/*
492 		 * Clear this pointer in case this call fails.  Otherwise,
493 		 * kvm_close() will free it again.
494 		 */
495 		kd->procbase = 0;
496 	}
497 	if (ISALIVE(kd)) {
498 		size = 0;
499 		mib[0] = CTL_KERN;
500 		mib[1] = KERN_PROC;
501 		mib[2] = op;
502 		mib[3] = arg;
503 		st = sysctl(mib, 4, NULL, &size, NULL, 0);
504 		if (st == -1) {
505 			_kvm_syserr(kd, kd->program, "kvm_getprocs");
506 			return (0);
507 		}
508 		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
509 		if (kd->procbase == 0)
510 			return (0);
511 		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
512 		if (st == -1) {
513 			_kvm_syserr(kd, kd->program, "kvm_getprocs");
514 			return (0);
515 		}
516 		if (size % sizeof(struct kinfo_proc) != 0) {
517 			_kvm_err(kd, kd->program,
518 				"proc size mismatch (%d total, %d chunks)",
519 				size, sizeof(struct kinfo_proc));
520 			return (0);
521 		}
522 		nprocs = size / sizeof(struct kinfo_proc);
523 	} else {
524 		struct nlist nl[4], *p;
525 
526 		nl[0].n_name = "_nprocs";
527 		nl[1].n_name = "_allproc";
528 		nl[2].n_name = "_zombproc";
529 		nl[3].n_name = 0;
530 
531 		if (kvm_nlist(kd, nl) != 0) {
532 			for (p = nl; p->n_type != 0; ++p)
533 				;
534 			_kvm_err(kd, kd->program,
535 				 "%s: no such symbol", p->n_name);
536 			return (0);
537 		}
538 		if (KREAD(kd, nl[0].n_value, &nprocs)) {
539 			_kvm_err(kd, kd->program, "can't read nprocs");
540 			return (0);
541 		}
542 		size = nprocs * sizeof(struct kinfo_proc);
543 		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
544 		if (kd->procbase == 0)
545 			return (0);
546 
547 		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
548 				      nl[2].n_value, nprocs);
549 #ifdef notdef
550 		size = nprocs * sizeof(struct kinfo_proc);
551 		(void)realloc(kd->procbase, size);
552 #endif
553 	}
554 	*cnt = nprocs;
555 	return (kd->procbase);
556 }
557 
558 void
559 _kvm_freeprocs(kd)
560 	kvm_t *kd;
561 {
562 	if (kd->procbase) {
563 		free(kd->procbase);
564 		kd->procbase = 0;
565 	}
566 }
567 
568 void *
569 _kvm_realloc(kd, p, n)
570 	kvm_t *kd;
571 	void *p;
572 	size_t n;
573 {
574 	void *np = (void *)realloc(p, n);
575 
576 	if (np == 0)
577 		_kvm_err(kd, kd->program, "out of memory");
578 	return (np);
579 }
580 
581 #ifndef MAX
582 #define MAX(a, b) ((a) > (b) ? (a) : (b))
583 #endif
584 
585 /*
586  * Read in an argument vector from the user address space of process p.
587  * addr if the user-space base address of narg null-terminated contiguous
588  * strings.  This is used to read in both the command arguments and
589  * environment strings.  Read at most maxcnt characters of strings.
590  */
591 static char **
592 kvm_argv(kd, p, addr, narg, maxcnt)
593 	kvm_t *kd;
594 	const struct proc *p;
595 	register u_long addr;
596 	register int narg;
597 	register int maxcnt;
598 {
599 	register char *np, *cp, *ep, *ap;
600 	register u_long oaddr = -1;
601 	register int len, cc;
602 	register char **argv;
603 
604 	/*
605 	 * Check that there aren't an unreasonable number of agruments,
606 	 * and that the address is in user space.
607 	 */
608 	if (narg > ARG_MAX || addr < VM_MIN_ADDRESS || addr >= VM_MAXUSER_ADDRESS)
609 		return (0);
610 
611 	if (kd->argv == 0) {
612 		/*
613 		 * Try to avoid reallocs.
614 		 */
615 		kd->argc = MAX(narg + 1, 32);
616 		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
617 						sizeof(*kd->argv));
618 		if (kd->argv == 0)
619 			return (0);
620 	} else if (narg + 1 > kd->argc) {
621 		kd->argc = MAX(2 * kd->argc, narg + 1);
622 		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
623 						sizeof(*kd->argv));
624 		if (kd->argv == 0)
625 			return (0);
626 	}
627 	if (kd->argspc == 0) {
628 		kd->argspc = (char *)_kvm_malloc(kd, kd->nbpg);
629 		if (kd->argspc == 0)
630 			return (0);
631 		kd->arglen = kd->nbpg;
632 	}
633 	if (kd->argbuf == 0) {
634 		kd->argbuf = (char *)_kvm_malloc(kd, kd->nbpg);
635 		if (kd->argbuf == 0)
636 			return (0);
637 	}
638 	cc = sizeof(char *) * narg;
639 	if (kvm_uread(kd, p, addr, (char *)kd->argv, cc) != cc)
640 		return (0);
641 	ap = np = kd->argspc;
642 	argv = kd->argv;
643 	len = 0;
644 	/*
645 	 * Loop over pages, filling in the argument vector.
646 	 */
647 	while (argv < kd->argv + narg && *argv != 0) {
648 		addr = (u_long)*argv & ~(kd->nbpg - 1);
649 		if (addr != oaddr) {
650 			if (kvm_uread(kd, p, addr, kd->argbuf, kd->nbpg) !=
651 			    kd->nbpg)
652 				return (0);
653 			oaddr = addr;
654 		}
655 		addr = (u_long)*argv & (kd->nbpg - 1);
656 		cp = kd->argbuf + addr;
657 		cc = kd->nbpg - addr;
658 		if (maxcnt > 0 && cc > maxcnt - len)
659 			cc = maxcnt - len;;
660 		ep = memchr(cp, '\0', cc);
661 		if (ep != 0)
662 			cc = ep - cp + 1;
663 		if (len + cc > kd->arglen) {
664 			register int off;
665 			register char **pp;
666 			register char *op = kd->argspc;
667 
668 			kd->arglen *= 2;
669 			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
670 							  kd->arglen);
671 			if (kd->argspc == 0)
672 				return (0);
673 			/*
674 			 * Adjust argv pointers in case realloc moved
675 			 * the string space.
676 			 */
677 			off = kd->argspc - op;
678 			for (pp = kd->argv; pp < argv; pp++)
679 				*pp += off;
680 			ap += off;
681 			np += off;
682 		}
683 		memcpy(np, cp, cc);
684 		np += cc;
685 		len += cc;
686 		if (ep != 0) {
687 			*argv++ = ap;
688 			ap = np;
689 		} else
690 			*argv += cc;
691 		if (maxcnt > 0 && len >= maxcnt) {
692 			/*
693 			 * We're stopping prematurely.  Terminate the
694 			 * current string.
695 			 */
696 			if (ep == 0) {
697 				*np = '\0';
698 				*argv++ = ap;
699 			}
700 			break;
701 		}
702 	}
703 	/* Make sure argv is terminated. */
704 	*argv = 0;
705 	return (kd->argv);
706 }
707 
708 static void
709 ps_str_a(p, addr, n)
710 	struct ps_strings *p;
711 	u_long *addr;
712 	int *n;
713 {
714 	*addr = (u_long)p->ps_argvstr;
715 	*n = p->ps_nargvstr;
716 }
717 
718 static void
719 ps_str_e(p, addr, n)
720 	struct ps_strings *p;
721 	u_long *addr;
722 	int *n;
723 {
724 	*addr = (u_long)p->ps_envstr;
725 	*n = p->ps_nenvstr;
726 }
727 
728 /*
729  * Determine if the proc indicated by p is still active.
730  * This test is not 100% foolproof in theory, but chances of
731  * being wrong are very low.
732  */
733 static int
734 proc_verify(kd, kernp, p)
735 	kvm_t *kd;
736 	u_long kernp;
737 	const struct proc *p;
738 {
739 	struct proc kernproc;
740 
741 	/*
742 	 * Just read in the whole proc.  It's not that big relative
743 	 * to the cost of the read system call.
744 	 */
745 	if (kvm_read(kd, kernp, (char *)&kernproc, sizeof(kernproc)) !=
746 	    sizeof(kernproc))
747 		return (0);
748 	return (p->p_pid == kernproc.p_pid &&
749 		(kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
750 }
751 
752 static char **
753 kvm_doargv(kd, kp, nchr, info)
754 	kvm_t *kd;
755 	const struct kinfo_proc *kp;
756 	int nchr;
757 	void (*info)(struct ps_strings *, u_long *, int *);
758 {
759 	register const struct proc *p = &kp->kp_proc;
760 	register char **ap;
761 	u_long addr;
762 	int cnt;
763 	struct ps_strings arginfo;
764 	static struct ps_strings *ps;
765 
766 	if (ps == NULL) {
767 		struct _ps_strings _ps;
768 		int mib[2];
769 		size_t len;
770 
771 		mib[0] = CTL_VM;
772 		mib[1] = VM_PSSTRINGS;
773 		len = sizeof(_ps);
774 		sysctl(mib, 2, &_ps, &len, NULL, 0);
775 		ps = (struct ps_strings *)_ps.val;
776 	}
777 
778 	/*
779 	 * Pointers are stored at the top of the user stack.
780 	 */
781 	if (p->p_stat == SZOMB ||
782 	    kvm_uread(kd, p, (u_long)ps, (char *)&arginfo,
783 		      sizeof(arginfo)) != sizeof(arginfo))
784 		return (0);
785 
786 	(*info)(&arginfo, &addr, &cnt);
787 	if (cnt == 0)
788 		return (0);
789 	ap = kvm_argv(kd, p, addr, cnt, nchr);
790 	/*
791 	 * For live kernels, make sure this process didn't go away.
792 	 */
793 	if (ap != 0 && ISALIVE(kd) &&
794 	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
795 		ap = 0;
796 	return (ap);
797 }
798 
799 /*
800  * Get the command args.  This code is now machine independent.
801  */
802 char **
803 kvm_getargv(kd, kp, nchr)
804 	kvm_t *kd;
805 	const struct kinfo_proc *kp;
806 	int nchr;
807 {
808 	return (kvm_doargv(kd, kp, nchr, ps_str_a));
809 }
810 
811 char **
812 kvm_getenvv(kd, kp, nchr)
813 	kvm_t *kd;
814 	const struct kinfo_proc *kp;
815 	int nchr;
816 {
817 	return (kvm_doargv(kd, kp, nchr, ps_str_e));
818 }
819 
820 /*
821  * Read from user space.  The user context is given by p.
822  */
823 ssize_t
824 kvm_uread(kd, p, uva, buf, len)
825 	kvm_t *kd;
826 	register const struct proc *p;
827 	register u_long uva;
828 	register char *buf;
829 	register size_t len;
830 {
831 	register char *cp;
832 
833 	cp = buf;
834 	while (len > 0) {
835 		register int cc;
836 		register char *dp;
837 		u_long cnt;
838 
839 		dp = _kvm_uread(kd, p, uva, &cnt);
840 		if (dp == 0) {
841 			_kvm_err(kd, 0, "invalid address (%lx)", uva);
842 			return (0);
843 		}
844 		cc = MIN(cnt, len);
845 		bcopy(dp, cp, cc);
846 
847 		cp += cc;
848 		uva += cc;
849 		len -= cc;
850 	}
851 	return (ssize_t)(cp - buf);
852 }
853