1 /*	$NetBSD: kvm_proc.c,v 1.20 1997/08/15 17:52:46 drochner Exp $	*/
2 
3 /*-
4  * Copyright (c) 1994, 1995 Charles M. Hannum.  All rights reserved.
5  * Copyright (c) 1989, 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software developed by the Computer Systems
9  * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
10  * BG 91-66 and contributed to Berkeley.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  */
40 
41 #include <sys/cdefs.h>
42 #if defined(LIBC_SCCS) && !defined(lint)
43 #if 0
44 static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
45 #else
46 __RCSID("$NetBSD: kvm_proc.c,v 1.20 1997/08/15 17:52:46 drochner Exp $");
47 #endif
48 #endif /* LIBC_SCCS and not lint */
49 
50 /*
51  * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
52  * users of this code, so we've factored it out into a separate module.
53  * Thus, we keep this grunge out of the other kvm applications (i.e.,
54  * most other applications are interested only in open/close/read/nlist).
55  */
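/*
 * A minimal usage sketch (not part of this library): roughly how a ps-like
 * client is expected to drive the interface below.  Error handling is
 * abbreviated, and the NULL arguments to kvm_openfiles() simply request the
 * running system's defaults.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <fcntl.h>
 *	#include <kvm.h>
 *	#include <limits.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		char errbuf[_POSIX2_LINE_MAX];
 *		struct kinfo_proc *kp;
 *		kvm_t *kd;
 *		char **argv;
 *		int i, cnt;
 *
 *		kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
 *		if (kd == NULL)
 *			errx(1, "kvm_openfiles: %s", errbuf);
 *		if ((kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &cnt)) == NULL)
 *			errx(1, "kvm_getprocs: %s", kvm_geterr(kd));
 *		for (i = 0; i < cnt; i++, kp++) {
 *			printf("%5d %s", kp->kp_proc.p_pid, kp->kp_proc.p_comm);
 *			if ((argv = kvm_getargv(kd, kp, 0)) != NULL)
 *				while (*argv != NULL)
 *					printf(" %s", *argv++);
 *			printf("\n");
 *		}
 *		kvm_close(kd);
 *		return (0);
 *	}
 */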
56 
57 #include <sys/param.h>
58 #include <sys/user.h>
59 #include <sys/proc.h>
60 #include <sys/exec.h>
61 #include <sys/stat.h>
62 #include <sys/ioctl.h>
63 #include <sys/tty.h>
#include <stdio.h>	/* for the debugging printf()s in _kvm_readfrompager() */
64 #include <stdlib.h>
65 #include <string.h>
66 #include <unistd.h>
67 #include <nlist.h>
68 #include <kvm.h>
69 
70 #include <vm/vm.h>
71 #include <vm/vm_param.h>
72 #include <vm/swap_pager.h>
73 
74 #include <sys/sysctl.h>
75 
76 #include <limits.h>
77 #include <db.h>
78 #include <paths.h>
79 
80 #include "kvm_private.h"
81 
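/*
 * Read a fixed-size kernel structure at addr into *obj.  Note the inverted
 * sense: KREAD() evaluates to non-zero on failure (a short or failed read),
 * so the idiom is "if (KREAD(...)) <handle error>".
 */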
82 #define KREAD(kd, addr, obj) \
83 	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))
84 
85 char		*_kvm_uread __P((kvm_t *, const struct proc *, u_long, u_long *));
86 int		_kvm_coreinit __P((kvm_t *));
87 int		_kvm_readfromcore __P((kvm_t *, u_long, u_long));
88 int		_kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long));
89 ssize_t		kvm_uread __P((kvm_t *, const struct proc *, u_long, char *,
90 		    size_t));
91 
92 static char	**kvm_argv __P((kvm_t *, const struct proc *, u_long, int,
93 		    int));
94 static int	kvm_deadprocs __P((kvm_t *, int, int, u_long, u_long, int));
95 static char	**kvm_doargv __P((kvm_t *, const struct kinfo_proc *, int,
96 		    void (*)(struct ps_strings *, u_long *, int *)));
97 static int	kvm_proclist __P((kvm_t *, int, int, struct proc *,
98 		    struct kinfo_proc *, int));
99 static int	proc_verify __P((kvm_t *, u_long, const struct proc *));
100 static void	ps_str_a __P((struct ps_strings *, u_long *, int *));
101 static void	ps_str_e __P((struct ps_strings *, u_long *, int *));
102 
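/*
 * Translate user virtual address va in process p to a readable copy of the
 * backing page.  The page is located by walking p's vm_map and the shadow
 * object chain, then fetched into kd->swapspc either from the core file (if
 * it was resident) or from the swap area.  On success, a pointer to va's
 * byte within that buffer is returned and *cnt is set to the number of
 * valid bytes from there to the end of the page; 0 is returned if the page
 * cannot be found.
 */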
103 char *
104 _kvm_uread(kd, p, va, cnt)
105 	kvm_t *kd;
106 	const struct proc *p;
107 	u_long va;
108 	u_long *cnt;
109 {
110 	register u_long addr, head;
111 	register u_long offset;
112 	struct vm_map_entry vme;
113 	struct vm_object vmo;
114 	int rv;
115 
116 	if (kd->swapspc == 0) {
117 		kd->swapspc = (char *)_kvm_malloc(kd, kd->nbpg);
118 		if (kd->swapspc == 0)
119 			return (0);
120 	}
121 
122 	/*
123 	 * Look through the address map for the memory object
124 	 * that corresponds to the given virtual address.
125 	 * The header just has the entire valid range.
126 	 */
127 	head = (u_long)&p->p_vmspace->vm_map.header;
128 	addr = head;
129 	while (1) {
130 		if (KREAD(kd, addr, &vme))
131 			return (0);
132 
133 		if (va >= vme.start && va < vme.end &&
134 		    vme.object.vm_object != 0)
135 			break;
136 
137 		addr = (u_long)vme.next;
138 		if (addr == head)
139 			return (0);
140 	}
141 
142 	/*
143 	 * We found the right object -- follow shadow links.
144 	 */
145 	offset = va - vme.start + vme.offset;
146 	addr = (u_long)vme.object.vm_object;
147 
148 	while (1) {
149 		/* Try reading the page from core first. */
150 		if ((rv = _kvm_readfromcore(kd, addr, offset)))
151 			break;
152 
153 		if (KREAD(kd, addr, &vmo))
154 			return (0);
155 
156 		/* If there is a pager here, see if it has the page. */
157 		if (vmo.pager != 0 &&
158 		    (rv = _kvm_readfrompager(kd, &vmo, offset)))
159 			break;
160 
161 		/* Move down the shadow chain. */
162 		addr = (u_long)vmo.shadow;
163 		if (addr == 0)
164 			return (0);
165 		offset += vmo.shadow_offset;
166 	}
167 
168 	if (rv == -1)
169 		return (0);
170 
171 	/* Found the page. */
172 	offset %= kd->nbpg;
173 	*cnt = kd->nbpg - offset;
174 	return (&kd->swapspc[offset]);
175 }
176 
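/*
 * Hash a (VM object, offset) pair to an index into the kernel's
 * vm_page_buckets[] array.  This mirrors the kernel's own page hash, so
 * _kvm_readfromcore() can locate a resident page in the dump by searching
 * the bucket the kernel would have put it on.
 */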
177 #define	vm_page_hash(kd, object, offset) \
178 	(((u_long)object + (u_long)(offset / kd->nbpg)) & kd->vm_page_hash_mask)
179 
180 int
181 _kvm_coreinit(kd)
182 	kvm_t *kd;
183 {
184 	struct nlist nlist[3];
185 
186 	nlist[0].n_name = "_vm_page_buckets";
187 	nlist[1].n_name = "_vm_page_hash_mask";
188 	nlist[2].n_name = 0;
189 	if (kvm_nlist(kd, nlist) != 0)
190 		return (-1);
191 
192 	if (KREAD(kd, nlist[0].n_value, &kd->vm_page_buckets) ||
193 	    KREAD(kd, nlist[1].n_value, &kd->vm_page_hash_mask))
194 		return (-1);
195 
196 	return (0);
197 }
198 
199 int
200 _kvm_readfromcore(kd, object, offset)
201 	kvm_t *kd;
202 	u_long object, offset;
203 {
204 	u_long addr;
205 	struct pglist bucket;
206 	struct vm_page mem;
207 	off_t seekpoint;
208 
209 	if (kd->vm_page_buckets == 0 &&
210 	    _kvm_coreinit(kd))
211 		return (-1);
212 
213 	addr = (u_long)&kd->vm_page_buckets[vm_page_hash(kd, object, offset)];
214 	if (KREAD(kd, addr, &bucket))
215 		return (-1);
216 
217 	addr = (u_long)bucket.tqh_first;
218 	offset &= ~(kd->nbpg -1);
219 	while (1) {
220 		if (addr == 0)
221 			return (0);
222 
223 		if (KREAD(kd, addr, &mem))
224 			return (-1);
225 
226 		if ((u_long)mem.object == object &&
227 		    (u_long)mem.offset == offset)
228 			break;
229 
230 		addr = (u_long)mem.hashq.tqe_next;
231 	}
232 
233 	seekpoint = mem.phys_addr;
234 
235 	if (lseek(kd->pmfd, seekpoint, SEEK_SET) == -1)
236 		return (-1);
237 	if (read(kd->pmfd, kd->swapspc, kd->nbpg) != kd->nbpg)
238 		return (-1);
239 
240 	return (1);
241 }
242 
243 int
244 _kvm_readfrompager(kd, vmop, offset)
245 	kvm_t *kd;
246 	struct vm_object *vmop;
247 	u_long offset;
248 {
249 	u_long addr;
250 	struct pager_struct pager;
251 	struct swpager swap;
252 	int ix;
253 	struct swblock swb;
254 	off_t seekpoint;
255 
256 	/* Read in the pager info and make sure it's a swap device. */
257 	addr = (u_long)vmop->pager;
258 	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
259 		return (-1);
260 
261 	/* Read in the swap_pager private data. */
262 	addr = (u_long)pager.pg_data;
263 	if (KREAD(kd, addr, &swap))
264 		return (-1);
265 
266 	/*
267 	 * Calculate the paging offset, and make sure it's within the
268 	 * bounds of the pager.
269 	 */
270 	offset += vmop->paging_offset;
271 	ix = offset / dbtob(swap.sw_bsize);
272 #if 0
273 	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
274 		return (-1);
275 #else
276 	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
277 		int i;
278 		printf("BUG BUG BUG BUG:\n");
279 		printf("object %p offset %lx pgoffset %lx ",
280 		    vmop, offset - vmop->paging_offset,
281 		    (u_long)vmop->paging_offset);
282 		printf("pager %p swpager %p\n",
283 		    vmop->pager, pager.pg_data);
284 		printf("osize %lx bsize %x blocks %p nblocks %x\n",
285 		    (u_long)swap.sw_osize, swap.sw_bsize, swap.sw_blocks,
286 		    swap.sw_nblocks);
287 		for (i = 0; i < swap.sw_nblocks; i++) {
288 			addr = (u_long)&swap.sw_blocks[i];
289 			if (KREAD(kd, addr, &swb))
290 				return (0);
291 			printf("sw_blocks[%d]: block %x mask %x\n", i,
292 			    swb.swb_block, swb.swb_mask);
293 		}
294 		return (-1);
295 	}
296 #endif
297 
298 	/* Read in the swap records. */
299 	addr = (u_long)&swap.sw_blocks[ix];
300 	if (KREAD(kd, addr, &swb))
301 		return (-1);
302 
303 	/* Calculate offset within pager. */
304 	offset %= dbtob(swap.sw_bsize);
305 
306 	/* Check that the page is actually present. */
307 	if ((swb.swb_mask & (1 << (offset / kd->nbpg))) == 0)
308 		return (0);
309 
310 	if (!ISALIVE(kd))
311 		return (-1);
312 
313 	/* Calculate the physical address and read the page. */
314 	seekpoint = dbtob(swb.swb_block) + (offset & ~(kd->nbpg -1));
315 
316 	if (lseek(kd->swfd, seekpoint, SEEK_SET) == -1)
317 		return (-1);
318 	if (read(kd->swfd, kd->swapspc, kd->nbpg) != kd->nbpg)
319 		return (-1);
320 
321 	return (1);
322 }
323 
324 /*
325  * Read procs from the memory file into buffer bp, which has space to hold
326  * at most maxcnt procs.
327  */
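/*
 * "what" selects the filter applied to each process (KERN_PROC_PID,
 * KERN_PROC_UID, KERN_PROC_RUID, KERN_PROC_PGRP, KERN_PROC_TTY or
 * KERN_PROC_ALL) and "arg" is the matching pid, uid, process group id or
 * controlling terminal device, as appropriate.
 */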
328 static int
329 kvm_proclist(kd, what, arg, p, bp, maxcnt)
330 	kvm_t *kd;
331 	int what, arg;
332 	struct proc *p;
333 	struct kinfo_proc *bp;
334 	int maxcnt;
335 {
336 	register int cnt = 0;
337 	struct eproc eproc;
338 	struct pgrp pgrp;
339 	struct session sess;
340 	struct tty tty;
341 	struct proc proc;
342 
343 	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
344 		if (KREAD(kd, (u_long)p, &proc)) {
345 			_kvm_err(kd, kd->program, "can't read proc at %x", p);
346 			return (-1);
347 		}
348 		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
349 			(void)KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
350 			      &eproc.e_ucred);
351 
352 		switch(what) {
353 
354 		case KERN_PROC_PID:
355 			if (proc.p_pid != (pid_t)arg)
356 				continue;
357 			break;
358 
359 		case KERN_PROC_UID:
360 			if (eproc.e_ucred.cr_uid != (uid_t)arg)
361 				continue;
362 			break;
363 
364 		case KERN_PROC_RUID:
365 			if (eproc.e_pcred.p_ruid != (uid_t)arg)
366 				continue;
367 			break;
368 		}
369 		/*
370 		 * We're going to add another proc to the set.  If this
371 		 * will overflow the buffer, assume that nprocs (or the
372 		 * proc list) is corrupt and declare an error.
373 		 */
374 		if (cnt >= maxcnt) {
375 			_kvm_err(kd, kd->program, "nprocs corrupt");
376 			return (-1);
377 		}
378 		/*
379 		 * gather eproc
380 		 */
381 		eproc.e_paddr = p;
382 		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
383 			_kvm_err(kd, kd->program, "can't read pgrp at %x",
384 				 proc.p_pgrp);
385 			return (-1);
386 		}
387 		eproc.e_sess = pgrp.pg_session;
388 		eproc.e_pgid = pgrp.pg_id;
389 		eproc.e_jobc = pgrp.pg_jobc;
390 		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
391 			_kvm_err(kd, kd->program, "can't read session at %x",
392 				pgrp.pg_session);
393 			return (-1);
394 		}
395 		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
396 			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
397 				_kvm_err(kd, kd->program,
398 					 "can't read tty at %x", sess.s_ttyp);
399 				return (-1);
400 			}
401 			eproc.e_tdev = tty.t_dev;
402 			eproc.e_tsess = tty.t_session;
403 			if (tty.t_pgrp != NULL) {
404 				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
405 					_kvm_err(kd, kd->program,
406 						 "can't read tpgrp at &x",
407 						tty.t_pgrp);
408 					return (-1);
409 				}
410 				eproc.e_tpgid = pgrp.pg_id;
411 			} else
412 				eproc.e_tpgid = -1;
413 		} else
414 			eproc.e_tdev = NODEV;
415 		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
416 		if (sess.s_leader == p)
417 			eproc.e_flag |= EPROC_SLEADER;
418 		if (proc.p_wmesg)
419 			(void)kvm_read(kd, (u_long)proc.p_wmesg,
420 			    eproc.e_wmesg, WMESGLEN);
421 
422 		(void)kvm_read(kd, (u_long)proc.p_vmspace,
423 		    (char *)&eproc.e_vm, sizeof(eproc.e_vm));
424 
425 		eproc.e_xsize = eproc.e_xrssize = 0;
426 		eproc.e_xccount = eproc.e_xswrss = 0;
427 
428 		switch (what) {
429 
430 		case KERN_PROC_PGRP:
431 			if (eproc.e_pgid != (pid_t)arg)
432 				continue;
433 			break;
434 
435 		case KERN_PROC_TTY:
436 			if ((proc.p_flag & P_CONTROLT) == 0 ||
437 			     eproc.e_tdev != (dev_t)arg)
438 				continue;
439 			break;
440 		}
441 		bcopy(&proc, &bp->kp_proc, sizeof(proc));
442 		bcopy(&eproc, &bp->kp_eproc, sizeof(eproc));
443 		++bp;
444 		++cnt;
445 	}
446 	return (cnt);
447 }
448 
449 /*
450  * Build proc info array by reading in proc list from a crash dump.
451  * Return number of procs read.  maxcnt is the max we will read.
452  */
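/*
 * Both the allproc and zombproc lists are walked; a failure while reading
 * the zombie list is ignored so that the live processes are still returned.
 */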
453 static int
454 kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
455 	kvm_t *kd;
456 	int what, arg;
457 	u_long a_allproc;
458 	u_long a_zombproc;
459 	int maxcnt;
460 {
461 	register struct kinfo_proc *bp = kd->procbase;
462 	register int acnt, zcnt;
463 	struct proc *p;
464 
465 	if (KREAD(kd, a_allproc, &p)) {
466 		_kvm_err(kd, kd->program, "cannot read allproc");
467 		return (-1);
468 	}
469 	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
470 	if (acnt < 0)
471 		return (acnt);
472 
473 	if (KREAD(kd, a_zombproc, &p)) {
474 		_kvm_err(kd, kd->program, "cannot read zombproc");
475 		return (-1);
476 	}
477 	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
478 	if (zcnt < 0)
479 		zcnt = 0;
480 
481 	return (acnt + zcnt);
482 }
483 
484 struct kinfo_proc *
485 kvm_getprocs(kd, op, arg, cnt)
486 	kvm_t *kd;
487 	int op, arg;
488 	int *cnt;
489 {
490 	size_t size;
491 	int mib[4], st, nprocs;
492 
493 	if (kd->procbase != 0) {
494 		free((void *)kd->procbase);
495 		/*
496 		 * Clear this pointer in case this call fails.  Otherwise,
497 		 * kvm_close() will free it again.
498 		 */
499 		kd->procbase = 0;
500 	}
501 	if (ISALIVE(kd)) {
502 		size = 0;
503 		mib[0] = CTL_KERN;
504 		mib[1] = KERN_PROC;
505 		mib[2] = op;
506 		mib[3] = arg;
507 		st = sysctl(mib, 4, NULL, &size, NULL, 0);
508 		if (st == -1) {
509 			_kvm_syserr(kd, kd->program, "kvm_getprocs");
510 			return (0);
511 		}
512 		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
513 		if (kd->procbase == 0)
514 			return (0);
515 		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
516 		if (st == -1) {
517 			_kvm_syserr(kd, kd->program, "kvm_getprocs");
518 			return (0);
519 		}
520 		if (size % sizeof(struct kinfo_proc) != 0) {
521 			_kvm_err(kd, kd->program,
522 				"proc size mismatch (%d total, %d chunks)",
523 				size, sizeof(struct kinfo_proc));
524 			return (0);
525 		}
526 		nprocs = size / sizeof(struct kinfo_proc);
527 	} else {
528 		struct nlist nl[4], *p;
529 
530 		nl[0].n_name = "_nprocs";
531 		nl[1].n_name = "_allproc";
532 		nl[2].n_name = "_zombproc";
533 		nl[3].n_name = 0;
534 
535 		if (kvm_nlist(kd, nl) != 0) {
536 			for (p = nl; p->n_type != 0; ++p)
537 				;
538 			_kvm_err(kd, kd->program,
539 				 "%s: no such symbol", p->n_name);
540 			return (0);
541 		}
542 		if (KREAD(kd, nl[0].n_value, &nprocs)) {
543 			_kvm_err(kd, kd->program, "can't read nprocs");
544 			return (0);
545 		}
546 		size = nprocs * sizeof(struct kinfo_proc);
547 		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
548 		if (kd->procbase == 0)
549 			return (0);
550 
551 		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
552 				      nl[2].n_value, nprocs);
553 #ifdef notdef
554 		size = nprocs * sizeof(struct kinfo_proc);
555 		(void)realloc(kd->procbase, size);
556 #endif
557 	}
558 	*cnt = nprocs;
559 	return (kd->procbase);
560 }
561 
562 void
563 _kvm_freeprocs(kd)
564 	kvm_t *kd;
565 {
566 	if (kd->procbase) {
567 		free(kd->procbase);
568 		kd->procbase = 0;
569 	}
570 }
571 
572 void *
573 _kvm_realloc(kd, p, n)
574 	kvm_t *kd;
575 	void *p;
576 	size_t n;
577 {
578 	void *np = (void *)realloc(p, n);
579 
580 	if (np == 0)
581 		_kvm_err(kd, kd->program, "out of memory");
582 	return (np);
583 }
584 
585 #ifndef MAX
586 #define MAX(a, b) ((a) > (b) ? (a) : (b))
587 #endif
588 
589 /*
590  * Read in an argument vector from the user address space of process p.
591  * addr is the user-space base address of narg null-terminated contiguous
592  * strings.  This is used to read in both the command arguments and
593  * environment strings.  Read at most maxcnt characters of string data.
594  */
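/*
 * The vector of string pointers is copied in first, then the strings are
 * fetched a page at a time through kvm_uread() and packed into kd->argspc.
 * If that buffer has to grow, the argv pointers built so far are relocated
 * to the new allocation.
 */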
595 static char **
596 kvm_argv(kd, p, addr, narg, maxcnt)
597 	kvm_t *kd;
598 	const struct proc *p;
599 	register u_long addr;
600 	register int narg;
601 	register int maxcnt;
602 {
603 	register char *np, *cp, *ep, *ap;
604 	register u_long oaddr = -1;
605 	register int len, cc;
606 	register char **argv;
607 
608 	/*
609 	 * Check that there isn't an unreasonable number of arguments,
610 	 * and that the address is in user space.
611 	 */
612 	if (narg > ARG_MAX || addr < kd->min_uva || addr >= kd->max_uva)
613 		return (0);
614 
615 	if (kd->argv == 0) {
616 		/*
617 		 * Try to avoid reallocs.
618 		 */
619 		kd->argc = MAX(narg + 1, 32);
620 		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
621 						sizeof(*kd->argv));
622 		if (kd->argv == 0)
623 			return (0);
624 	} else if (narg + 1 > kd->argc) {
625 		kd->argc = MAX(2 * kd->argc, narg + 1);
626 		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
627 						sizeof(*kd->argv));
628 		if (kd->argv == 0)
629 			return (0);
630 	}
631 	if (kd->argspc == 0) {
632 		kd->argspc = (char *)_kvm_malloc(kd, kd->nbpg);
633 		if (kd->argspc == 0)
634 			return (0);
635 		kd->arglen = kd->nbpg;
636 	}
637 	if (kd->argbuf == 0) {
638 		kd->argbuf = (char *)_kvm_malloc(kd, kd->nbpg);
639 		if (kd->argbuf == 0)
640 			return (0);
641 	}
642 	cc = sizeof(char *) * narg;
643 	if (kvm_uread(kd, p, addr, (char *)kd->argv, cc) != cc)
644 		return (0);
645 	ap = np = kd->argspc;
646 	argv = kd->argv;
647 	len = 0;
648 	/*
649 	 * Loop over pages, filling in the argument vector.
650 	 */
651 	while (argv < kd->argv + narg && *argv != 0) {
652 		addr = (u_long)*argv & ~(kd->nbpg - 1);
653 		if (addr != oaddr) {
654 			if (kvm_uread(kd, p, addr, kd->argbuf, kd->nbpg) !=
655 			    kd->nbpg)
656 				return (0);
657 			oaddr = addr;
658 		}
659 		addr = (u_long)*argv & (kd->nbpg - 1);
660 		cp = kd->argbuf + addr;
661 		cc = kd->nbpg - addr;
662 		if (maxcnt > 0 && cc > maxcnt - len)
663 			cc = maxcnt - len;
664 		ep = memchr(cp, '\0', cc);
665 		if (ep != 0)
666 			cc = ep - cp + 1;
667 		if (len + cc > kd->arglen) {
668 			register int off;
669 			register char **pp;
670 			register char *op = kd->argspc;
671 
672 			kd->arglen *= 2;
673 			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
674 							  kd->arglen);
675 			if (kd->argspc == 0)
676 				return (0);
677 			/*
678 			 * Adjust argv pointers in case realloc moved
679 			 * the string space.
680 			 */
681 			off = kd->argspc - op;
682 			for (pp = kd->argv; pp < argv; pp++)
683 				*pp += off;
684 			ap += off;
685 			np += off;
686 		}
687 		memcpy(np, cp, cc);
688 		np += cc;
689 		len += cc;
690 		if (ep != 0) {
691 			*argv++ = ap;
692 			ap = np;
693 		} else
694 			*argv += cc;
695 		if (maxcnt > 0 && len >= maxcnt) {
696 			/*
697 			 * We're stopping prematurely.  Terminate the
698 			 * current string.
699 			 */
700 			if (ep == 0) {
701 				*np = '\0';
702 				*argv++ = ap;
703 			}
704 			break;
705 		}
706 	}
707 	/* Make sure argv is terminated. */
708 	*argv = 0;
709 	return (kd->argv);
710 }
711 
712 static void
713 ps_str_a(p, addr, n)
714 	struct ps_strings *p;
715 	u_long *addr;
716 	int *n;
717 {
718 	*addr = (u_long)p->ps_argvstr;
719 	*n = p->ps_nargvstr;
720 }
721 
722 static void
723 ps_str_e(p, addr, n)
724 	struct ps_strings *p;
725 	u_long *addr;
726 	int *n;
727 {
728 	*addr = (u_long)p->ps_envstr;
729 	*n = p->ps_nenvstr;
730 }
731 
732 /*
733  * Determine if the proc indicated by p is still active.
734  * This test is not 100% foolproof in theory, but chances of
735  * being wrong are very low.
736  */
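/*
 * The proc is re-read from the kernel and accepted if the pid still matches
 * and the process has not turned into a zombie behind our back.
 */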
737 static int
738 proc_verify(kd, kernp, p)
739 	kvm_t *kd;
740 	u_long kernp;
741 	const struct proc *p;
742 {
743 	struct proc kernproc;
744 
745 	/*
746 	 * Just read in the whole proc.  It's not that big relative
747 	 * to the cost of the read system call.
748 	 */
749 	if (kvm_read(kd, kernp, (char *)&kernproc, sizeof(kernproc)) !=
750 	    sizeof(kernproc))
751 		return (0);
752 	return (p->p_pid == kernproc.p_pid &&
753 		(kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
754 }
755 
756 static char **
757 kvm_doargv(kd, kp, nchr, info)
758 	kvm_t *kd;
759 	const struct kinfo_proc *kp;
760 	int nchr;
761 	void (*info)(struct ps_strings *, u_long *, int *);
762 {
763 	register const struct proc *p = &kp->kp_proc;
764 	register char **ap;
765 	u_long addr;
766 	int cnt;
767 	struct ps_strings arginfo;
768 
769 	/*
770 	 * Pointers are stored at the top of the user stack.
771 	 */
772 	if (p->p_stat == SZOMB)
773 		return (0);
774 	cnt = kvm_uread(kd, p, kd->usrstack - sizeof(arginfo),
775 	    (char *)&arginfo, sizeof(arginfo));
776 	if (cnt != sizeof(arginfo))
777 		return (0);
778 
779 	(*info)(&arginfo, &addr, &cnt);
780 	if (cnt == 0)
781 		return (0);
782 	ap = kvm_argv(kd, p, addr, cnt, nchr);
783 	/*
784 	 * For live kernels, make sure this process didn't go away.
785 	 */
786 	if (ap != 0 && ISALIVE(kd) &&
787 	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
788 		ap = 0;
789 	return (ap);
790 }
791 
792 /*
793  * Get the command args.  This code is now machine independent.
794  */
795 char **
796 kvm_getargv(kd, kp, nchr)
797 	kvm_t *kd;
798 	const struct kinfo_proc *kp;
799 	int nchr;
800 {
801 	return (kvm_doargv(kd, kp, nchr, ps_str_a));
802 }
803 
804 char **
805 kvm_getenvv(kd, kp, nchr)
806 	kvm_t *kd;
807 	const struct kinfo_proc *kp;
808 	int nchr;
809 {
810 	return (kvm_doargv(kd, kp, nchr, ps_str_e));
811 }
812 
813 /*
814  * Read from user space.  The user context is given by p.
815  */
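/*
 * len bytes starting at user virtual address uva are copied into buf, one
 * page fragment at a time via _kvm_uread().  The number of bytes copied is
 * returned; 0 is returned if any page cannot be located.
 */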
816 ssize_t
817 kvm_uread(kd, p, uva, buf, len)
818 	kvm_t *kd;
819 	register const struct proc *p;
820 	register u_long uva;
821 	register char *buf;
822 	register size_t len;
823 {
824 	register char *cp;
825 
826 	cp = buf;
827 	while (len > 0) {
828 		register int cc;
829 		register char *dp;
830 		u_long cnt;
831 
832 		dp = _kvm_uread(kd, p, uva, &cnt);
833 		if (dp == 0) {
834 			_kvm_err(kd, 0, "invalid address (%x)", uva);
835 			return (0);
836 		}
837 		cc = MIN(cnt, len);
838 		bcopy(dp, cp, cc);
839 
840 		cp += cc;
841 		uva += cc;
842 		len -= cc;
843 	}
844 	return (ssize_t)(cp - buf);
845 }
846