xref: /netbsd-src/lib/libkvm/kvm_proc.c (revision 7c7c171d130af9949261bc7dce2150a03c3d239c)
1 /*	$NetBSD: kvm_proc.c,v 1.23 1998/02/12 06:55:29 chs Exp $	*/
2 
3 /*-
4  * Copyright (c) 1994, 1995 Charles M. Hannum.  All rights reserved.
5  * Copyright (c) 1989, 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software developed by the Computer Systems
9  * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
10  * BG 91-66 and contributed to Berkeley.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  */
40 
41 #include <sys/cdefs.h>
42 #if defined(LIBC_SCCS) && !defined(lint)
43 #if 0
44 static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
45 #else
46 __RCSID("$NetBSD: kvm_proc.c,v 1.23 1998/02/12 06:55:29 chs Exp $");
47 #endif
48 #endif /* LIBC_SCCS and not lint */
49 
50 /*
51  * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
52  * users of this code, so we've factored it out into a separate module.
53  * Thus, we keep this grunge out of the other kvm applications (i.e.,
54  * most other applications are interested only in open/close/read/nlist).
55  */
56 
57 #include <sys/param.h>
58 #include <sys/user.h>
59 #include <sys/proc.h>
60 #include <sys/exec.h>
61 #include <sys/stat.h>
62 #include <sys/ioctl.h>
63 #include <sys/tty.h>
64 #include <stdlib.h>
65 #include <string.h>
66 #include <unistd.h>
67 #include <nlist.h>
68 #include <kvm.h>
69 
70 #include <vm/vm.h>
71 #include <vm/vm_param.h>
72 #include <vm/swap_pager.h>
73 
74 #if defined(UVM)
75 #include <uvm/uvm_extern.h>
76 #endif
77 
78 #include <sys/sysctl.h>
79 
80 #include <limits.h>
81 #include <db.h>
82 #include <paths.h>
83 
84 #include "kvm_private.h"
85 
/*
 * Evaluate to non-zero (failure) when a full-sized copy of the kernel
 * object at "addr" cannot be read into "obj".
 */
#define KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (char *)(obj), sizeof(*obj)) != sizeof(*obj))

/* Map a user virtual address in a process to readable page contents. */
char		*_kvm_uread __P((kvm_t *, const struct proc *, u_long, u_long *));
#if !defined(UVM)
/* Helpers for reading user pages out of a crash dump (pre-UVM VM only). */
int		_kvm_coreinit __P((kvm_t *));
int		_kvm_readfromcore __P((kvm_t *, u_long, u_long));
int		_kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long));
#endif
ssize_t		kvm_uread __P((kvm_t *, const struct proc *, u_long, char *,
		    size_t));

/* Forward declarations for the static helpers defined below. */
static char	**kvm_argv __P((kvm_t *, const struct proc *, u_long, int,
		    int));
static int	kvm_deadprocs __P((kvm_t *, int, int, u_long, u_long, int));
static char	**kvm_doargv __P((kvm_t *, const struct kinfo_proc *, int,
		    void (*)(struct ps_strings *, u_long *, int *)));
static int	kvm_proclist __P((kvm_t *, int, int, struct proc *,
		    struct kinfo_proc *, int));
static int	proc_verify __P((kvm_t *, u_long, const struct proc *));
static void	ps_str_a __P((struct ps_strings *, u_long *, int *));
static void	ps_str_e __P((struct ps_strings *, u_long *, int *));
108 
/*
 * Translate user virtual address "va" of process "p" into readable data.
 * On success, returns a pointer into the per-descriptor staging buffer
 * (kd->swapspc) and stores in *cnt the number of valid bytes from that
 * point to the end of the page.  Returns 0 (NULL) on any failure.
 */
char *
_kvm_uread(kd, p, va, cnt)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *cnt;
{
	u_long addr, head;
	u_long offset;
	struct vm_map_entry vme;
#if defined(UVM)
	struct vm_amap amap;
	struct vm_anon *anonp, anon;
	struct vm_page pg;
	int slot;
#else
	struct vm_object vmo;
	int rv;
#endif

	/* Lazily allocate the one-page staging buffer. */
	if (kd->swapspc == 0) {
		kd->swapspc = (char *)_kvm_malloc(kd, kd->nbpg);
		if (kd->swapspc == 0)
			return (0);
	}

	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	head = (u_long)&p->p_vmspace->vm_map.header;
	addr = head;
	while (1) {
		if (KREAD(kd, addr, &vme))
			return (0);

#if defined(UVM)
		/* Under UVM, anonymous memory hangs off the entry's amap. */
		if (va >= vme.start && va < vme.end &&
		    vme.aref.ar_amap != NULL)
			break;

#else
		if (va >= vme.start && va < vme.end &&
		    vme.object.vm_object != 0)
			break;
#endif

		/* The map entries are circular; back at the header == miss. */
		addr = (u_long)vme.next;
		if (addr == head)
			return (0);

	}
#if defined(UVM)

	/*
	 * we found the map entry, now to find the object...
	 */
	if (vme.aref.ar_amap == NULL)
		return NULL;

	addr = (u_long)vme.aref.ar_amap;
	if (KREAD(kd, addr, &amap))
		return NULL;

	offset = va - vme.start;
	slot = offset / kd->nbpg + vme.aref.ar_slotoff;
	/* sanity-check slot number */
	/*
	 * NOTE(review): ">=" looks intended here -- slot == am_nslot would
	 * be one past the last valid slot.  Confirm against uvm_amap.h
	 * before changing.
	 */
	if (slot  > amap.am_nslot)
		return NULL;

	/* Fetch the anon pointer for this page, then the anon itself. */
	addr = (u_long)amap.am_anon + (offset / kd->nbpg) * sizeof(anonp);
	if (KREAD(kd, addr, &anonp))
		return NULL;

	addr = (u_long)anonp;
	if (KREAD(kd, addr, &anon))
		return NULL;

	/* Resident page: read it from physical memory (the core file). */
	addr = (u_long)anon.u.an_page;
	if (addr) {
		if (KREAD(kd, addr, &pg))
			return NULL;

		if (lseek(kd->pmfd, (off_t)pg.phys_addr, SEEK_SET) == -1)
			return NULL;

		if (read(kd->pmfd, kd->swapspc, kd->nbpg) != kd->nbpg)
			return NULL;
	}
	else {
		/*
		 * Paged out: read the page from the swap device.
		 * NOTE(review): unlike the resident case above, this swap
		 * offset is not cast to off_t before lseek -- could
		 * truncate on large swap areas; confirm.
		 */
		if (lseek(kd->swfd, anon.an_swslot * kd->nbpg, SEEK_SET) == -1)
			return NULL;
		if (read(kd->swfd, kd->swapspc, kd->nbpg) != kd->nbpg)
			return NULL;
	}
#else
	/*
	 * We found the right object -- follow shadow links.
	 */
	offset = va - vme.start + vme.offset;
	addr = (u_long)vme.object.vm_object;

	while (1) {
		/* Try reading the page from core first. */
		if ((rv = _kvm_readfromcore(kd, addr, offset)))
			break;

		if (KREAD(kd, addr, &vmo))
			return (0);

		/* If there is a pager here, see if it has the page. */
		if (vmo.pager != 0 &&
		    (rv = _kvm_readfrompager(kd, &vmo, offset)))
			break;

		/* Move down the shadow chain. */
		addr = (u_long)vmo.shadow;
		if (addr == 0)
			return (0);
		offset += vmo.shadow_offset;
	}

	/* rv == -1 means a hard read error rather than "not resident". */
	if (rv == -1)
		return (0);
#endif

	/* Found the page. */
	offset %= kd->nbpg;
	*cnt = kd->nbpg - offset;
	return (&kd->swapspc[offset]);
}
241 
242 #if !defined(UVM)
243 
/*
 * Hash a (vm_object, offset) pair into the kernel's vm_page bucket array.
 * NOTE: "kd" and "offset" are evaluated more than once; pass only
 * side-effect-free arguments.
 */
#define	vm_page_hash(kd, object, offset) \
	(((u_long)object + (u_long)(offset / kd->nbpg)) & kd->vm_page_hash_mask)
246 
/*
 * One-time setup for reading pages out of a crash dump: locate the
 * kernel's vm_page hash table and cache its base address and mask in
 * the kvm descriptor.  Returns 0 on success, -1 on any failure.
 */
int
_kvm_coreinit(kd)
	kvm_t *kd;
{
	struct nlist syms[3];

	syms[0].n_name = "_vm_page_buckets";
	syms[1].n_name = "_vm_page_hash_mask";
	syms[2].n_name = 0;

	if (kvm_nlist(kd, syms) != 0 ||
	    KREAD(kd, syms[0].n_value, &kd->vm_page_buckets) ||
	    KREAD(kd, syms[1].n_value, &kd->vm_page_hash_mask))
		return (-1);

	return (0);
}
265 
266 int
267 _kvm_readfromcore(kd, object, offset)
268 	kvm_t *kd;
269 	u_long object, offset;
270 {
271 	u_long addr;
272 	struct pglist bucket;
273 	struct vm_page mem;
274 	off_t seekpoint;
275 
276 	if (kd->vm_page_buckets == 0 &&
277 	    _kvm_coreinit(kd))
278 		return (-1);
279 
280 	addr = (u_long)&kd->vm_page_buckets[vm_page_hash(kd, object, offset)];
281 	if (KREAD(kd, addr, &bucket))
282 		return (-1);
283 
284 	addr = (u_long)bucket.tqh_first;
285 	offset &= ~(kd->nbpg -1);
286 	while (1) {
287 		if (addr == 0)
288 			return (0);
289 
290 		if (KREAD(kd, addr, &mem))
291 			return (-1);
292 
293 		if ((u_long)mem.object == object &&
294 		    (u_long)mem.offset == offset)
295 			break;
296 
297 		addr = (u_long)mem.hashq.tqe_next;
298 	}
299 
300 	seekpoint = mem.phys_addr;
301 
302 	if (lseek(kd->pmfd, seekpoint, 0) == -1)
303 		return (-1);
304 	if (read(kd->pmfd, kd->swapspc, kd->nbpg) != kd->nbpg)
305 		return (-1);
306 
307 	return (1);
308 }
309 
/*
 * Read the page at "offset" within object "vmop" from its pager, which
 * must be a swap pager.  Returns 1 when the page was read into
 * kd->swapspc, 0 when the page is not present in swap, and -1 on error
 * or if the backing pager is not a swap pager.
 */
int
_kvm_readfrompager(kd, vmop, offset)
	kvm_t *kd;
	struct vm_object *vmop;
	u_long offset;
{
	u_long addr;
	struct pager_struct pager;
	struct swpager swap;
	int ix;
	struct swblock swb;
	off_t seekpoint;

	/* Read in the pager info and make sure it's a swap device. */
	addr = (u_long)vmop->pager;
	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
		return (-1);

	/* Read in the swap_pager private data. */
	addr = (u_long)pager.pg_data;
	if (KREAD(kd, addr, &swap))
		return (-1);

	/*
	 * Calculate the paging offset, and make sure it's within the
	 * bounds of the pager.
	 */
	offset += vmop->paging_offset;
	ix = offset / dbtob(swap.sw_bsize);
#if 0
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
		return (-1);
#else
	/*
	 * NOTE(review): debugging scaffolding left enabled -- it dumps
	 * swap-pager state to stdout from inside a library, and printf()
	 * is used although <stdio.h> is not among this file's visible
	 * includes.  Consider reverting to the simple "#if 0" branch
	 * above once the underlying bug is resolved.
	 */
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
		int i;
		printf("BUG BUG BUG BUG:\n");
		printf("object %p offset %lx pgoffset %lx ",
		    vmop, offset - vmop->paging_offset,
		    (u_long)vmop->paging_offset);
		printf("pager %p swpager %p\n",
		    vmop->pager, pager.pg_data);
		printf("osize %lx bsize %x blocks %p nblocks %x\n",
		    (u_long)swap.sw_osize, swap.sw_bsize, swap.sw_blocks,
		    swap.sw_nblocks);
		for (i = 0; i < swap.sw_nblocks; i++) {
			addr = (u_long)&swap.sw_blocks[i];
			if (KREAD(kd, addr, &swb))
				return (0);
			printf("sw_blocks[%d]: block %x mask %x\n", i,
			    swb.swb_block, swb.swb_mask);
		}
		return (-1);
	}
#endif

	/* Read in the swap records. */
	addr = (u_long)&swap.sw_blocks[ix];
	if (KREAD(kd, addr, &swb))
		return (-1);

	/* Calculate offset within pager. */
	offset %= dbtob(swap.sw_bsize);

	/* Check that the page is actually present. */
	if ((swb.swb_mask & (1 << (offset / kd->nbpg))) == 0)
		return (0);

	/* Swap reads only make sense against a dead kernel's swap device. */
	if (!ISALIVE(kd))
		return (-1);

	/* Calculate the physical address and read the page. */
	seekpoint = dbtob(swb.swb_block) + (offset & ~(kd->nbpg -1));

	/* whence 0 == SEEK_SET */
	if (lseek(kd->swfd, seekpoint, 0) == -1)
		return (-1);
	if (read(kd->swfd, kd->swapspc, kd->nbpg) != kd->nbpg)
		return (-1);

	return (1);
}
390 #endif /* !defined(UVM) */
391 
392 /*
 * Read procs from memory file into buffer bp, which has space to hold
394  * at most maxcnt procs.
395  */
396 static int
397 kvm_proclist(kd, what, arg, p, bp, maxcnt)
398 	kvm_t *kd;
399 	int what, arg;
400 	struct proc *p;
401 	struct kinfo_proc *bp;
402 	int maxcnt;
403 {
404 	int cnt = 0;
405 	struct eproc eproc;
406 	struct pgrp pgrp;
407 	struct session sess;
408 	struct tty tty;
409 	struct proc proc;
410 
411 	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
412 		if (KREAD(kd, (u_long)p, &proc)) {
413 			_kvm_err(kd, kd->program, "can't read proc at %x", p);
414 			return (-1);
415 		}
416 		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
417 			(void)KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
418 			      &eproc.e_ucred);
419 
420 		switch(what) {
421 
422 		case KERN_PROC_PID:
423 			if (proc.p_pid != (pid_t)arg)
424 				continue;
425 			break;
426 
427 		case KERN_PROC_UID:
428 			if (eproc.e_ucred.cr_uid != (uid_t)arg)
429 				continue;
430 			break;
431 
432 		case KERN_PROC_RUID:
433 			if (eproc.e_pcred.p_ruid != (uid_t)arg)
434 				continue;
435 			break;
436 		}
437 		/*
438 		 * We're going to add another proc to the set.  If this
439 		 * will overflow the buffer, assume the reason is because
440 		 * nprocs (or the proc list) is corrupt and declare an error.
441 		 */
442 		if (cnt >= maxcnt) {
443 			_kvm_err(kd, kd->program, "nprocs corrupt");
444 			return (-1);
445 		}
446 		/*
447 		 * gather eproc
448 		 */
449 		eproc.e_paddr = p;
450 		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
451 			_kvm_err(kd, kd->program, "can't read pgrp at %x",
452 				 proc.p_pgrp);
453 			return (-1);
454 		}
455 		eproc.e_sess = pgrp.pg_session;
456 		eproc.e_pgid = pgrp.pg_id;
457 		eproc.e_jobc = pgrp.pg_jobc;
458 		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
459 			_kvm_err(kd, kd->program, "can't read session at %x",
460 				pgrp.pg_session);
461 			return (-1);
462 		}
463 		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
464 			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
465 				_kvm_err(kd, kd->program,
466 					 "can't read tty at %x", sess.s_ttyp);
467 				return (-1);
468 			}
469 			eproc.e_tdev = tty.t_dev;
470 			eproc.e_tsess = tty.t_session;
471 			if (tty.t_pgrp != NULL) {
472 				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
473 					_kvm_err(kd, kd->program,
474 						 "can't read tpgrp at &x",
475 						tty.t_pgrp);
476 					return (-1);
477 				}
478 				eproc.e_tpgid = pgrp.pg_id;
479 			} else
480 				eproc.e_tpgid = -1;
481 		} else
482 			eproc.e_tdev = NODEV;
483 		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
484 		if (sess.s_leader == p)
485 			eproc.e_flag |= EPROC_SLEADER;
486 		if (proc.p_wmesg)
487 			(void)kvm_read(kd, (u_long)proc.p_wmesg,
488 			    eproc.e_wmesg, WMESGLEN);
489 
490 		(void)kvm_read(kd, (u_long)proc.p_vmspace,
491 		    (char *)&eproc.e_vm, sizeof(eproc.e_vm));
492 
493 		eproc.e_xsize = eproc.e_xrssize = 0;
494 		eproc.e_xccount = eproc.e_xswrss = 0;
495 
496 		switch (what) {
497 
498 		case KERN_PROC_PGRP:
499 			if (eproc.e_pgid != (pid_t)arg)
500 				continue;
501 			break;
502 
503 		case KERN_PROC_TTY:
504 			if ((proc.p_flag & P_CONTROLT) == 0 ||
505 			     eproc.e_tdev != (dev_t)arg)
506 				continue;
507 			break;
508 		}
509 		bcopy(&proc, &bp->kp_proc, sizeof(proc));
510 		bcopy(&eproc, &bp->kp_eproc, sizeof(eproc));
511 		++bp;
512 		++cnt;
513 	}
514 	return (cnt);
515 }
516 
517 /*
518  * Build proc info array by reading in proc list from a crash dump.
519  * Return number of procs read.  maxcnt is the max we will read.
520  */
521 static int
522 kvm_deadprocs(kd, what, arg, a_allproc, a_zombproc, maxcnt)
523 	kvm_t *kd;
524 	int what, arg;
525 	u_long a_allproc;
526 	u_long a_zombproc;
527 	int maxcnt;
528 {
529 	struct kinfo_proc *bp = kd->procbase;
530 	int acnt, zcnt;
531 	struct proc *p;
532 
533 	if (KREAD(kd, a_allproc, &p)) {
534 		_kvm_err(kd, kd->program, "cannot read allproc");
535 		return (-1);
536 	}
537 	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
538 	if (acnt < 0)
539 		return (acnt);
540 
541 	if (KREAD(kd, a_zombproc, &p)) {
542 		_kvm_err(kd, kd->program, "cannot read zombproc");
543 		return (-1);
544 	}
545 	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
546 	if (zcnt < 0)
547 		zcnt = 0;
548 
549 	return (acnt + zcnt);
550 }
551 
/*
 * Return an array of kinfo_proc entries selected by op/arg, storing the
 * entry count in *cnt.  On a live kernel the work is done via
 * sysctl(2); on a dead kernel the in-core proc lists are walked
 * directly.  The returned array is owned by the kvm descriptor and is
 * released by kvm_close() or by the next call here.  Returns 0 (NULL)
 * on failure.
 */
struct kinfo_proc *
kvm_getprocs(kd, op, arg, cnt)
	kvm_t *kd;
	int op, arg;
	int *cnt;
{
	size_t size;
	int mib[4], st, nprocs;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
		/* Size probe first, then the real fetch. */
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			/* Cast size_t values: %d expects int. */
			_kvm_err(kd, kd->program,
				"proc size mismatch (%d total, %d chunks)",
				(int)size, (int)sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[4], *p;

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_zombproc";
		nl[3].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			/* Report the first symbol that failed to resolve. */
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
				      nl[2].n_value, nprocs);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}
629 
630 void
631 _kvm_freeprocs(kd)
632 	kvm_t *kd;
633 {
634 	if (kd->procbase) {
635 		free(kd->procbase);
636 		kd->procbase = 0;
637 	}
638 }
639 
640 void *
641 _kvm_realloc(kd, p, n)
642 	kvm_t *kd;
643 	void *p;
644 	size_t n;
645 {
646 	void *np = (void *)realloc(p, n);
647 
648 	if (np == 0)
649 		_kvm_err(kd, kd->program, "out of memory");
650 	return (np);
651 }
652 
#ifndef MAX
/* Larger of two values; NOTE: evaluates each argument twice. */
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif
656 
657 /*
658  * Read in an argument vector from the user address space of process p.
659  * addr if the user-space base address of narg null-terminated contiguous
660  * strings.  This is used to read in both the command arguments and
661  * environment strings.  Read at most maxcnt characters of strings.
662  */
663 static char **
664 kvm_argv(kd, p, addr, narg, maxcnt)
665 	kvm_t *kd;
666 	const struct proc *p;
667 	u_long addr;
668 	int narg;
669 	int maxcnt;
670 {
671 	char *np, *cp, *ep, *ap;
672 	u_long oaddr = -1;
673 	int len, cc;
674 	char **argv;
675 
676 	/*
677 	 * Check that there aren't an unreasonable number of agruments,
678 	 * and that the address is in user space.
679 	 */
680 	if (narg > ARG_MAX || addr < kd->min_uva || addr >= kd->max_uva)
681 		return (0);
682 
683 	if (kd->argv == 0) {
684 		/*
685 		 * Try to avoid reallocs.
686 		 */
687 		kd->argc = MAX(narg + 1, 32);
688 		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
689 						sizeof(*kd->argv));
690 		if (kd->argv == 0)
691 			return (0);
692 	} else if (narg + 1 > kd->argc) {
693 		kd->argc = MAX(2 * kd->argc, narg + 1);
694 		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
695 						sizeof(*kd->argv));
696 		if (kd->argv == 0)
697 			return (0);
698 	}
699 	if (kd->argspc == 0) {
700 		kd->argspc = (char *)_kvm_malloc(kd, kd->nbpg);
701 		if (kd->argspc == 0)
702 			return (0);
703 		kd->arglen = kd->nbpg;
704 	}
705 	if (kd->argbuf == 0) {
706 		kd->argbuf = (char *)_kvm_malloc(kd, kd->nbpg);
707 		if (kd->argbuf == 0)
708 			return (0);
709 	}
710 	cc = sizeof(char *) * narg;
711 	if (kvm_uread(kd, p, addr, (char *)kd->argv, cc) != cc)
712 		return (0);
713 	ap = np = kd->argspc;
714 	argv = kd->argv;
715 	len = 0;
716 	/*
717 	 * Loop over pages, filling in the argument vector.
718 	 */
719 	while (argv < kd->argv + narg && *argv != 0) {
720 		addr = (u_long)*argv & ~(kd->nbpg - 1);
721 		if (addr != oaddr) {
722 			if (kvm_uread(kd, p, addr, kd->argbuf, kd->nbpg) !=
723 			    kd->nbpg)
724 				return (0);
725 			oaddr = addr;
726 		}
727 		addr = (u_long)*argv & (kd->nbpg - 1);
728 		cp = kd->argbuf + addr;
729 		cc = kd->nbpg - addr;
730 		if (maxcnt > 0 && cc > maxcnt - len)
731 			cc = maxcnt - len;;
732 		ep = memchr(cp, '\0', cc);
733 		if (ep != 0)
734 			cc = ep - cp + 1;
735 		if (len + cc > kd->arglen) {
736 			int off;
737 			char **pp;
738 			char *op = kd->argspc;
739 
740 			kd->arglen *= 2;
741 			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
742 							  kd->arglen);
743 			if (kd->argspc == 0)
744 				return (0);
745 			/*
746 			 * Adjust argv pointers in case realloc moved
747 			 * the string space.
748 			 */
749 			off = kd->argspc - op;
750 			for (pp = kd->argv; pp < argv; pp++)
751 				*pp += off;
752 			ap += off;
753 			np += off;
754 		}
755 		memcpy(np, cp, cc);
756 		np += cc;
757 		len += cc;
758 		if (ep != 0) {
759 			*argv++ = ap;
760 			ap = np;
761 		} else
762 			*argv += cc;
763 		if (maxcnt > 0 && len >= maxcnt) {
764 			/*
765 			 * We're stopping prematurely.  Terminate the
766 			 * current string.
767 			 */
768 			if (ep == 0) {
769 				*np = '\0';
770 				*argv++ = ap;
771 			}
772 			break;
773 		}
774 	}
775 	/* Make sure argv is terminated. */
776 	*argv = 0;
777 	return (kd->argv);
778 }
779 
780 static void
781 ps_str_a(p, addr, n)
782 	struct ps_strings *p;
783 	u_long *addr;
784 	int *n;
785 {
786 	*addr = (u_long)p->ps_argvstr;
787 	*n = p->ps_nargvstr;
788 }
789 
790 static void
791 ps_str_e(p, addr, n)
792 	struct ps_strings *p;
793 	u_long *addr;
794 	int *n;
795 {
796 	*addr = (u_long)p->ps_envstr;
797 	*n = p->ps_nenvstr;
798 }
799 
800 /*
801  * Determine if the proc indicated by p is still active.
802  * This test is not 100% foolproof in theory, but chances of
803  * being wrong are very low.
804  */
805 static int
806 proc_verify(kd, kernp, p)
807 	kvm_t *kd;
808 	u_long kernp;
809 	const struct proc *p;
810 {
811 	struct proc kernproc;
812 
813 	/*
814 	 * Just read in the whole proc.  It's not that big relative
815 	 * to the cost of the read system call.
816 	 */
817 	if (kvm_read(kd, kernp, (char *)&kernproc, sizeof(kernproc)) !=
818 	    sizeof(kernproc))
819 		return (0);
820 	return (p->p_pid == kernproc.p_pid &&
821 		(kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
822 }
823 
824 static char **
825 kvm_doargv(kd, kp, nchr, info)
826 	kvm_t *kd;
827 	const struct kinfo_proc *kp;
828 	int nchr;
829 	void (*info)(struct ps_strings *, u_long *, int *);
830 {
831 	const struct proc *p = &kp->kp_proc;
832 	char **ap;
833 	u_long addr;
834 	int cnt;
835 	struct ps_strings arginfo;
836 
837 	/*
838 	 * Pointers are stored at the top of the user stack.
839 	 */
840 	if (p->p_stat == SZOMB)
841 		return (0);
842 	cnt = kvm_uread(kd, p, kd->usrstack - sizeof(arginfo),
843 	    (char *)&arginfo, sizeof(arginfo));
844 	if (cnt != sizeof(arginfo))
845 		return (0);
846 
847 	(*info)(&arginfo, &addr, &cnt);
848 	if (cnt == 0)
849 		return (0);
850 	ap = kvm_argv(kd, p, addr, cnt, nchr);
851 	/*
852 	 * For live kernels, make sure this process didn't go away.
853 	 */
854 	if (ap != 0 && ISALIVE(kd) &&
855 	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
856 		ap = 0;
857 	return (ap);
858 }
859 
860 /*
861  * Get the command args.  This code is now machine independent.
862  */
863 char **
864 kvm_getargv(kd, kp, nchr)
865 	kvm_t *kd;
866 	const struct kinfo_proc *kp;
867 	int nchr;
868 {
869 	return (kvm_doargv(kd, kp, nchr, ps_str_a));
870 }
871 
872 char **
873 kvm_getenvv(kd, kp, nchr)
874 	kvm_t *kd;
875 	const struct kinfo_proc *kp;
876 	int nchr;
877 {
878 	return (kvm_doargv(kd, kp, nchr, ps_str_e));
879 }
880 
881 /*
882  * Read from user space.  The user context is given by p.
883  */
884 ssize_t
885 kvm_uread(kd, p, uva, buf, len)
886 	kvm_t *kd;
887 	const struct proc *p;
888 	u_long uva;
889 	char *buf;
890 	size_t len;
891 {
892 	char *cp;
893 
894 	cp = buf;
895 	while (len > 0) {
896 		int cc;
897 		char *dp;
898 		u_long cnt;
899 
900 		dp = _kvm_uread(kd, p, uva, &cnt);
901 		if (dp == 0) {
902 			_kvm_err(kd, 0, "invalid address (%x)", uva);
903 			return (0);
904 		}
905 		cc = MIN(cnt, len);
906 		bcopy(dp, cp, cc);
907 
908 		cp += cc;
909 		uva += cc;
910 		len -= cc;
911 	}
912 	return (ssize_t)(cp - buf);
913 }
914