/*	$NetBSD: kvm_proc.c,v 1.28 1998/09/27 18:16:00 christos Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1989, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_proc.c	8.3 (Berkeley) 9/23/93";
#else
__RCSID("$NetBSD: kvm_proc.c,v 1.28 1998/09/27 18:16:00 christos Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Proc traversal interface for kvm.  ps and w are (probably) the exclusive
 * users of this code, so we've factored it out into a separate module.
 * Thus, we keep this grunge out of the other kvm applications (i.e.,
 * most other applications are interested only in open/close/read/nlist).
 */
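
/*
 * Illustrative sketch, not part of this library: a minimal client along the
 * lines of what ps(1) or w(1) do, using only the public entry points
 * declared in <kvm.h> (kvm_openfiles, kvm_getprocs, kvm_close).  The
 * function name list_procs() and its body are hypothetical example code.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <fcntl.h>
 *	#include <kvm.h>
 *	#include <limits.h>
 *	#include <stdio.h>
 *
 *	void
 *	list_procs(void)
 *	{
 *		char errbuf[_POSIX2_LINE_MAX];
 *		struct kinfo_proc *kp;
 *		kvm_t *kd;
 *		int i, nprocs;
 *
 *		// NULLs select the running kernel, /dev/mem and default swap.
 *		kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf);
 *		if (kd == NULL)
 *			return;
 *		kp = kvm_getprocs(kd, KERN_PROC_ALL, 0, &nprocs);
 *		for (i = 0; kp != NULL && i < nprocs; i++)
 *			printf("%d\t%s\n", (int)kp[i].kp_proc.p_pid,
 *			    kp[i].kp_proc.p_comm);
 *		(void)kvm_close(kd);
 *	}
 */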

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/exec.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/tty.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>

#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif

#include <sys/sysctl.h>

#include <limits.h>
#include <db.h>
#include <paths.h>

#include "kvm_private.h"

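/*
 * KREAD reads a fixed-size kernel structure through kvm_read() and
 * evaluates to non-zero when fewer than sizeof(*obj) bytes could be
 * copied, so callers treat a true result as an error.
 */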
#define KREAD(kd, addr, obj) \
	(kvm_read(kd, addr, (void *)(obj), sizeof(*obj)) != sizeof(*obj))

char		*_kvm_uread __P((kvm_t *, const struct proc *, u_long, u_long *));
#if !defined(UVM)
int		_kvm_coreinit __P((kvm_t *));
int		_kvm_readfromcore __P((kvm_t *, u_long, u_long));
int		_kvm_readfrompager __P((kvm_t *, struct vm_object *, u_long));
#endif
ssize_t		kvm_uread __P((kvm_t *, const struct proc *, u_long, char *,
		    size_t));

static char	**kvm_argv __P((kvm_t *, const struct proc *, u_long, int,
		    int));
static int	kvm_deadprocs __P((kvm_t *, int, int, u_long, u_long, u_long,
		    int));
static char	**kvm_doargv __P((kvm_t *, const struct kinfo_proc *, int,
		    void (*)(struct ps_strings *, u_long *, int *)));
static int	kvm_proclist __P((kvm_t *, int, int, struct proc *,
		    struct kinfo_proc *, int));
static int	proc_verify __P((kvm_t *, u_long, const struct proc *));
static void	ps_str_a __P((struct ps_strings *, u_long *, int *));
static void	ps_str_e __P((struct ps_strings *, u_long *, int *));

char *
_kvm_uread(kd, p, va, cnt)
	kvm_t *kd;
	const struct proc *p;
	u_long va;
	u_long *cnt;
{
	int true = 1;
	u_long addr, head;
	u_long offset;
	struct vm_map_entry vme;
#if defined(UVM)
	struct vm_amap amap;
	struct vm_anon *anonp, anon;
	struct vm_page pg;
	u_long slot;
#else
	struct vm_object vmo;
	int rv;
#endif

	if (kd->swapspc == 0) {
		kd->swapspc = (char *)_kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->swapspc == 0)
			return (0);
	}

	/*
	 * Look through the address map for the memory object
	 * that corresponds to the given virtual address.
	 * The header just has the entire valid range.
	 */
	head = (u_long)&p->p_vmspace->vm_map.header;
	addr = head;
	while (true) {
		if (KREAD(kd, addr, &vme))
			return (0);

#if defined(UVM)
		if (va >= vme.start && va < vme.end &&
		    vme.aref.ar_amap != NULL)
			break;

#else
		if (va >= vme.start && va < vme.end &&
		    vme.object.vm_object != 0)
			break;
#endif

		addr = (u_long)vme.next;
		if (addr == head)
			return (0);

	}
#if defined(UVM)

	/*
	 * we found the map entry, now to find the object...
	 */
	if (vme.aref.ar_amap == NULL)
		return NULL;

	addr = (u_long)vme.aref.ar_amap;
	if (KREAD(kd, addr, &amap))
		return NULL;

	offset = va - vme.start;
	slot = offset / kd->nbpg + vme.aref.ar_slotoff;
	/* sanity-check slot number */
	if (slot > amap.am_nslot)
		return NULL;

	addr = (u_long)amap.am_anon + (offset / kd->nbpg) * sizeof(anonp);
	if (KREAD(kd, addr, &anonp))
		return NULL;

	addr = (u_long)anonp;
	if (KREAD(kd, addr, &anon))
		return NULL;

	addr = (u_long)anon.u.an_page;
	if (addr) {
		if (KREAD(kd, addr, &pg))
			return NULL;

		if (pread(kd->pmfd, (void *)kd->swapspc, (size_t)kd->nbpg,
		    (off_t)pg.phys_addr) != kd->nbpg)
			return NULL;
	} else {
		if (pread(kd->swfd, (void *)kd->swapspc, (size_t)kd->nbpg,
		    (off_t)(anon.an_swslot * kd->nbpg)) != kd->nbpg)
			return NULL;
	}
#else
	/*
	 * We found the right object -- follow shadow links.
	 */
	offset = va - vme.start + vme.offset;
	addr = (u_long)vme.object.vm_object;

	while (1) {
		/* Try reading the page from core first. */
		if ((rv = _kvm_readfromcore(kd, addr, offset)))
			break;

		if (KREAD(kd, addr, &vmo))
			return (0);

		/* If there is a pager here, see if it has the page. */
		if (vmo.pager != 0 &&
		    (rv = _kvm_readfrompager(kd, &vmo, offset)))
			break;

		/* Move down the shadow chain. */
		addr = (u_long)vmo.shadow;
		if (addr == 0)
			return (0);
		offset += vmo.shadow_offset;
	}

	if (rv == -1)
		return (0);
#endif

	/* Found the page. */
	offset %= kd->nbpg;
	*cnt = kd->nbpg - offset;
	return (&kd->swapspc[(size_t)offset]);
}

#if !defined(UVM)

#define	vm_page_hash(kd, object, offset) \
	(((u_long)object + (u_long)(offset / kd->nbpg)) & kd->vm_page_hash_mask)

int
_kvm_coreinit(kd)
	kvm_t *kd;
{
	struct nlist nlist[3];

	nlist[0].n_name = "_vm_page_buckets";
	nlist[1].n_name = "_vm_page_hash_mask";
	nlist[2].n_name = 0;
	if (kvm_nlist(kd, nlist) != 0)
		return (-1);

	if (KREAD(kd, nlist[0].n_value, &kd->vm_page_buckets) ||
	    KREAD(kd, nlist[1].n_value, &kd->vm_page_hash_mask))
		return (-1);

	return (0);
}

int
_kvm_readfromcore(kd, object, offset)
	kvm_t *kd;
	u_long object, offset;
{
	u_long addr;
	struct pglist bucket;
	struct vm_page mem;
	off_t seekpoint;

	if (kd->vm_page_buckets == 0 &&
	    _kvm_coreinit(kd))
		return (-1);

	addr = (u_long)&kd->vm_page_buckets[vm_page_hash(kd, object, offset)];
	if (KREAD(kd, addr, &bucket))
		return (-1);

	addr = (u_long)bucket.tqh_first;
	offset &= ~(kd->nbpg - 1);
	while (1) {
		if (addr == 0)
			return (0);

		if (KREAD(kd, addr, &mem))
			return (-1);

		if ((u_long)mem.object == object &&
		    (u_long)mem.offset == offset)
			break;

		addr = (u_long)mem.hashq.tqe_next;
	}

	seekpoint = mem.phys_addr;

	if (pread(kd->pmfd, kd->swapspc, kd->nbpg, seekpoint) != kd->nbpg)
		return (-1);

	return (1);
}

int
_kvm_readfrompager(kd, vmop, offset)
	kvm_t *kd;
	struct vm_object *vmop;
	u_long offset;
{
	u_long addr;
	struct pager_struct pager;
	struct swpager swap;
	int ix;
	struct swblock swb;
	off_t seekpoint;

	/* Read in the pager info and make sure it's a swap device. */
	addr = (u_long)vmop->pager;
	if (KREAD(kd, addr, &pager) || pager.pg_type != PG_SWAP)
		return (-1);

	/* Read in the swap_pager private data. */
	addr = (u_long)pager.pg_data;
	if (KREAD(kd, addr, &swap))
		return (-1);

	/*
	 * Calculate the paging offset, and make sure it's within the
	 * bounds of the pager.
	 */
	offset += vmop->paging_offset;
	ix = offset / dbtob(swap.sw_bsize);
#if 0
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks)
		return (-1);
#else
	if (swap.sw_blocks == 0 || ix >= swap.sw_nblocks) {
		int i;
		printf("BUG BUG BUG BUG:\n");
		printf("object %p offset %lx pgoffset %lx ",
		    vmop, offset - vmop->paging_offset,
		    (u_long)vmop->paging_offset);
		printf("pager %p swpager %p\n",
		    vmop->pager, pager.pg_data);
		printf("osize %lx bsize %x blocks %p nblocks %x\n",
		    (u_long)swap.sw_osize, swap.sw_bsize, swap.sw_blocks,
		    swap.sw_nblocks);
		for (i = 0; i < swap.sw_nblocks; i++) {
			addr = (u_long)&swap.sw_blocks[i];
			if (KREAD(kd, addr, &swb))
				return (0);
			printf("sw_blocks[%d]: block %x mask %x\n", i,
			    swb.swb_block, swb.swb_mask);
		}
		return (-1);
	}
#endif

	/* Read in the swap records. */
	addr = (u_long)&swap.sw_blocks[ix];
	if (KREAD(kd, addr, &swb))
		return (-1);

	/* Calculate offset within pager. */
	offset %= dbtob(swap.sw_bsize);

	/* Check that the page is actually present. */
	if ((swb.swb_mask & (1 << (offset / kd->nbpg))) == 0)
		return (0);

	if (!ISALIVE(kd))
		return (-1);

	/* Calculate the physical address and read the page. */
	seekpoint = dbtob(swb.swb_block) + (offset & ~(kd->nbpg - 1));

	if (pread(kd->swfd, kd->swapspc, kd->nbpg, seekpoint) != kd->nbpg)
		return (-1);

	return (1);
}
#endif /* !defined(UVM) */

/*
 * Read procs from the memory file into buffer bp, which has space to hold
 * at most maxcnt procs.
 */
static int
kvm_proclist(kd, what, arg, p, bp, maxcnt)
	kvm_t *kd;
	int what, arg;
	struct proc *p;
	struct kinfo_proc *bp;
	int maxcnt;
{
	int cnt = 0;
	struct eproc eproc;
	struct pgrp pgrp;
	struct session sess;
	struct tty tty;
	struct proc proc;

	for (; cnt < maxcnt && p != NULL; p = proc.p_list.le_next) {
		if (KREAD(kd, (u_long)p, &proc)) {
			_kvm_err(kd, kd->program, "can't read proc at %x", p);
			return (-1);
		}
		if (KREAD(kd, (u_long)proc.p_cred, &eproc.e_pcred) == 0)
			if (KREAD(kd, (u_long)eproc.e_pcred.pc_ucred,
			    &eproc.e_ucred)) {
				_kvm_err(kd, kd->program,
				    "can't read proc credentials at %x", p);
				return (-1);
			}

		switch (what) {

		case KERN_PROC_PID:
			if (proc.p_pid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_UID:
			if (eproc.e_ucred.cr_uid != (uid_t)arg)
				continue;
			break;

		case KERN_PROC_RUID:
			if (eproc.e_pcred.p_ruid != (uid_t)arg)
				continue;
			break;
		}
		/*
		 * We're going to add another proc to the set.  If this
		 * would overflow the buffer, assume that nprocs (or the
		 * proc list) is corrupt and declare an error.
		 */
		if (cnt >= maxcnt) {
			_kvm_err(kd, kd->program, "nprocs corrupt");
			return (-1);
		}
		/*
		 * Gather eproc.
		 */
		eproc.e_paddr = p;
		if (KREAD(kd, (u_long)proc.p_pgrp, &pgrp)) {
			_kvm_err(kd, kd->program, "can't read pgrp at %x",
				 proc.p_pgrp);
			return (-1);
		}
		eproc.e_sess = pgrp.pg_session;
		eproc.e_pgid = pgrp.pg_id;
		eproc.e_jobc = pgrp.pg_jobc;
		if (KREAD(kd, (u_long)pgrp.pg_session, &sess)) {
			_kvm_err(kd, kd->program, "can't read session at %x",
				pgrp.pg_session);
			return (-1);
		}
		if ((proc.p_flag & P_CONTROLT) && sess.s_ttyp != NULL) {
			if (KREAD(kd, (u_long)sess.s_ttyp, &tty)) {
				_kvm_err(kd, kd->program,
					 "can't read tty at %x", sess.s_ttyp);
				return (-1);
			}
			eproc.e_tdev = tty.t_dev;
			eproc.e_tsess = tty.t_session;
			if (tty.t_pgrp != NULL) {
				if (KREAD(kd, (u_long)tty.t_pgrp, &pgrp)) {
					_kvm_err(kd, kd->program,
						 "can't read tpgrp at %x",
						tty.t_pgrp);
					return (-1);
				}
				eproc.e_tpgid = pgrp.pg_id;
			} else
				eproc.e_tpgid = -1;
		} else
			eproc.e_tdev = NODEV;
		eproc.e_flag = sess.s_ttyvp ? EPROC_CTTY : 0;
		if (sess.s_leader == p)
			eproc.e_flag |= EPROC_SLEADER;
		if (proc.p_wmesg)
			(void)kvm_read(kd, (u_long)proc.p_wmesg,
			    eproc.e_wmesg, WMESGLEN);

		(void)kvm_read(kd, (u_long)proc.p_vmspace,
		    (void *)&eproc.e_vm, sizeof(eproc.e_vm));

		eproc.e_xsize = eproc.e_xrssize = 0;
		eproc.e_xccount = eproc.e_xswrss = 0;

		switch (what) {

		case KERN_PROC_PGRP:
			if (eproc.e_pgid != (pid_t)arg)
				continue;
			break;

		case KERN_PROC_TTY:
			if ((proc.p_flag & P_CONTROLT) == 0 ||
			     eproc.e_tdev != (dev_t)arg)
				continue;
			break;
		}
		memcpy(&bp->kp_proc, &proc, sizeof(proc));
		memcpy(&bp->kp_eproc, &eproc, sizeof(eproc));
		++bp;
		++cnt;
	}
	return (cnt);
}

/*
 * Build proc info array by reading in proc list from a crash dump.
 * Return number of procs read.  maxcnt is the max we will read.
 */
static int
kvm_deadprocs(kd, what, arg, a_allproc, a_deadproc, a_zombproc, maxcnt)
	kvm_t *kd;
	int what, arg;
	u_long a_allproc;
	u_long a_deadproc;
	u_long a_zombproc;
	int maxcnt;
{
	struct kinfo_proc *bp = kd->procbase;
	int acnt, dcnt, zcnt;
	struct proc *p;

	if (KREAD(kd, a_allproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read allproc");
		return (-1);
	}
	acnt = kvm_proclist(kd, what, arg, p, bp, maxcnt);
	if (acnt < 0)
		return (acnt);

	if (KREAD(kd, a_deadproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read deadproc");
		return (-1);
	}

	/* Append the dead and zombie procs after the active ones. */
	dcnt = kvm_proclist(kd, what, arg, p, bp + acnt, maxcnt - acnt);
	if (dcnt < 0)
		dcnt = 0;

	if (KREAD(kd, a_zombproc, &p)) {
		_kvm_err(kd, kd->program, "cannot read zombproc");
		return (-1);
	}
	zcnt = kvm_proclist(kd, what, arg, p, bp + acnt + dcnt,
	    maxcnt - (acnt + dcnt));
	if (zcnt < 0)
		zcnt = 0;

	return (acnt + dcnt + zcnt);
}

struct kinfo_proc *
kvm_getprocs(kd, op, arg, cnt)
	kvm_t *kd;
	int op, arg;
	int *cnt;
{
	size_t size;
	int mib[4], st, nprocs;

	if (kd->procbase != 0) {
		free((void *)kd->procbase);
		/*
		 * Clear this pointer in case this call fails.  Otherwise,
		 * kvm_close() will free it again.
		 */
		kd->procbase = 0;
	}
	if (ISALIVE(kd)) {
		size = 0;
		mib[0] = CTL_KERN;
		mib[1] = KERN_PROC;
		mib[2] = op;
		mib[3] = arg;
		st = sysctl(mib, 4, NULL, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);
		st = sysctl(mib, 4, kd->procbase, &size, NULL, 0);
		if (st == -1) {
			_kvm_syserr(kd, kd->program, "kvm_getprocs");
			return (0);
		}
		if (size % sizeof(struct kinfo_proc) != 0) {
			_kvm_err(kd, kd->program,
				"proc size mismatch (%lu total, %lu chunks)",
				(u_long)size,
				(u_long)sizeof(struct kinfo_proc));
			return (0);
		}
		nprocs = size / sizeof(struct kinfo_proc);
	} else {
		struct nlist nl[5], *p;

		nl[0].n_name = "_nprocs";
		nl[1].n_name = "_allproc";
		nl[2].n_name = "_deadproc";
		nl[3].n_name = "_zombproc";
		nl[4].n_name = 0;

		if (kvm_nlist(kd, nl) != 0) {
			for (p = nl; p->n_type != 0; ++p)
				;
			_kvm_err(kd, kd->program,
				 "%s: no such symbol", p->n_name);
			return (0);
		}
		if (KREAD(kd, nl[0].n_value, &nprocs)) {
			_kvm_err(kd, kd->program, "can't read nprocs");
			return (0);
		}
		size = nprocs * sizeof(struct kinfo_proc);
		kd->procbase = (struct kinfo_proc *)_kvm_malloc(kd, size);
		if (kd->procbase == 0)
			return (0);

		nprocs = kvm_deadprocs(kd, op, arg, nl[1].n_value,
		    nl[2].n_value, nl[3].n_value, nprocs);
#ifdef notdef
		size = nprocs * sizeof(struct kinfo_proc);
		(void)realloc(kd->procbase, size);
#endif
	}
	*cnt = nprocs;
	return (kd->procbase);
}

/*
 * Release the process buffer allocated by a previous kvm_getprocs() call.
 */
void
_kvm_freeprocs(kd)
	kvm_t *kd;
{
	if (kd->procbase) {
		free(kd->procbase);
		kd->procbase = 0;
	}
}

/*
 * realloc() wrapper that reports allocation failures through _kvm_err().
 */
void *
_kvm_realloc(kd, p, n)
	kvm_t *kd;
	void *p;
	size_t n;
{
	void *np = (void *)realloc(p, n);

	if (np == 0)
		_kvm_err(kd, kd->program, "out of memory");
	return (np);
}

#ifndef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif

/*
 * Read in an argument vector from the user address space of process p.
 * addr is the user-space base address of narg null-terminated contiguous
 * strings.  This is used to read in both the command arguments and
 * environment strings.  Read at most maxcnt characters of strings.
 */
static char **
kvm_argv(kd, p, addr, narg, maxcnt)
	kvm_t *kd;
	const struct proc *p;
	u_long addr;
	int narg;
	int maxcnt;
{
	char *np, *cp, *ep, *ap;
	u_long oaddr = (u_long)~0L;
	u_long len;
	size_t cc;
	char **argv;

	/*
	 * Check that there aren't an unreasonable number of arguments,
	 * and that the address is in user space.
	 */
	if (narg > ARG_MAX || addr < kd->min_uva || addr >= kd->max_uva)
		return (0);

	if (kd->argv == 0) {
		/*
		 * Try to avoid reallocs.
		 */
		kd->argc = MAX(narg + 1, 32);
		kd->argv = (char **)_kvm_malloc(kd, kd->argc *
						sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	} else if (narg + 1 > kd->argc) {
		kd->argc = MAX(2 * kd->argc, narg + 1);
		kd->argv = (char **)_kvm_realloc(kd, kd->argv, kd->argc *
						sizeof(*kd->argv));
		if (kd->argv == 0)
			return (0);
	}
	if (kd->argspc == 0) {
		kd->argspc = (char *)_kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->argspc == 0)
			return (0);
		kd->arglen = kd->nbpg;
	}
	if (kd->argbuf == 0) {
		kd->argbuf = (char *)_kvm_malloc(kd, (size_t)kd->nbpg);
		if (kd->argbuf == 0)
			return (0);
	}
	cc = sizeof(char *) * narg;
	if (kvm_uread(kd, p, addr, (void *)kd->argv, cc) != cc)
		return (0);
	ap = np = kd->argspc;
	argv = kd->argv;
	len = 0;
	/*
	 * Loop over pages, filling in the argument vector.
	 */
	while (argv < kd->argv + narg && *argv != 0) {
		addr = (u_long)*argv & ~(kd->nbpg - 1);
		if (addr != oaddr) {
			if (kvm_uread(kd, p, addr, kd->argbuf,
			    (size_t)kd->nbpg) != kd->nbpg)
				return (0);
			oaddr = addr;
		}
		addr = (u_long)*argv & (kd->nbpg - 1);
		cp = kd->argbuf + (size_t)addr;
		cc = kd->nbpg - (size_t)addr;
		if (maxcnt > 0 && cc > (size_t)(maxcnt - len))
			cc = (size_t)(maxcnt - len);
		ep = memchr(cp, '\0', cc);
		if (ep != 0)
			cc = ep - cp + 1;
		if (len + cc > kd->arglen) {
			int off;
			char **pp;
			char *op = kd->argspc;

			kd->arglen *= 2;
			kd->argspc = (char *)_kvm_realloc(kd, kd->argspc,
			    (size_t)kd->arglen);
			if (kd->argspc == 0)
				return (0);
			/*
			 * Adjust argv pointers in case realloc moved
			 * the string space.
			 */
			off = kd->argspc - op;
			for (pp = kd->argv; pp < argv; pp++)
				*pp += off;
			ap += off;
			np += off;
		}
		memcpy(np, cp, cc);
		np += cc;
		len += cc;
		if (ep != 0) {
			*argv++ = ap;
			ap = np;
		} else
			*argv += cc;
		if (maxcnt > 0 && len >= maxcnt) {
			/*
			 * We're stopping prematurely.  Terminate the
			 * current string.
			 */
			if (ep == 0) {
				*np = '\0';
				*argv++ = ap;
			}
			break;
		}
	}
	/* Make sure argv is terminated. */
	*argv = 0;
	return (kd->argv);
}

/*
 * Return the base address and count of the argument strings recorded
 * in the ps_strings structure.
 */
static void
ps_str_a(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_argvstr;
	*n = p->ps_nargvstr;
}

/*
 * Return the base address and count of the environment strings recorded
 * in the ps_strings structure.
 */
static void
ps_str_e(p, addr, n)
	struct ps_strings *p;
	u_long *addr;
	int *n;
{
	*addr = (u_long)p->ps_envstr;
	*n = p->ps_nenvstr;
}

/*
 * Determine if the proc indicated by p is still active.
 * This test is not 100% foolproof in theory, but chances of
 * being wrong are very low.
 */
static int
proc_verify(kd, kernp, p)
	kvm_t *kd;
	u_long kernp;
	const struct proc *p;
{
	struct proc kernproc;

	/*
	 * Just read in the whole proc.  It's not that big relative
	 * to the cost of the read system call.
	 */
	if (kvm_read(kd, kernp, (void *)&kernproc, sizeof(kernproc)) !=
	    sizeof(kernproc))
		return (0);
	return (p->p_pid == kernproc.p_pid &&
		(kernproc.p_stat != SZOMB || p->p_stat == SZOMB));
}

static char **
kvm_doargv(kd, kp, nchr, info)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
	void (*info)(struct ps_strings *, u_long *, int *);
{
	const struct proc *p = &kp->kp_proc;
	char **ap;
	u_long addr;
	int cnt;
	struct ps_strings arginfo;

	/*
	 * Pointers are stored at the top of the user stack.
	 */
	if (p->p_stat == SZOMB)
		return (0);
	cnt = kvm_uread(kd, p, kd->usrstack - sizeof(arginfo),
	    (void *)&arginfo, sizeof(arginfo));
	if (cnt != sizeof(arginfo))
		return (0);

	(*info)(&arginfo, &addr, &cnt);
	if (cnt == 0)
		return (0);
	ap = kvm_argv(kd, p, addr, cnt, nchr);
	/*
	 * For live kernels, make sure this process didn't go away.
	 */
	if (ap != 0 && ISALIVE(kd) &&
	    !proc_verify(kd, (u_long)kp->kp_eproc.e_paddr, p))
		ap = 0;
	return (ap);
}

/*
 * Get the command args.  This code is now machine independent.
 */
char **
kvm_getargv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_a));
}

char **
kvm_getenvv(kd, kp, nchr)
	kvm_t *kd;
	const struct kinfo_proc *kp;
	int nchr;
{
	return (kvm_doargv(kd, kp, nchr, ps_str_e));
}
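
/*
 * Illustrative sketch, not part of this library: kvm_getargv() and
 * kvm_getenvv() return a NULL-terminated vector that lives in storage
 * owned by the kvm descriptor and is reused by the next call, so callers
 * normally consume or copy it right away.  The helper print_args() below
 * is hypothetical example code; the nchr argument of 0 relies on
 * kvm_argv() above treating a non-positive limit as "no limit".
 *
 *	void
 *	print_args(kvm_t *kd, const struct kinfo_proc *kp)
 *	{
 *		char **argv;
 *
 *		argv = kvm_getargv(kd, kp, 0);
 *		if (argv == NULL)
 *			return;
 *		for (; *argv != NULL; argv++)
 *			printf("%s ", *argv);
 *		printf("\n");
 *	}
 */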

/*
 * Read from user space.  The user context is given by p.
 */
ssize_t
kvm_uread(kd, p, uva, buf, len)
	kvm_t *kd;
	const struct proc *p;
	u_long uva;
	char *buf;
	size_t len;
{
	char *cp;

	cp = buf;
	while (len > 0) {
		size_t cc;
		char *dp;
		u_long cnt;

		dp = _kvm_uread(kd, p, uva, &cnt);
		if (dp == 0) {
			_kvm_err(kd, 0, "invalid address (%lx)", uva);
			return (0);
		}
		cc = (size_t)MIN(cnt, len);
		memcpy(cp, dp, cc);
		cp += cc;
		uva += cc;
		len -= cc;
	}
	return (ssize_t)(cp - buf);
}