xref: /netbsd-src/lib/libkvm/kvm.c (revision cda4f8f6ee55684e8d311b86c99ea59191e6b74f)
1 /*-
2  * Copyright (c) 1993 Christopher G. Demetriou
3  * Copyright (c) 1989 The Regents of the University of California.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by the University of
17  *	California, Berkeley and its contributors.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #if defined(LIBC_SCCS) && !defined(lint)
36 /* from: static char sccsid[] = "@(#)kvm.c	5.18 (Berkeley) 5/7/91"; */
37 static char rcsid[] = "$Id: kvm.c,v 1.11 1993/07/19 12:37:13 mycroft Exp $";
38 #endif /* LIBC_SCCS and not lint */
39 
40 #include <sys/param.h>
41 #include <sys/user.h>
42 #include <sys/proc.h>
43 #include <sys/ioctl.h>
44 #include <sys/kinfo.h>
45 #include <sys/tty.h>
46 #include <sys/exec.h>
47 #include <machine/vmparam.h>
48 #include <fcntl.h>
49 #include <nlist.h>
50 #include <kvm.h>
51 #include <ndbm.h>
52 #include <limits.h>
53 #include <paths.h>
54 #include <stdio.h>
55 #include <string.h>
56 
57 #ifdef SPPWAIT
58 #define NEWVM
59 #endif
60 
61 #ifdef NEWVM
62 #define	btop(x)		(((unsigned)(x)) >> PGSHIFT)	/* XXX */
63 #define	ptob(x)		((caddr_t)((x) << PGSHIFT))	/* XXX */
64 #include <vm/vm.h>	/* ??? kinfo_proc currently includes this*/
65 #include <vm/vm_page.h>
66 #include <vm/swap_pager.h>
67 #include <sys/kinfo_proc.h>
68 #ifdef hp300
69 #include <hp300/hp300/pte.h>
70 #endif
71 #else /* NEWVM */
72 #include <machine/pte.h>
73 #include <sys/vmmac.h>
74 #include <sys/text.h>
75 #endif /* NEWVM */
76 
77 /*
78  * files
79  */
80 static	const char *unixf, *memf, *kmemf, *swapf;
81 static	int unixx, mem, kmem, swap;
82 static	DBM *db;
83 /*
84  * flags
85  */
86 static	int deadkernel;
87 static	int kvminit = 0;
88 static	int kvmfilesopen = 0;
89 /*
90  * state
91  */
92 static	struct kinfo_proc *kvmprocbase, *kvmprocptr;
93 static	int kvmnprocs;
94 /*
95  * u. buffer
96  */
97 static union {
98 	struct	user user;
99 	char	upages[UPAGES][NBPG];
100 } user;
101 
102 #ifdef NEWVM
103 struct swapblk {
104 	long	offset;		/* offset in swap device */
105 	long	size;		/* remaining size of block in swap device */
106 };
107 #endif
108 /*
109  * random other stuff
110  */
111 #ifndef NEWVM
112 static	struct pte *Usrptmap, *usrpt;
113 static	struct	pte *Sysmap;
114 static	int	Syssize;
115 #endif
116 static	int	dmmin, dmmax;
117 static	int	pcbpf;
118 static	int	nswap;
119 static	char	*tmp;
120 #if defined(hp300)
121 static	int	lowram;
122 static	struct ste *Sysseg;
123 #endif
124 #if defined(i386)
125 static	struct pde *PTD;
126 #endif
127 
128 #define basename(cp)	((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
129 #define	MAXSYMSIZE	256
130 
131 #if defined(hp300)
132 #define pftoc(f)	((f) - lowram)
133 #define iskva(v)	(1)
134 #endif
135 
136 #ifndef pftoc
137 #define pftoc(f)	(f)
138 #endif
139 #ifndef iskva
140 #define iskva(v)	((u_long)(v) & KERNBASE)
141 #endif
142 
/*
 * Kernel symbols looked up by getkvars().  The X_* indices must match
 * each entry's position in the array.  Entries from X_DEADKERNEL on
 * are only needed for crash dumps; for a live kernel kvm_openfiles()
 * truncates the list by clearing that entry's name.
 */
static struct nlist nl[] = {
	{ "_Usrptmap" },
#define	X_USRPTMAP	0
	{ "_usrpt" },
#define	X_USRPT		1
	{ "_nswap" },
#define	X_NSWAP		2
	{ "_dmmin" },
#define	X_DMMIN		3
	{ "_dmmax" },
#define	X_DMMAX		4
	{ "_vm_page_buckets" },
#define X_VM_PAGE_BUCKETS	5
	{ "_vm_page_hash_mask" },
#define X_VM_PAGE_HASH_MASK	6
	{ "_page_shift" },
#define X_PAGE_SHIFT	7
	/*
	 * everything here and down, only if a dead kernel
	 */
	{ "_Sysmap" },
#define	X_SYSMAP	8
#define	X_DEADKERNEL	X_SYSMAP
	{ "_Syssize" },
#define	X_SYSSIZE	9
	{ "_allproc" },
#define X_ALLPROC	10
	{ "_zombproc" },
#define X_ZOMBPROC	11
	{ "_nproc" },
#define	X_NPROC		12
#define	X_LAST		12
#if defined(hp300)
	{ "_Sysseg" },
#define	X_SYSSEG	(X_LAST+1)
	{ "_lowram" },
#define	X_LOWRAM	(X_LAST+2)
#endif
#if defined(i386)
	{ "_IdlePTD" },
#define	X_IdlePTD	(X_LAST+1)
#endif
	{ "" },
};
187 
/* forward declarations of internal helpers (pre-ANSI: no prototypes) */
static off_t Vtophys();
static void klseek(), seterr(), setsyserr(), vstodb();
static int getkvars(), kvm_doprocs(), kvm_init();
#ifdef NEWVM
static int vatosw();
static int findpage();
#endif
195 
196 /*
197  * returns 	0 if files were opened now,
198  * 		1 if files were already opened,
199  *		-1 if files could not be opened.
200  */
201 kvm_openfiles(uf, mf, sf)
202 	const char *uf, *mf, *sf;
203 {
204 	if (kvmfilesopen)
205 		return (1);
206 	unixx = mem = kmem = swap = -1;
207 	unixf = (uf == NULL) ? _PATH_UNIX : uf;
208 	memf = (mf == NULL) ? _PATH_MEM : mf;
209 
210 	if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
211 		setsyserr("can't open %s", unixf);
212 		goto failed;
213 	}
214 	if ((mem = open(memf, O_RDONLY, 0)) == -1) {
215 		setsyserr("can't open %s", memf);
216 		goto failed;
217 	}
218 	if (sf != NULL)
219 		swapf = sf;
220 	if (mf != NULL) {
221 		deadkernel++;
222 		kmemf = mf;
223 		kmem = mem;
224 		swap = -1;
225 	} else {
226 		kmemf = _PATH_KMEM;
227 		if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
228 			setsyserr("can't open %s", kmemf);
229 			goto failed;
230 		}
231 		swapf = (sf == NULL) ?  _PATH_DRUM : sf;
232 		/*
233 		 * live kernel - avoid looking up nlist entries
234 		 * past X_DEADKERNEL.
235 		 */
236 		nl[X_DEADKERNEL].n_name = "";
237 	}
238 	if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
239 		seterr("can't open %s", swapf);
240 		goto failed;
241 	}
242 	kvmfilesopen++;
243 	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1) /*XXX*/
244 		return (-1);
245 	return (0);
246 failed:
247 	kvm_close();
248 	return (-1);
249 }
250 
251 static
252 kvm_init(uf, mf, sf)
253 	char *uf, *mf, *sf;
254 {
255 	if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
256 		return (-1);
257 	if (getkvars() == -1)
258 		return (-1);
259 	kvminit = 1;
260 
261 	return (0);
262 }
263 
264 kvm_close()
265 {
266 	if (unixx != -1) {
267 		close(unixx);
268 		unixx = -1;
269 	}
270 	if (kmem != -1) {
271 		if (kmem != mem)
272 			close(kmem);
273 		/* otherwise kmem is a copy of mem, and will be closed below */
274 		kmem = -1;
275 	}
276 	if (mem != -1) {
277 		close(mem);
278 		mem = -1;
279 	}
280 	if (swap != -1) {
281 		close(swap);
282 		swap = -1;
283 	}
284 	if (db != NULL) {
285 		dbm_close(db);
286 		db = NULL;
287 	}
288 	kvminit = 0;
289 	kvmfilesopen = 0;
290 	deadkernel = 0;
291 #ifndef NEWVM
292 	if (Sysmap) {
293 		free(Sysmap);
294 		Sysmap = NULL;
295 	}
296 #endif
297 }
298 
299 kvm_nlist(nl)
300 	struct nlist *nl;
301 {
302 	datum key, data;
303 	char dbname[MAXPATHLEN];
304 	char dbversion[_POSIX2_LINE_MAX];
305 	char kversion[_POSIX2_LINE_MAX];
306 	int dbversionlen;
307 	char symbuf[MAXSYMSIZE];
308 	struct nlist nbuf, *n;
309 	int num, did;
310 
311 	if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
312 		return (-1);
313 	if (deadkernel)
314 		goto hard2;
315 	/*
316 	 * initialize key datum
317 	 */
318 	key.dptr = symbuf;
319 
320 	if (db != NULL)
321 		goto win;	/* off to the races */
322 	/*
323 	 * open database
324 	 */
325 	sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
326 	if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
327 		goto hard2;
328 	/*
329 	 * read version out of database
330 	 */
331 	bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
332 	key.dsize = (sizeof ("VERSION") - 1);
333 	data = dbm_fetch(db, key);
334 	if (data.dptr == NULL)
335 		goto hard1;
336 	bcopy(data.dptr, dbversion, data.dsize);
337 	dbversionlen = data.dsize;
338 	/*
339 	 * read version string from kernel memory
340 	 */
341 	bcopy("_version", symbuf, sizeof ("_version")-1);
342 	key.dsize = (sizeof ("_version")-1);
343 	data = dbm_fetch(db, key);
344 	if (data.dptr == NULL)
345 		goto hard1;
346 	if (data.dsize != sizeof (struct nlist))
347 		goto hard1;
348 	bcopy(data.dptr, &nbuf, sizeof (struct nlist));
349 	lseek(kmem, nbuf.n_value, 0);
350 	if (read(kmem, kversion, dbversionlen) != dbversionlen)
351 		goto hard1;
352 	/*
353 	 * if they match, we win - otherwise do it the hard way
354 	 */
355 	if (bcmp(dbversion, kversion, dbversionlen) != 0)
356 		goto hard1;
357 	/*
358 	 * getem from the database.
359 	 */
360 win:
361 	num = did = 0;
362 	for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
363 		int len;
364 		/*
365 		 * clear out fields from users buffer
366 		 */
367 		n->n_type = 0;
368 		n->n_other = 0;
369 		n->n_desc = 0;
370 		n->n_value = 0;
371 		/*
372 		 * query db
373 		 */
374 		if ((len = strlen(n->n_name)) > MAXSYMSIZE) {
375 			seterr("symbol too large");
376 			return (-1);
377 		}
378 		(void)strcpy(symbuf, n->n_name);
379 		key.dsize = len;
380 		data = dbm_fetch(db, key);
381 		if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
382 			continue;
383 		bcopy(data.dptr, &nbuf, sizeof (struct nlist));
384 		n->n_value = nbuf.n_value;
385 		n->n_type = nbuf.n_type;
386 		n->n_desc = nbuf.n_desc;
387 		n->n_other = nbuf.n_other;
388 		did++;
389 	}
390 	return (num - did);
391 hard1:
392 	dbm_close(db);
393 	db = NULL;
394 hard2:
395 	num = nlist(unixf, nl);
396 	if (num == -1)
397 		seterr("nlist (hard way) failed");
398 	return (num);
399 }
400 
401 kvm_getprocs(what, arg)
402 	int what, arg;
403 {
404 	static int	ocopysize = -1;
405 
406 	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
407 		return (NULL);
408 	if (!deadkernel) {
409 		int ret, copysize;
410 
411 		if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
412 			setsyserr("can't get estimate for kerninfo");
413 			return (-1);
414 		}
415 		copysize = ret;
416 		if (copysize > ocopysize || !kvmprocbase) {
417 			if (ocopysize == -1 || !kvmprocbase)
418 				kvmprocbase =
419 					(struct kinfo_proc *)malloc(copysize);
420 			else
421 				kvmprocbase =
422 					(struct kinfo_proc *)realloc(kvmprocbase,
423 								copysize);
424 			if (!kvmprocbase) {
425 				seterr("out of memory");
426 				return (-1);
427 			}
428 		}
429 		ocopysize = copysize;
430 		if ((ret = getkerninfo(what, kvmprocbase, &copysize,
431 		     arg)) == -1) {
432 			setsyserr("can't get proc list");
433 			return (-1);
434 		}
435 		if (copysize % sizeof (struct kinfo_proc)) {
436 			seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
437 				copysize, sizeof (struct kinfo_proc));
438 			return (-1);
439 		}
440 		kvmnprocs = copysize / sizeof (struct kinfo_proc);
441 	} else {
442 		int nproc;
443 
444 		if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
445 		    sizeof (int)) != sizeof (int)) {
446 			seterr("can't read nproc");
447 			return (-1);
448 		}
449 		if ((kvmprocbase = (struct kinfo_proc *)
450 		     malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
451 			seterr("out of memory (addr: %x nproc = %d)",
452 				nl[X_NPROC].n_value, nproc);
453 			return (-1);
454 		}
455 		kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
456 		realloc(kvmprocbase, kvmnprocs * sizeof (struct kinfo_proc));
457 	}
458 	kvmprocptr = kvmprocbase;
459 
460 	return (kvmnprocs);
461 }
462 
463 /*
464  * XXX - should NOT give up so easily - especially since the kernel
465  * may be corrupt (it died).  Should gather as much information as possible.
466  * Follows proc ptrs instead of reading table since table may go
467  * away soon.
468  */
469 static
470 kvm_doprocs(what, arg, buff)
471 	int what, arg;
472 	char *buff;
473 {
474 	struct proc *p, proc;
475 	register char *bp = buff;
476 	int i = 0;
477 	int doingzomb = 0;
478 	struct eproc eproc;
479 	struct pgrp pgrp;
480 	struct session sess;
481 	struct tty tty;
482 #ifndef NEWVM
483 	struct text text;
484 #endif
485 
486 	/* allproc */
487 	if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
488 	    sizeof (struct proc *)) != sizeof (struct proc *)) {
489 		seterr("can't read allproc");
490 		return (-1);
491 	}
492 
493 again:
494 	for (; p; p = proc.p_nxt) {
495 		if (kvm_read(p, &proc, sizeof (struct proc)) !=
496 		    sizeof (struct proc)) {
497 			seterr("can't read proc at %x", p);
498 			return (-1);
499 		}
500 #ifdef NEWVM
501 		if (kvm_read(proc.p_cred, &eproc.e_pcred,
502 		    sizeof (struct pcred)) == sizeof (struct pcred))
503 			(void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
504 			    sizeof (struct ucred));
505 		switch(ki_op(what)) {
506 
507 		case KINFO_PROC_PID:
508 			if (proc.p_pid != (pid_t)arg)
509 				continue;
510 			break;
511 
512 
513 		case KINFO_PROC_UID:
514 			if (eproc.e_ucred.cr_uid != (uid_t)arg)
515 				continue;
516 			break;
517 
518 		case KINFO_PROC_RUID:
519 			if (eproc.e_pcred.p_ruid != (uid_t)arg)
520 				continue;
521 			break;
522 		}
523 #else
524 		switch(ki_op(what)) {
525 
526 		case KINFO_PROC_PID:
527 			if (proc.p_pid != (pid_t)arg)
528 				continue;
529 			break;
530 
531 
532 		case KINFO_PROC_UID:
533 			if (proc.p_uid != (uid_t)arg)
534 				continue;
535 			break;
536 
537 		case KINFO_PROC_RUID:
538 			if (proc.p_ruid != (uid_t)arg)
539 				continue;
540 			break;
541 		}
542 #endif
543 		/*
544 		 * gather eproc
545 		 */
546 		eproc.e_paddr = p;
547 		if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) !=
548 	            sizeof (struct pgrp)) {
549 			seterr("can't read pgrp at %x", proc.p_pgrp);
550 			return (-1);
551 		}
552 		eproc.e_sess = pgrp.pg_session;
553 		eproc.e_pgid = pgrp.pg_id;
554 		eproc.e_jobc = pgrp.pg_jobc;
555 		if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
556 		   != sizeof (struct session)) {
557 			seterr("can't read session at %x", pgrp.pg_session);
558 			return (-1);
559 		}
560 		if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
561 			if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
562 			    != sizeof (struct tty)) {
563 				seterr("can't read tty at %x", sess.s_ttyp);
564 				return (-1);
565 			}
566 			eproc.e_tdev = tty.t_dev;
567 			eproc.e_tsess = tty.t_session;
568 			if (tty.t_pgrp != NULL) {
569 				if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
570 				    pgrp)) != sizeof (struct pgrp)) {
571 					seterr("can't read tpgrp at &x",
572 						tty.t_pgrp);
573 					return (-1);
574 				}
575 				eproc.e_tpgid = pgrp.pg_id;
576 			} else
577 				eproc.e_tpgid = -1;
578 		} else
579 			eproc.e_tdev = NODEV;
580 		if (proc.p_wmesg)
581 			kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
582 #ifdef NEWVM
583 		(void) kvm_read(proc.p_vmspace, &eproc.e_vm,
584 		    sizeof (struct vmspace));
585 		eproc.e_xsize = eproc.e_xrssize =
586 			eproc.e_xccount = eproc.e_xswrss = 0;
587 #else
588 		if (proc.p_textp) {
589 			kvm_read(proc.p_textp, &text, sizeof (text));
590 			eproc.e_xsize = text.x_size;
591 			eproc.e_xrssize = text.x_rssize;
592 			eproc.e_xccount = text.x_ccount;
593 			eproc.e_xswrss = text.x_swrss;
594 		} else {
595 			eproc.e_xsize = eproc.e_xrssize =
596 			  eproc.e_xccount = eproc.e_xswrss = 0;
597 		}
598 #endif
599 
600 		switch(ki_op(what)) {
601 
602 		case KINFO_PROC_PGRP:
603 			if (eproc.e_pgid != (pid_t)arg)
604 				continue;
605 			break;
606 
607 		case KINFO_PROC_TTY:
608 			if ((proc.p_flag&SCTTY) == 0 ||
609 			     eproc.e_tdev != (dev_t)arg)
610 				continue;
611 			break;
612 		}
613 
614 		i++;
615 		bcopy(&proc, bp, sizeof (struct proc));
616 		bp += sizeof (struct proc);
617 		bcopy(&eproc, bp, sizeof (struct eproc));
618 		bp+= sizeof (struct eproc);
619 	}
620 	if (!doingzomb) {
621 		/* zombproc */
622 		if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
623 		    sizeof (struct proc *)) != sizeof (struct proc *)) {
624 			seterr("can't read zombproc");
625 			return (-1);
626 		}
627 		doingzomb = 1;
628 		goto again;
629 	}
630 
631 	return (i);
632 }
633 
634 struct proc *
635 kvm_nextproc()
636 {
637 
638 	if (!kvmprocbase && kvm_getprocs(0, 0) == -1)
639 		return (NULL);
640 	if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
641 		seterr("end of proc list");
642 		return (NULL);
643 	}
644 	return((struct proc *)(kvmprocptr++));
645 }
646 
647 struct eproc *
648 kvm_geteproc(p)
649 	const struct proc *p;
650 {
651 	return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
652 }
653 
/*
 * Rewind the snapshot cursor so the next kvm_nextproc() call starts
 * again from the first process gathered by kvm_getprocs().
 */
kvm_setproc()
{
	kvmprocptr = kvmprocbase;
}
658 
659 kvm_freeprocs()
660 {
661 
662 	if (kvmprocbase) {
663 		free(kvmprocbase);
664 		kvmprocbase = NULL;
665 	}
666 }
667 
668 #ifdef NEWVM
669 struct user *
670 kvm_getu(p)
671 	const struct proc *p;
672 {
673 	register struct kinfo_proc *kp = (struct kinfo_proc *)p;
674 	register int i;
675 	register char *up;
676 	u_int vaddr;
677 	struct swapblk swb;
678 
679 	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
680 		return (NULL);
681 	if (p->p_stat == SZOMB) {
682 		seterr("zombie process");
683 		return (NULL);
684 	}
685 
686 	if ((p->p_flag & SLOAD) == 0) {
687 		vm_offset_t	maddr;
688 
689 		if (swap < 0) {
690 			seterr("no swap");
691 			return (NULL);
692 		}
693 		/*
694 		 * Costly operation, better set enable_swap to zero
695 		 * in vm/vm_glue.c, since paging of user pages isn't
696 		 * done yet anyway.
697 		 */
698 		if (vatosw(p, USRSTACK + i * NBPG, &maddr, &swb) == 0)
699 			return NULL;
700 
701 		if (maddr == 0 && swb.size < UPAGES * NBPG)
702 			return NULL;
703 
704 		for (i = 0; i < UPAGES; i++) {
705 			if (maddr) {
706 				(void) lseek(mem, maddr + i * NBPG, 0);
707 				if (read(mem,
708 				    (char *)user.upages[i], NBPG) != NBPG) {
709 					seterr(
710 					    "can't read u for pid %d from %s",
711 					    p->p_pid, swapf);
712 					return NULL;
713 				}
714 			} else {
715 				(void) lseek(swap, swb.offset + i * NBPG, 0);
716 				if (read(swap,
717 				    (char *)user.upages[i], NBPG) != NBPG) {
718 					seterr(
719 					    "can't read u for pid %d from %s",
720 					    p->p_pid, swapf);
721 					return NULL;
722 				}
723 			}
724 		}
725 		return(&user.user);
726 	}
727 	/*
728 	 * Read u-area one page at a time for the benefit of post-mortems
729 	 */
730 	up = (char *) p->p_addr;
731 	for (i = 0; i < UPAGES; i++) {
732 		klseek(kmem, (long)up, 0);
733 		if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
734 			seterr("cant read page %x of u of pid %d from %s",
735 			    up, p->p_pid, kmemf);
736 			return(NULL);
737 		}
738 		up += CLBYTES;
739 	}
740 	pcbpf = (int) btop(p->p_addr);	/* what should this be really? */
741 
742 	kp->kp_eproc.e_vm.vm_rssize =
743 	    kp->kp_eproc.e_vm.vm_pmap.pm_stats.resident_count; /* XXX */
744 	return(&user.user);
745 }
746 #else
/*
 * (pre-NEWVM variant) Fetch the u-area of process p into the static
 * "user" buffer and return a pointer to it: from the swap device when
 * the process is swapped out, otherwise by chasing the per-process
 * page tables through Usrptmap.  Also caches pcbpf and the
 * argument-page addresses argaddr0/argaddr1.
 * NOTE(review): argaddr0/argaddr1 are not declared anywhere in this
 * view of the file -- presumably file-scope statics defined elsewhere;
 * verify.  Returns NULL on error.
 */
struct user *
kvm_getu(p)
	const struct proc *p;
{
	struct pte *pteaddr, apte;
	struct pte arguutl[HIGHPAGES+(CLSIZE*2)];
	register int i;
	int ncl;

	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
		return (NULL);
	if (p->p_stat == SZOMB) {
		seterr("zombie process");
		return (NULL);
	}
	if ((p->p_flag & SLOAD) == 0) {
		/* swapped out: the whole u-area sits contiguously on swap */
		if (swap < 0) {
			seterr("no swap");
			return (NULL);
		}
		(void) lseek(swap, (long)dtob(p->p_swaddr), 0);
		if (read(swap, (char *)&user.user, sizeof (struct user)) !=
		    sizeof (struct user)) {
			seterr("can't read u for pid %d from %s",
			    p->p_pid, swapf);
			return (NULL);
		}
		pcbpf = 0;
		argaddr0 = 0;
		argaddr1 = 0;
		return (&user.user);
	}
	/* resident: fetch the indirect pte that maps the u-area pages */
	pteaddr = &Usrptmap[btokmx(p->p_p0br) + p->p_szpt - 1];
	klseek(kmem, (long)pteaddr, 0);
	if (read(kmem, (char *)&apte, sizeof(apte)) != sizeof(apte)) {
		seterr("can't read indir pte to get u for pid %d from %s",
		    p->p_pid, kmemf);
		return (NULL);
	}
	/* read the ptes covering the argument pages and the u-area */
	lseek(mem, (long)ctob(pftoc(apte.pg_pfnum+1)) - sizeof(arguutl), 0);
	if (read(mem, (char *)arguutl, sizeof(arguutl)) != sizeof(arguutl)) {
		seterr("can't read page table for u of pid %d from %s",
		    p->p_pid, memf);
		return (NULL);
	}
	/* record physical addresses of the two argument clusters, if valid */
	if (arguutl[0].pg_fod == 0 && arguutl[0].pg_pfnum)
		argaddr0 = ctob(pftoc(arguutl[0].pg_pfnum));
	else
		argaddr0 = 0;
	if (arguutl[CLSIZE*1].pg_fod == 0 && arguutl[CLSIZE*1].pg_pfnum)
		argaddr1 = ctob(pftoc(arguutl[CLSIZE*1].pg_pfnum));
	else
		argaddr1 = 0;
	pcbpf = arguutl[CLSIZE*2].pg_pfnum;
	/* copy the u-area in, one click (CLBYTES) at a time, last first */
	ncl = (sizeof (struct user) + CLBYTES - 1) / CLBYTES;
	while (--ncl >= 0) {
		i = ncl * CLSIZE;
		lseek(mem,
		      (long)ctob(pftoc(arguutl[(CLSIZE*2)+i].pg_pfnum)), 0);
		if (read(mem, user.upages[i], CLBYTES) != CLBYTES) {
			seterr("can't read page %d of u of pid %d from %s",
			    arguutl[(CLSIZE*2)+i].pg_pfnum, p->p_pid, memf);
			return(NULL);
		}
	}
	return (&user.user);
}
814 #endif
815 
816 int
817 kvm_procread(p, addr, buf, len)
818 	const struct proc *p;
819 	const unsigned addr, len;
820 	char *buf;
821 {
822 	register struct kinfo_proc *kp = (struct kinfo_proc *) p;
823 	struct swapblk swb;
824 	vm_offset_t swaddr = 0, memaddr = 0;
825 	unsigned real_len;
826 
827 	real_len = len < (CLBYTES - (addr & CLOFSET)) ? len : (CLBYTES - (addr & CLOFSET));
828 
829 #if defined(hp300)
830 	/*
831 	 * XXX DANGER WILL ROBINSON -- i have *no* idea to what extent this
832 	 * works... -- cgd
833 	 */
834 	BREAK HERE!!!
835 #endif
836 #if defined(i386)
837         if (kp->kp_eproc.e_vm.vm_pmap.pm_pdir) {
838                 struct pde pde;
839 
840                 klseek(kmem,
841                 (long)(&kp->kp_eproc.e_vm.vm_pmap.pm_pdir[pdei(addr)]), 0);
842 
843                 if (read(kmem, (char *)&pde, sizeof pde) == sizeof pde
844                                 && pde.pd_v) {
845 
846                         struct pte pte;
847 
848                         if (lseek(mem, (long)ctob(pde.pd_pfnum) +
849                                         (ptei(addr) * sizeof pte), 0) == -1)
850                                 seterr("kvm_procread: lseek");
851                         if (read(mem, (char *)&pte, sizeof pte) == sizeof pte) {
852                                 if (pte.pg_v) {
853                                         memaddr = (long)ctob(pte.pg_pfnum) +
854 							(addr % (1 << PGSHIFT));
855                                 }
856                         } else {
857                                 seterr("kvm_procread: read");
858                         }
859                 }
860         }
861 #endif  /* i386 */
862 
863         if (memaddr == 0 && vatosw(p, addr & ~CLOFSET, &memaddr, &swb)) {
864 		if (memaddr != 0) {
865 			memaddr += addr & CLOFSET;
866 		} else {
867 			swaddr = swb.offset + (addr & CLOFSET);
868 			swb.size -= addr & CLOFSET;
869 		}
870         }
871 
872 	if (memaddr) {
873 		if (lseek(mem, memaddr, 0) == -1)
874 			seterr("kvm_getu: lseek");
875 		real_len = read(mem, buf, real_len);
876 		if (real_len == -1) {
877 			seterr("kvm_procread: read");
878 			return 0;
879 		}
880 	} else if (swaddr) {
881 		char bouncebuf[CLBYTES];
882 		unsigned len;
883 		if (lseek(swap, swaddr & ~CLOFSET, 0) == -1) {
884 			seterr("kvm_procread: lseek");
885 			return 0;
886 		}
887 		len = read(swap, bouncebuf, CLBYTES);
888 		if (len == -1 || len <= (swaddr & CLOFSET)) {
889 			seterr("kvm_procread: read");
890 			return 0;
891 		}
892 		len = MIN(len - (swaddr & CLOFSET), real_len);
893 		memcpy(buf, &bouncebuf[swaddr & CLOFSET], len);
894 		return len;
895 	} else
896 		real_len = 0;
897 
898 	return real_len;
899 }
900 
/*
 * Copy a NUL-terminated string at address addr in process p's address
 * space into buf, reading at most len bytes.  Returns the number of
 * bytes copied not counting the terminating NUL; if no NUL is found
 * within len bytes, buf is left unterminated and len is returned.
 */
int
kvm_procreadstr(p, addr, buf, len)
        const struct proc *p;
        const unsigned addr;
	char *buf;
	unsigned len;
{
	char	chunk[200];
	int	copied, got, n;

	copied = 0;
	while (len > 0) {
		got = kvm_procread(p, addr + copied, chunk,
		    MIN(len, sizeof chunk));
		if (got < 1)
			break;
		for (n = 0; n < got; n++) {
			len--;
			if ((*buf++ = chunk[n]) == '\0')
				return copied;
			copied++;
		}
	}
	return copied;
}
927 
928 char *
929 kvm_getargs(p, up)
930 	const struct proc *p;
931 	const struct user *up;
932 {
933 	static char cmdbuf[ARG_MAX + sizeof(p->p_comm) + 5];
934 	register char *cp, *acp;
935 	int left, rv;
936 	struct ps_strings arginfo;
937 
938 	if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
939 		goto retucomm;
940 
941 	if (kvm_procread(p, PS_STRINGS, (char *)&arginfo, sizeof(arginfo)) !=
942 		sizeof(arginfo))
943 		goto bad;
944 
945 	cp = cmdbuf;
946 	acp = arginfo.ps_argvstr;
947 	left = ARG_MAX + 1;
948 	while (arginfo.ps_nargvstr--) {
949 		if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
950 			acp += rv + 1;
951 			left -= rv + 1;
952 			cp += rv;
953 			*cp++ = ' ';
954 			*cp = '\0';
955 		} else
956 			goto bad;
957 	}
958 	cp-- ; *cp = '\0';
959 
960 	if (cmdbuf[0] == '-' || cmdbuf[0] == '?' || cmdbuf[0] <= ' ') {
961 		(void) strcat(cmdbuf, " (");
962 		(void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
963 		(void) strcat(cmdbuf, ")");
964 	}
965 	return (cmdbuf);
966 
967 bad:
968 	seterr("error locating command name for pid %d", p->p_pid);
969 retucomm:
970 	(void) strcpy(cmdbuf, "(");
971 	(void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
972 	(void) strcat(cmdbuf, ")");
973 	return (cmdbuf);
974 }
975 
976 char *
977 kvm_getenv(p, up)
978 	const struct proc *p;
979 	const struct user *up;
980 {
981 	static char envbuf[ARG_MAX + 1];
982 	register char *cp, *acp;
983 	int left, rv;
984 	struct ps_strings arginfo;
985 
986 	if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
987 		goto retemptyenv;
988 
989 	if (kvm_procread(p, PS_STRINGS, (char *)&arginfo, sizeof(arginfo)) !=
990 		sizeof(arginfo))
991 		goto bad;
992 
993 	cp = envbuf;
994 	acp = arginfo.ps_envstr;
995 	left = ARG_MAX + 1;
996 	while (arginfo.ps_nenvstr--) {
997 		if ((rv = kvm_procreadstr(p, acp, cp, left)) >= 0) {
998 			acp += rv + 1;
999 			left -= rv + 1;
1000 			cp += rv;
1001 			*cp++ = ' ';
1002 			*cp = '\0';
1003 		} else
1004 			goto bad;
1005 	}
1006 	cp-- ; *cp = '\0';
1007 	return (envbuf);
1008 
1009 bad:
1010 	seterr("error locating environment for pid %d", p->p_pid);
1011 retemptyenv:
1012 	envbuf[0] = '\0';
1013 	return (envbuf);
1014 }
1015 
1016 static
1017 getkvars()
1018 {
1019 	if (kvm_nlist(nl) == -1)
1020 		return (-1);
1021 	if (deadkernel) {
1022 		/* We must do the sys map first because klseek uses it */
1023 		long	addr;
1024 
1025 #ifndef NEWVM
1026 		Syssize = nl[X_SYSSIZE].n_value;
1027 		Sysmap = (struct pte *)
1028 			calloc((unsigned) Syssize, sizeof (struct pte));
1029 		if (Sysmap == NULL) {
1030 			seterr("out of space for Sysmap");
1031 			return (-1);
1032 		}
1033 		addr = (long) nl[X_SYSMAP].n_value;
1034 		addr &= ~KERNBASE;
1035 		(void) lseek(kmem, addr, 0);
1036 		if (read(kmem, (char *) Sysmap, Syssize * sizeof (struct pte))
1037 		    != Syssize * sizeof (struct pte)) {
1038 			seterr("can't read Sysmap");
1039 			return (-1);
1040 		}
1041 #endif
1042 #if defined(hp300)
1043 		addr = (long) nl[X_LOWRAM].n_value;
1044 		(void) lseek(kmem, addr, 0);
1045 		if (read(kmem, (char *) &lowram, sizeof (lowram))
1046 		    != sizeof (lowram)) {
1047 			seterr("can't read lowram");
1048 			return (-1);
1049 		}
1050 		lowram = btop(lowram);
1051 		Sysseg = (struct ste *) malloc(NBPG);
1052 		if (Sysseg == NULL) {
1053 			seterr("out of space for Sysseg");
1054 			return (-1);
1055 		}
1056 		addr = (long) nl[X_SYSSEG].n_value;
1057 		(void) lseek(kmem, addr, 0);
1058 		read(kmem, (char *)&addr, sizeof(addr));
1059 		(void) lseek(kmem, (long)addr, 0);
1060 		if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
1061 			seterr("can't read Sysseg");
1062 			return (-1);
1063 		}
1064 #endif
1065 #if defined(i386)
1066 		PTD = (struct pde *) malloc(NBPG);
1067 		if (PTD == NULL) {
1068 			seterr("out of space for PTD");
1069 			return (-1);
1070 		}
1071 		addr = (long) nl[X_IdlePTD].n_value;
1072 		(void) lseek(kmem, addr, 0);
1073 		read(kmem, (char *)&addr, sizeof(addr));
1074 		(void) lseek(kmem, (long)addr, 0);
1075 		if (read(kmem, (char *) PTD, NBPG) != NBPG) {
1076 			seterr("can't read PTD");
1077 			return (-1);
1078 		}
1079 #endif
1080 	}
1081 #ifndef NEWVM
1082 	usrpt = (struct pte *)nl[X_USRPT].n_value;
1083 	Usrptmap = (struct pte *)nl[X_USRPTMAP].n_value;
1084 #endif
1085 	if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (long)) !=
1086 	    sizeof (long)) {
1087 		seterr("can't read nswap");
1088 		return (-1);
1089 	}
1090 	if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (long)) !=
1091 	    sizeof (long)) {
1092 		seterr("can't read dmmin");
1093 		return (-1);
1094 	}
1095 	if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (long)) !=
1096 	    sizeof (long)) {
1097 		seterr("can't read dmmax");
1098 		return (-1);
1099 	}
1100 	return (0);
1101 }
1102 
1103 kvm_read(loc, buf, len)
1104 	void *loc;
1105 	void *buf;
1106 {
1107 	if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
1108 		return (-1);
1109 	if (iskva(loc)) {
1110 		klseek(kmem, (off_t) loc, 0);
1111 		if (read(kmem, buf, len) != len) {
1112 			seterr("error reading kmem at %x", loc);
1113 			return (-1);
1114 		}
1115 	} else {
1116 		lseek(mem, (off_t) loc, 0);
1117 		if (read(mem, buf, len) != len) {
1118 			seterr("error reading mem at %x", loc);
1119 			return (-1);
1120 		}
1121 	}
1122 	return (len);
1123 }
1124 
1125 static void
1126 klseek(fd, loc, off)
1127 	int fd;
1128 	off_t loc;
1129 	int off;
1130 {
1131 
1132 	if (deadkernel) {
1133 		if ((loc = Vtophys(loc)) == -1)
1134 			return;
1135 	}
1136 	(void) lseek(fd, (off_t)loc, off);
1137 }
1138 
1139 #ifndef NEWVM
1140 /*
1141  * Given a base/size pair in virtual swap area,
1142  * return a physical base/size pair which is the
1143  * (largest) initial, physically contiguous block.
1144  */
/*
 * NOTE(review): the "rev" parameter is used below but missing from the
 * K&R declaration list, so it defaults to int -- confirm callers pass
 * an int here.
 */
static void
vstodb(vsbase, vssize, dmp, dbp, rev)
	register int vsbase;
	int vssize;
	struct dmap *dmp;
	register struct dblock *dbp;
{
	register int blk = dmmin;
	register swblk_t *ip = dmp->dm_map;

	/* work in disk blocks rather than clicks */
	vsbase = ctod(vsbase);
	vssize = ctod(vssize);
	/* sanity checks compiled out: kernel panic() is unavailable here */
	if (vsbase < 0 || vsbase + vssize > dmp->dm_size)
		/*panic("vstodb")*/;
	/* walk the swap map; chunk sizes double from dmmin up to dmmax */
	while (vsbase >= blk) {
		vsbase -= blk;
		if (blk < dmmax)
			blk *= 2;
		ip++;
	}
	if (*ip <= 0 || *ip + blk > nswap)
		/*panic("vstodb")*/;
	dbp->db_size = MIN(vssize, blk - vsbase);
	dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
}
1170 #endif
1171 
1172 #ifdef NEWVM
/*
 * Translate kernel virtual address loc into a physical offset usable
 * on a crash dump's memory file, by walking the translation maps
 * copied in by getkvars() (hp300 segment table, i386 page directory).
 * Returns (off_t)-1 and records an error if the address is unmapped.
 */
static off_t
Vtophys(loc)
	u_long	loc;
{
	off_t newloc = (off_t) -1;
#ifdef hp300
	int p, ste, pte;

	/* index the segment table copied into Sysseg by getkvars() */
	ste = *(int *)&Sysseg[loc >> SG_ISHIFT];
	if ((ste & SG_V) == 0) {
		seterr("vtophys: segment not valid");
		return((off_t) -1);
	}
	p = btop(loc & SG_PMASK);
	newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
	/* the dump file starts at lowram, so bias the offset by it */
	(void) lseek(kmem, (long)(newloc-(off_t)ptob(lowram)), 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot locate pte");
		return((off_t) -1);
	}
	newloc = pte & PG_FRAME;
	if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
	newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
#endif
#ifdef i386
	struct pde pde;
	struct pte pte;
	int p;

	/* index the page directory copied into PTD by getkvars() */
	pde = PTD[loc >> PD_SHIFT];
	if (pde.pd_v == 0) {
		seterr("vtophys: page directory entry not valid");
		return((off_t) -1);
	}
	p = btop(loc & PT_MASK);
	newloc = pde.pd_pfnum + (p * sizeof(struct pte));
	(void) lseek(kmem, (long)newloc, 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot obtain desired pte");
		return((off_t) -1);
	}
	newloc = pte.pg_pfnum;
	if (pte.pg_v == 0) {
		seterr("vtophys: page table entry not valid");
		return((off_t) -1);
	}
	newloc += (loc & PGOFSET);
#endif
	return((off_t) newloc);
}
1226 #else
/*
 * Pre-NEWVM variant: translate kernel virtual address loc to a
 * physical address using the in-memory copy of the system page
 * table (Sysmap/Syssize).  Returns the physical address, or
 * (off_t)-1 on failure (message recorded via seterr).
 */
static off_t
vtophys(loc)
	long loc;
{
	int p;
	off_t newloc;
	register struct pte *pte;

	/* Strip the kernel-space base to get a kernel-relative offset. */
	newloc = loc & ~KERNBASE;
	p = btop(newloc);
#if defined(vax) || defined(tahoe)
	if ((loc & KERNBASE) == 0) {
		seterr("vtophys: translating non-kernel address");
		return((off_t) -1);
	}
#endif
	if (p >= Syssize) {
		seterr("vtophys: page out of bound (%d>=%d)", p, Syssize);
		return((off_t) -1);
	}
	pte = &Sysmap[p];
	/*
	 * An invalid pte is still usable if it is neither fill-on-demand
	 * nor zero-frame -- presumably a reclaimable page; TODO confirm
	 * against the era's VM system.
	 */
	if (pte->pg_v == 0 && (pte->pg_fod || pte->pg_pfnum == 0)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
#if defined(hp300)
	/* hp300 RAM starts at lowram; frames below it are not RAM. */
	if (pte->pg_pfnum < lowram) {
		seterr("vtophys: non-RAM page (%d<%d)", pte->pg_pfnum, lowram);
		return((off_t) -1);
	}
#endif
	/* Frame number -> byte address, plus the in-page offset. */
	loc = (long) (ptob(pftoc(pte->pg_pfnum)) + (loc & PGOFSET));
	return(loc);
}
1261 #endif
1262 
1263 
1264 #ifdef NEWVM
1265 /*
1266  * locate address of unwired or swapped page
1267  */
1268 
1269 #define DEBUG 0
1270 
1271 #define KREAD(off, addr, len) \
1272 	(kvm_read((void *)(off), (char *)(addr), (len)) == (len))
1273 
1274 
/*
 * Translate user virtual address vaddr of process p into either a
 * physical (machine) address, stored through maddr, or a swap-space
 * location, stored through swb.  Works by walking the process's VM
 * map, the chain of shadow objects, and finally the swap pager's
 * block array -- all read out of kernel memory with KREAD.
 *
 * Returns 1 on success (one of *maddr / *swb filled in), 0 on
 * failure (message recorded via seterr/setsyserr).
 *
 * NOTE(review): p is actually the start of a kinfo_proc (see the
 * cast below); pids 0 and 2 (swapper/pagedaemon) are skipped.
 */
static int
vatosw(p, vaddr, maddr, swb)
struct proc	*p ;
vm_offset_t	vaddr;
vm_offset_t	*maddr;
struct swapblk	*swb;
{
	register struct kinfo_proc *kp = (struct kinfo_proc *)p;
	vm_map_t		mp = &kp->kp_eproc.e_vm.vm_map;
	struct vm_object	vm_object;
	struct vm_map_entry	vm_entry;
	struct pager_struct	pager;
	struct swpager		swpager;
	struct swblock		swblock;
	long			addr, off;
	int			i;

	/* System processes have no pageable address space. */
	if (p->p_pid == 0 || p->p_pid == 2)
		return 0;

	/* Walk the map-entry list looking for the entry covering vaddr. */
	addr = (long)mp->header.next;
	for (i = 0; i < mp->nentries; i++) {
		/* Weed through map entries until vaddr in range */
		if (!KREAD(addr, &vm_entry, sizeof(vm_entry))) {
			setsyserr("vatosw: read vm_map_entry");
			return 0;
		}
		if ((vaddr >= vm_entry.start) && (vaddr <= vm_entry.end) &&
				(vm_entry.object.vm_object != 0))
			break;

		addr = (long)vm_entry.next;
	}
	if (i == mp->nentries) {
		seterr("%u: map not found\n", p->p_pid);
		return 0;
	}

	/* Sharing/sub maps are not handled. */
	if (vm_entry.is_a_map || vm_entry.is_sub_map) {
		seterr("%u: Is a map\n", p->p_pid);
		return 0;
	}

	/* Locate memory object */
	off = (vaddr - vm_entry.start) + vm_entry.offset;
	addr = (long)vm_entry.object.vm_object;
	/*
	 * Follow the shadow-object chain: check each object's resident
	 * pages first; if the page isn't resident, descend into the
	 * shadow, adjusting the offset as we go.
	 */
	while (1) {
		if (!KREAD(addr, &vm_object, sizeof vm_object)) {
			setsyserr("vatosw: read vm_object");
			return 0;
		}

#if DEBUG
		fprintf(stderr, "%u: find page: object %#x offset %x\n",
				p->p_pid, addr, off);
#endif

		/* Lookup in page queue */
		if (findpage(addr, off, maddr))
			return 1;

		if (vm_object.shadow == 0)
			break;

#if DEBUG
		fprintf(stderr, "%u: shadow obj at %x: offset %x+%x\n",
				p->p_pid, addr, off, vm_object.shadow_offset);
#endif

		addr = (long)vm_object.shadow;
		off += vm_object.shadow_offset;
	}

	/* Not resident anywhere: must be paged out; need a pager. */
	if (!vm_object.pager) {
		seterr("%u: no pager\n", p->p_pid);
		return 0;
	}

	/* Find address in swap space */
	if (!KREAD(vm_object.pager, &pager, sizeof pager)) {
		setsyserr("vatosw: read pager");
		return 0;
	}
	if (pager.pg_type != PG_SWAP) {
		seterr("%u: weird pager\n", p->p_pid);
		return 0;
	}

	/* Get swap pager data */
	if (!KREAD(pager.pg_data, &swpager, sizeof swpager)) {
		setsyserr("vatosw: read swpager");
		return 0;
	}

	off += vm_object.paging_offset;

	/* Read swap block array */
	if (!KREAD((long)swpager.sw_blocks +
			(off/dbtob(swpager.sw_bsize)) * sizeof swblock,
			&swblock, sizeof swblock)) {
		setsyserr("vatosw: read swblock");
		return 0;
	}
	/* Byte offset into swap, and bytes remaining in this block. */
	swb->offset = dbtob(swblock.swb_block)+ (off % dbtob(swpager.sw_bsize));
	swb->size = dbtob(swpager.sw_bsize) - (off % dbtob(swpager.sw_bsize));
	return 1;
}
1382 
1383 
1384 #define atop(x)		(((unsigned)(x)) >> page_shift)
1385 #define vm_page_hash(object, offset) \
1386         (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)
1387 
/*
 * Search the kernel's resident-page hash table for the page of
 * <object, offset>.  On a hit, store the page's physical address
 * through maddr and return 1; return 0 if the page is not resident
 * or a kernel read fails (message recorded via seterr).
 *
 * The hash parameters (mask, shift, bucket array address) are read
 * from the kernel once and cached in function-static variables.
 * The hash and bucket-walk logic here must mirror the kernel's
 * vm_page_lookup().
 */
static int
findpage(object, offset, maddr)
long			object;
long			offset;
vm_offset_t		*maddr;
{
static	long		vm_page_hash_mask;
static	long		vm_page_buckets;
static	long		page_shift;
	queue_head_t	bucket;
	struct vm_page	mem;
	long		addr, baddr;

	if (vm_page_hash_mask == 0 && !KREAD(nl[X_VM_PAGE_HASH_MASK].n_value,
			&vm_page_hash_mask, sizeof (long))) {
		seterr("can't read vm_page_hash_mask");
		return 0;
	}
	if (page_shift == 0 && !KREAD(nl[X_PAGE_SHIFT].n_value,
			&page_shift, sizeof (long))) {
		seterr("can't read page_shift");
		return 0;
	}
	if (vm_page_buckets == 0 && !KREAD(nl[X_VM_PAGE_BUCKETS].n_value,
			&vm_page_buckets, sizeof (long))) {
		seterr("can't read vm_page_buckets");
		return 0;
	}

	/* Kernel address of the hash bucket for <object, offset>. */
	baddr = vm_page_buckets + vm_page_hash(object,offset) * sizeof(queue_head_t);
	if (!KREAD(baddr, &bucket, sizeof (bucket))) {
		seterr("can't read vm_page_bucket");
		return 0;
	}

	/*
	 * Walk the circular hash chain; it terminates when the link
	 * points back at the bucket head itself.
	 */
	addr = (long)bucket.next;
	while (addr != baddr) {
		if (!KREAD(addr, &mem, sizeof (mem))) {
			seterr("can't read vm_page");
			return 0;
		}
		if ((long)mem.object == object && mem.offset == offset) {
			*maddr = (long)mem.phys_addr;
			return 1;
		}
		addr = (long)mem.hashq.next;
	}
	return 0;
}
1437 #endif	/* NEWVM */
1438 
1439 #include <varargs.h>
1440 static char errbuf[_POSIX2_LINE_MAX];
1441 
/*
 * Record a printf-style error message in the static errbuf, for
 * later retrieval via kvm_geterr().  Pre-ANSI <varargs.h>
 * interface: the format string is the first variadic argument.
 */
static void
seterr(va_alist)
	va_dcl
{
	char *fmt;
	va_list ap;

	va_start(ap);
	fmt = va_arg(ap, char *);
	(void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
#if DEBUG
	/*
	 * NOTE(review): ap was already consumed by vsnprintf above;
	 * reusing it here without re-initializing is undefined
	 * behavior.  Only compiled in when DEBUG is nonzero.
	 */
	(void) vfprintf(stderr, fmt, ap);
#endif
	va_end(ap);
}
1457 
1458 static void
1459 setsyserr(va_alist)
1460 	va_dcl
1461 {
1462 	char *fmt, *cp;
1463 	va_list ap;
1464 	extern int errno;
1465 
1466 	va_start(ap);
1467 	fmt = va_arg(ap, char *);
1468 	(void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1469 	for (cp=errbuf; *cp; cp++)
1470 		;
1471 	snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s", strerror(errno));
1472 	va_end(ap);
1473 }
1474 
1475 char *
1476 kvm_geterr()
1477 {
1478 	return (errbuf);
1479 }
1480