xref: /netbsd-src/lib/libkvm/kvm.c (revision 811e6386f8c5e4a3521c7003da29ec8673e344fa)
1 /*-
2  * Copyright (c) 1989 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  * PATCHES MAGIC                LEVEL   PATCH THAT GOT US HERE
34  * --------------------         -----   ----------------------
35  * CURRENT PATCH LEVEL:         2       00052
36  * --------------------         -----   ----------------------
37  *
38  * 08 Sep 92	Greenman & Kranenburg	Change vaddr calc, move bogus #endif
39  * 05 Aug 92    David Greenman          Fix kernel namelist db create/use
40  */
41 
42 #if defined(LIBC_SCCS) && !defined(lint)
43 static char sccsid[] = "@(#)kvm.c	5.18 (Berkeley) 5/7/91";
44 #endif /* LIBC_SCCS and not lint */
45 
46 /*
47  *  Updated for 386BSD 0.1 by David Greenman (davidg%implode@percy.rain.com)
48  *     and Paul Kranenburg (pk@cs.few.eur.nl)
49  *  20-Aug-1992
50  */
51 
52 
53 #include <sys/param.h>
54 #include <sys/user.h>
55 #include <sys/proc.h>
56 #include <sys/ioctl.h>
57 #include <sys/kinfo.h>
58 #include <sys/tty.h>
59 #include <machine/vmparam.h>
60 #include <fcntl.h>
61 #include <nlist.h>
62 #include <kvm.h>
63 #include <ndbm.h>
64 #include <limits.h>
65 #include <paths.h>
66 #include <stdio.h>
67 #include <string.h>
68 
69 #ifdef SPPWAIT
70 #define NEWVM
71 #endif
72 
73 #ifdef NEWVM
74 #define	btop(x)		(((unsigned)(x)) >> PGSHIFT)	/* XXX */
75 #define	ptob(x)		((caddr_t)((x) << PGSHIFT))	/* XXX */
76 #include <vm/vm.h>	/* ??? kinfo_proc currently includes this*/
77 #include <vm/vm_page.h>
78 #include <vm/swap_pager.h>
79 #include <sys/kinfo_proc.h>
80 #ifdef hp300
81 #include <hp300/hp300/pte.h>
82 #endif
83 #else /* NEWVM */
84 #include <machine/pte.h>
85 #include <sys/vmmac.h>
86 #include <sys/text.h>
87 #endif /* NEWVM */
88 
/*
 * files
 */
static	const char *unixf, *memf, *kmemf, *swapf;	/* path names */
static	int unixx, mem, kmem, swap;	/* open descriptors (-1 = closed) */
static	DBM *db;			/* cached kernel-symbol database */
/*
 * flags
 */
static	int deadkernel;		/* nonzero: examining a crash dump */
static	int kvminit = 0;	/* getkvars() has completed */
static	int kvmfilesopen = 0;	/* kvm_openfiles() has completed */
/*
 * state
 */
static	struct kinfo_proc *kvmprocbase, *kvmprocptr;	/* proc snapshot + cursor */
static	int kvmnprocs;		/* number of entries in the snapshot */
/*
 * u. buffer
 */
static union {
	struct	user user;
	char	upages[UPAGES][NBPG];
} user;

#ifdef NEWVM
struct swapblk {
	long	offset;		/* offset in swap device */
	long	size;		/* remaining size of block in swap device */
};
#endif
/*
 * random other stuff
 */
#ifndef NEWVM
static	struct pte *Usrptmap, *usrpt;
static	struct	pte *Sysmap;
static	int	Syssize;
#endif
static	int	dmmin, dmmax;	/* swap-map interleave parameters */
static	int	pcbpf;		/* page frame holding the u-area pcb */
static	int	argaddr0;	/* XXX */
static	int	argaddr1;	/* physical address of the argument pages */
static	int	swaddr;		/* swap offset of the argument pages */
static	int	nswap;		/* size of swap space, in blocks */
static	char	*tmp;		/* scratch for the basename() macro */
#if defined(hp300)
static	int	lowram;
static	struct ste *Sysseg;
#endif
#if defined(i386)
static	struct pde *PTD;	/* copy of the kernel page directory */
#endif

/* last path component of cp (uses file-scope `tmp' as scratch) */
#define basename(cp)	((tmp=rindex((cp), '/')) ? tmp+1 : (cp))
#define	MAXSYMSIZE	256	/* longest symbol name looked up in the db */

#if defined(hp300)
#define pftoc(f)	((f) - lowram)
#define iskva(v)	(1)
#endif

/* pftoc: page frame number -> core-file page number */
#ifndef pftoc
#define pftoc(f)	(f)
#endif
/* iskva: nonzero when v looks like a kernel virtual address */
#ifndef iskva
#define iskva(v)	((u_long)(v) & KERNBASE)
#endif
157 
/*
 * Kernel symbols looked up at initialization.  The X_* indices below
 * must match the table order exactly; keep them in sync when editing.
 */
static struct nlist nl[] = {
	{ "_Usrptmap" },
#define	X_USRPTMAP	0
	{ "_usrpt" },
#define	X_USRPT		1
	{ "_nswap" },
#define	X_NSWAP		2
	{ "_dmmin" },
#define	X_DMMIN		3
	{ "_dmmax" },
#define	X_DMMAX		4
	{ "_vm_page_buckets" },
#define X_VM_PAGE_BUCKETS	5
	{ "_vm_page_hash_mask" },
#define X_VM_PAGE_HASH_MASK	6
	{ "_page_shift" },
#define X_PAGE_SHIFT	7
	/*
	 * everything here and down, only if a dead kernel
	 * (kvm_openfiles() truncates the list here for a live kernel)
	 */
	{ "_Sysmap" },
#define	X_SYSMAP	8
#define	X_DEADKERNEL	X_SYSMAP
	{ "_Syssize" },
#define	X_SYSSIZE	9
	{ "_allproc" },
#define X_ALLPROC	10
	{ "_zombproc" },
#define X_ZOMBPROC	11
	{ "_nproc" },
#define	X_NPROC		12
#define	X_LAST		12
#if defined(hp300)
	{ "_Sysseg" },
#define	X_SYSSEG	(X_LAST+1)
	{ "_lowram" },
#define	X_LOWRAM	(X_LAST+2)
#endif
#if defined(i386)
	{ "_IdlePTD" },
#define	X_IdlePTD	(X_LAST+1)
#endif
	{ "" },		/* terminator */
};
202 
/* forward declarations (pre-ANSI: no parameter lists) */
static off_t Vtophys();
static void klseek(), seterr(), setsyserr(), vstodb();
static int getkvars(), kvm_doprocs(), kvm_init();
#ifdef NEWVM
static int vatosw();
static int findpage();
#endif
210 
211 /*
212  * returns 	0 if files were opened now,
213  * 		1 if files were already opened,
214  *		-1 if files could not be opened.
215  */
216 kvm_openfiles(uf, mf, sf)
217 	const char *uf, *mf, *sf;
218 {
219 	if (kvmfilesopen)
220 		return (1);
221 	unixx = mem = kmem = swap = -1;
222 	unixf = (uf == NULL) ? _PATH_UNIX : uf;
223 	memf = (mf == NULL) ? _PATH_MEM : mf;
224 
225 	if ((unixx = open(unixf, O_RDONLY, 0)) == -1) {
226 		setsyserr("can't open %s", unixf);
227 		goto failed;
228 	}
229 	if ((mem = open(memf, O_RDONLY, 0)) == -1) {
230 		setsyserr("can't open %s", memf);
231 		goto failed;
232 	}
233 	if (sf != NULL)
234 		swapf = sf;
235 	if (mf != NULL) {
236 		deadkernel++;
237 		kmemf = mf;
238 		kmem = mem;
239 		swap = -1;
240 	} else {
241 		kmemf = _PATH_KMEM;
242 		if ((kmem = open(kmemf, O_RDONLY, 0)) == -1) {
243 			setsyserr("can't open %s", kmemf);
244 			goto failed;
245 		}
246 		swapf = (sf == NULL) ?  _PATH_DRUM : sf;
247 		/*
248 		 * live kernel - avoid looking up nlist entries
249 		 * past X_DEADKERNEL.
250 		 */
251 		nl[X_DEADKERNEL].n_name = "";
252 	}
253 	if (swapf != NULL && ((swap = open(swapf, O_RDONLY, 0)) == -1)) {
254 		seterr("can't open %s", swapf);
255 		goto failed;
256 	}
257 	kvmfilesopen++;
258 	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1) /*XXX*/
259 		return (-1);
260 	return (0);
261 failed:
262 	kvm_close();
263 	return (-1);
264 }
265 
/*
 * Lazy one-time initialization: ensure the kvm files are open and the
 * kernel variables have been fetched via getkvars().
 * Returns 0 on success, -1 on failure (kvminit then stays 0).
 * The uf/mf/sf parameters are accepted but unused here.
 * NOTE(review): callers invoke this as kvm_init(NULL, NULL, NULL, 0)
 * with a fourth argument the definition does not declare; harmless
 * under pre-ANSI calling conventions, but worth confirming.
 */
static
kvm_init(uf, mf, sf)
	char *uf, *mf, *sf;
{
	if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
		return (-1);
	if (getkvars() == -1)
		return (-1);
	kvminit = 1;

	return (0);
}
278 
279 kvm_close()
280 {
281 	if (unixx != -1) {
282 		close(unixx);
283 		unixx = -1;
284 	}
285 	if (kmem != -1) {
286 		if (kmem != mem)
287 			close(kmem);
288 		/* otherwise kmem is a copy of mem, and will be closed below */
289 		kmem = -1;
290 	}
291 	if (mem != -1) {
292 		close(mem);
293 		mem = -1;
294 	}
295 	if (swap != -1) {
296 		close(swap);
297 		swap = -1;
298 	}
299 	if (db != NULL) {
300 		dbm_close(db);
301 		db = NULL;
302 	}
303 	kvminit = 0;
304 	kvmfilesopen = 0;
305 	deadkernel = 0;
306 #ifndef NEWVM
307 	if (Sysmap) {
308 		free(Sysmap);
309 		Sysmap = NULL;
310 	}
311 #endif
312 }
313 
314 kvm_nlist(nl)
315 	struct nlist *nl;
316 {
317 	datum key, data;
318 	char dbname[MAXPATHLEN];
319 	char dbversion[_POSIX2_LINE_MAX];
320 	char kversion[_POSIX2_LINE_MAX];
321 	int dbversionlen;
322 	char symbuf[MAXSYMSIZE];
323 	struct nlist nbuf, *n;
324 	int num, did;
325 
326 	if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
327 		return (-1);
328 	if (deadkernel)
329 		goto hard2;
330 	/*
331 	 * initialize key datum
332 	 */
333 	key.dptr = symbuf;
334 
335 	if (db != NULL)
336 		goto win;	/* off to the races */
337 	/*
338 	 * open database
339 	 */
340 	sprintf(dbname, "%s/kvm_%s", _PATH_VARRUN, basename(unixf));
341 	if ((db = dbm_open(dbname, O_RDONLY, 0)) == NULL)
342 		goto hard2;
343 	/*
344 	 * read version out of database
345 	 */
346 	bcopy("VERSION", symbuf, sizeof ("VERSION")-1);
347 	key.dsize = (sizeof ("VERSION") - 1);
348 	data = dbm_fetch(db, key);
349 	if (data.dptr == NULL)
350 		goto hard1;
351 	bcopy(data.dptr, dbversion, data.dsize);
352 	dbversionlen = data.dsize;
353 	/*
354 	 * read version string from kernel memory
355 	 */
356 	bcopy("_version", symbuf, sizeof ("_version")-1);
357 	key.dsize = (sizeof ("_version")-1);
358 	data = dbm_fetch(db, key);
359 	if (data.dptr == NULL)
360 		goto hard1;
361 	if (data.dsize != sizeof (struct nlist))
362 		goto hard1;
363 	bcopy(data.dptr, &nbuf, sizeof (struct nlist));
364 	lseek(kmem, nbuf.n_value, 0);
365 	if (read(kmem, kversion, dbversionlen) != dbversionlen)
366 		goto hard1;
367 	/*
368 	 * if they match, we win - otherwise do it the hard way
369 	 */
370 	if (bcmp(dbversion, kversion, dbversionlen) != 0)
371 		goto hard1;
372 	/*
373 	 * getem from the database.
374 	 */
375 win:
376 	num = did = 0;
377 	for (n = nl; n->n_name && n->n_name[0]; n++, num++) {
378 		int len;
379 		/*
380 		 * clear out fields from users buffer
381 		 */
382 		n->n_type = 0;
383 		n->n_other = 0;
384 		n->n_desc = 0;
385 		n->n_value = 0;
386 		/*
387 		 * query db
388 		 */
389 		if ((len = strlen(n->n_name)) > MAXSYMSIZE) {
390 			seterr("symbol too large");
391 			return (-1);
392 		}
393 		(void)strcpy(symbuf, n->n_name);
394 		key.dsize = len;
395 		data = dbm_fetch(db, key);
396 		if (data.dptr == NULL || data.dsize != sizeof (struct nlist))
397 			continue;
398 		bcopy(data.dptr, &nbuf, sizeof (struct nlist));
399 		n->n_value = nbuf.n_value;
400 		n->n_type = nbuf.n_type;
401 		n->n_desc = nbuf.n_desc;
402 		n->n_other = nbuf.n_other;
403 		did++;
404 	}
405 	return (num - did);
406 hard1:
407 	dbm_close(db);
408 	db = NULL;
409 hard2:
410 	num = nlist(unixf, nl);
411 	if (num == -1)
412 		seterr("nlist (hard way) failed");
413 	return (num);
414 }
415 
416 kvm_getprocs(what, arg)
417 	int what, arg;
418 {
419 	static int	ocopysize = -1;
420 
421 	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
422 		return (NULL);
423 	if (!deadkernel) {
424 		int ret, copysize;
425 
426 		if ((ret = getkerninfo(what, NULL, NULL, arg)) == -1) {
427 			setsyserr("can't get estimate for kerninfo");
428 			return (-1);
429 		}
430 		copysize = ret;
431 		if (copysize > ocopysize &&
432 			(kvmprocbase = (struct kinfo_proc *)malloc(copysize))
433 								     == NULL) {
434 			seterr("out of memory");
435 			return (-1);
436 		}
437 		ocopysize = copysize;
438 		if ((ret = getkerninfo(what, kvmprocbase, &copysize,
439 		     arg)) == -1) {
440 			setsyserr("can't get proc list");
441 			return (-1);
442 		}
443 		if (copysize % sizeof (struct kinfo_proc)) {
444 			seterr("proc size mismatch (got %d total, kinfo_proc: %d)",
445 				copysize, sizeof (struct kinfo_proc));
446 			return (-1);
447 		}
448 		kvmnprocs = copysize / sizeof (struct kinfo_proc);
449 	} else {
450 		int nproc;
451 
452 		if (kvm_read((void *) nl[X_NPROC].n_value, &nproc,
453 		    sizeof (int)) != sizeof (int)) {
454 			seterr("can't read nproc");
455 			return (-1);
456 		}
457 		if ((kvmprocbase = (struct kinfo_proc *)
458 		     malloc(nproc * sizeof (struct kinfo_proc))) == NULL) {
459 			seterr("out of memory (addr: %x nproc = %d)",
460 				nl[X_NPROC].n_value, nproc);
461 			return (-1);
462 		}
463 		kvmnprocs = kvm_doprocs(what, arg, kvmprocbase);
464 		realloc(kvmprocbase, kvmnprocs * sizeof (struct kinfo_proc));
465 	}
466 	kvmprocptr = kvmprocbase;
467 
468 	return (kvmnprocs);
469 }
470 
471 /*
472  * XXX - should NOT give up so easily - especially since the kernel
473  * may be corrupt (it died).  Should gather as much information as possible.
474  * Follows proc ptrs instead of reading table since table may go
475  * away soon.
476  */
477 static
478 kvm_doprocs(what, arg, buff)
479 	int what, arg;
480 	char *buff;
481 {
482 	struct proc *p, proc;
483 	register char *bp = buff;
484 	int i = 0;
485 	int doingzomb = 0;
486 	struct eproc eproc;
487 	struct pgrp pgrp;
488 	struct session sess;
489 	struct tty tty;
490 #ifndef NEWVM
491 	struct text text;
492 #endif
493 
494 	/* allproc */
495 	if (kvm_read((void *) nl[X_ALLPROC].n_value, &p,
496 	    sizeof (struct proc *)) != sizeof (struct proc *)) {
497 		seterr("can't read allproc");
498 		return (-1);
499 	}
500 
501 again:
502 	for (; p; p = proc.p_nxt) {
503 		if (kvm_read(p, &proc, sizeof (struct proc)) !=
504 		    sizeof (struct proc)) {
505 			seterr("can't read proc at %x", p);
506 			return (-1);
507 		}
508 #ifdef NEWVM
509 		if (kvm_read(proc.p_cred, &eproc.e_pcred,
510 		    sizeof (struct pcred)) == sizeof (struct pcred))
511 			(void) kvm_read(eproc.e_pcred.pc_ucred, &eproc.e_ucred,
512 			    sizeof (struct ucred));
513 		switch(ki_op(what)) {
514 
515 		case KINFO_PROC_PID:
516 			if (proc.p_pid != (pid_t)arg)
517 				continue;
518 			break;
519 
520 
521 		case KINFO_PROC_UID:
522 			if (eproc.e_ucred.cr_uid != (uid_t)arg)
523 				continue;
524 			break;
525 
526 		case KINFO_PROC_RUID:
527 			if (eproc.e_pcred.p_ruid != (uid_t)arg)
528 				continue;
529 			break;
530 		}
531 #else
532 		switch(ki_op(what)) {
533 
534 		case KINFO_PROC_PID:
535 			if (proc.p_pid != (pid_t)arg)
536 				continue;
537 			break;
538 
539 
540 		case KINFO_PROC_UID:
541 			if (proc.p_uid != (uid_t)arg)
542 				continue;
543 			break;
544 
545 		case KINFO_PROC_RUID:
546 			if (proc.p_ruid != (uid_t)arg)
547 				continue;
548 			break;
549 		}
550 #endif
551 		/*
552 		 * gather eproc
553 		 */
554 		eproc.e_paddr = p;
555 		if (kvm_read(proc.p_pgrp, &pgrp, sizeof (struct pgrp)) !=
556 	            sizeof (struct pgrp)) {
557 			seterr("can't read pgrp at %x", proc.p_pgrp);
558 			return (-1);
559 		}
560 		eproc.e_sess = pgrp.pg_session;
561 		eproc.e_pgid = pgrp.pg_id;
562 		eproc.e_jobc = pgrp.pg_jobc;
563 		if (kvm_read(pgrp.pg_session, &sess, sizeof (struct session))
564 		   != sizeof (struct session)) {
565 			seterr("can't read session at %x", pgrp.pg_session);
566 			return (-1);
567 		}
568 		if ((proc.p_flag&SCTTY) && sess.s_ttyp != NULL) {
569 			if (kvm_read(sess.s_ttyp, &tty, sizeof (struct tty))
570 			    != sizeof (struct tty)) {
571 				seterr("can't read tty at %x", sess.s_ttyp);
572 				return (-1);
573 			}
574 			eproc.e_tdev = tty.t_dev;
575 			eproc.e_tsess = tty.t_session;
576 			if (tty.t_pgrp != NULL) {
577 				if (kvm_read(tty.t_pgrp, &pgrp, sizeof (struct
578 				    pgrp)) != sizeof (struct pgrp)) {
579 					seterr("can't read tpgrp at &x",
580 						tty.t_pgrp);
581 					return (-1);
582 				}
583 				eproc.e_tpgid = pgrp.pg_id;
584 			} else
585 				eproc.e_tpgid = -1;
586 		} else
587 			eproc.e_tdev = NODEV;
588 		if (proc.p_wmesg)
589 			kvm_read(proc.p_wmesg, eproc.e_wmesg, WMESGLEN);
590 #ifdef NEWVM
591 		(void) kvm_read(proc.p_vmspace, &eproc.e_vm,
592 		    sizeof (struct vmspace));
593 		eproc.e_xsize = eproc.e_xrssize =
594 			eproc.e_xccount = eproc.e_xswrss = 0;
595 #else
596 		if (proc.p_textp) {
597 			kvm_read(proc.p_textp, &text, sizeof (text));
598 			eproc.e_xsize = text.x_size;
599 			eproc.e_xrssize = text.x_rssize;
600 			eproc.e_xccount = text.x_ccount;
601 			eproc.e_xswrss = text.x_swrss;
602 		} else {
603 			eproc.e_xsize = eproc.e_xrssize =
604 			  eproc.e_xccount = eproc.e_xswrss = 0;
605 		}
606 #endif
607 
608 		switch(ki_op(what)) {
609 
610 		case KINFO_PROC_PGRP:
611 			if (eproc.e_pgid != (pid_t)arg)
612 				continue;
613 			break;
614 
615 		case KINFO_PROC_TTY:
616 			if ((proc.p_flag&SCTTY) == 0 ||
617 			     eproc.e_tdev != (dev_t)arg)
618 				continue;
619 			break;
620 		}
621 
622 		i++;
623 		bcopy(&proc, bp, sizeof (struct proc));
624 		bp += sizeof (struct proc);
625 		bcopy(&eproc, bp, sizeof (struct eproc));
626 		bp+= sizeof (struct eproc);
627 	}
628 	if (!doingzomb) {
629 		/* zombproc */
630 		if (kvm_read((void *) nl[X_ZOMBPROC].n_value, &p,
631 		    sizeof (struct proc *)) != sizeof (struct proc *)) {
632 			seterr("can't read zombproc");
633 			return (-1);
634 		}
635 		doingzomb = 1;
636 		goto again;
637 	}
638 
639 	return (i);
640 }
641 
642 struct proc *
643 kvm_nextproc()
644 {
645 
646 	if (!kvmprocbase && kvm_getprocs(0, 0) == -1)
647 		return (NULL);
648 	if (kvmprocptr >= (kvmprocbase + kvmnprocs)) {
649 		seterr("end of proc list");
650 		return (NULL);
651 	}
652 	return((struct proc *)(kvmprocptr++));
653 }
654 
655 struct eproc *
656 kvm_geteproc(p)
657 	const struct proc *p;
658 {
659 	return ((struct eproc *)(((char *)p) + sizeof (struct proc)));
660 }
661 
/*
 * Rewind the snapshot cursor so kvm_nextproc() starts over.
 */
kvm_setproc()
{
	kvmprocptr = kvmprocbase;
}
666 
667 kvm_freeprocs()
668 {
669 
670 	if (kvmprocbase) {
671 		free(kvmprocbase);
672 		kvmprocbase = NULL;
673 	}
674 }
675 
#ifdef i386
/* See also ./sys/kern/kern_execve.c */
/* size of the argument region at the top of the user stack */
#define ARGSIZE		(roundup(ARG_MAX, NBPG))
#endif
680 
681 #ifdef NEWVM
682 struct user *
683 kvm_getu(p)
684 	const struct proc *p;
685 {
686 	register struct kinfo_proc *kp = (struct kinfo_proc *)p;
687 	register int i;
688 	register char *up;
689 	u_int vaddr;
690 	struct swapblk swb;
691 
692 	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
693 		return (NULL);
694 	if (p->p_stat == SZOMB) {
695 		seterr("zombie process");
696 		return (NULL);
697 	}
698 
699 	argaddr0 = argaddr1 = swaddr = 0;
700 	if ((p->p_flag & SLOAD) == 0) {
701 		vm_offset_t	maddr;
702 
703 		if (swap < 0) {
704 			seterr("no swap");
705 			return (NULL);
706 		}
707 		/*
708 		 * Costly operation, better set enable_swap to zero
709 		 * in vm/vm_glue.c, since paging of user pages isn't
710 		 * done yet anyway.
711 		 */
712 		if (vatosw(p, USRSTACK + i * NBPG, &maddr, &swb) == 0)
713 			return NULL;
714 
715 		if (maddr == 0 && swb.size < UPAGES * NBPG)
716 			return NULL;
717 
718 		for (i = 0; i < UPAGES; i++) {
719 			if (maddr) {
720 				(void) lseek(mem, maddr + i * NBPG, 0);
721 				if (read(mem,
722 				    (char *)user.upages[i], NBPG) != NBPG) {
723 					seterr(
724 					    "can't read u for pid %d from %s",
725 					    p->p_pid, swapf);
726 					return NULL;
727 				}
728 			} else {
729 				(void) lseek(swap, swb.offset + i * NBPG, 0);
730 				if (read(swap,
731 				    (char *)user.upages[i], NBPG) != NBPG) {
732 					seterr(
733 					    "can't read u for pid %d from %s",
734 					    p->p_pid, swapf);
735 					return NULL;
736 				}
737 			}
738 		}
739 		return(&user.user);
740 	}
741 	/*
742 	 * Read u-area one page at a time for the benefit of post-mortems
743 	 */
744 	up = (char *) p->p_addr;
745 	for (i = 0; i < UPAGES; i++) {
746 		klseek(kmem, (long)up, 0);
747 		if (read(kmem, user.upages[i], CLBYTES) != CLBYTES) {
748 			seterr("cant read page %x of u of pid %d from %s",
749 			    up, p->p_pid, kmemf);
750 			return(NULL);
751 		}
752 		up += CLBYTES;
753 	}
754 	pcbpf = (int) btop(p->p_addr);	/* what should this be really? */
755 	/*
756 	 * Conjure up a physical address for the arguments.
757 	 */
758 #ifdef hp300
759 	if (kp->kp_eproc.e_vm.vm_pmap.pm_ptab) {
760 		struct pte pte[CLSIZE*2];
761 
762 		klseek(kmem,
763 		    (long)&kp->kp_eproc.e_vm.vm_pmap.pm_ptab
764 		    [btoc(USRSTACK-CLBYTES*2)], 0);
765 		if (read(kmem, (char *)&pte, sizeof(pte)) == sizeof(pte)) {
766 #if CLBYTES < 2048
767 			argaddr0 = ctob(pftoc(pte[CLSIZE*0].pg_pfnum));
768 #endif
769 			argaddr1 = ctob(pftoc(pte[CLSIZE*1].pg_pfnum));
770 		}
771 	}
772 #endif
773 	kp->kp_eproc.e_vm.vm_rssize =
774 	    kp->kp_eproc.e_vm.vm_pmap.pm_stats.resident_count; /* XXX */
775 
776 	vaddr = (u_int)kp->kp_eproc.e_vm.vm_maxsaddr + MAXSSIZ - ARGSIZE;
777 
778 #ifdef i386
779 	if (kp->kp_eproc.e_vm.vm_pmap.pm_pdir) {
780 		struct pde pde;
781 
782 		klseek(kmem,
783 		(long)(&kp->kp_eproc.e_vm.vm_pmap.pm_pdir[pdei(vaddr)]), 0);
784 
785 		if (read(kmem, (char *)&pde, sizeof pde) == sizeof pde
786 				&& pde.pd_v) {
787 
788 			struct pte pte;
789 
790 			if (lseek(mem, (long)ctob(pde.pd_pfnum) +
791 					(ptei(vaddr) * sizeof pte), 0) == -1)
792 				seterr("kvm_getu: lseek");
793 			if (read(mem, (char *)&pte, sizeof pte) == sizeof pte) {
794 				if (pte.pg_v) {
795 					argaddr1 = (long)ctob(pte.pg_pfnum);
796 				} else {
797 					goto hard;
798 				}
799 			} else {
800 				seterr("kvm_getu: read");
801 			}
802 		} else {
803 			goto hard;
804 		}
805 	}
806 #endif	/* i386 */
807 
808 hard:
809 	if (vatosw(p, vaddr, &argaddr1, &swb)) {
810 		if (argaddr1 == 0 && swb.size >= ARGSIZE)
811 			swaddr = swb.offset;
812 	}
813 
814 	return(&user.user);
815 }
816 #else
/*
 * Read the u-area of process p into the static `user' buffer (old-VM
 * variant).  Swapped-out processes are read contiguously from the swap
 * device; resident ones are located through the per-process page
 * tables.  Also records argaddr0/argaddr1/pcbpf for kvm_getargs().
 * Returns &user.user, or NULL on error.
 */
struct user *
kvm_getu(p)
	const struct proc *p;
{
	struct pte *pteaddr, apte;
	struct pte arguutl[HIGHPAGES+(CLSIZE*2)];
	register int i;
	int ncl;

	if (kvminit == 0 && kvm_init(NULL, NULL, NULL, 0) == -1)
		return (NULL);
	if (p->p_stat == SZOMB) {
		seterr("zombie process");
		return (NULL);
	}
	if ((p->p_flag & SLOAD) == 0) {
		/* swapped out: the whole u. is contiguous on the swap device */
		if (swap < 0) {
			seterr("no swap");
			return (NULL);
		}
		(void) lseek(swap, (long)dtob(p->p_swaddr), 0);
		if (read(swap, (char *)&user.user, sizeof (struct user)) !=
		    sizeof (struct user)) {
			seterr("can't read u for pid %d from %s",
			    p->p_pid, swapf);
			return (NULL);
		}
		pcbpf = 0;
		argaddr0 = 0;
		argaddr1 = 0;
		return (&user.user);
	}
	/* resident: fetch the indirect pte that maps the u-area ptes */
	pteaddr = &Usrptmap[btokmx(p->p_p0br) + p->p_szpt - 1];
	klseek(kmem, (long)pteaddr, 0);
	if (read(kmem, (char *)&apte, sizeof(apte)) != sizeof(apte)) {
		seterr("can't read indir pte to get u for pid %d from %s",
		    p->p_pid, kmemf);
		return (NULL);
	}
	/* read the argument/u-area ptes from the end of that page */
	lseek(mem, (long)ctob(pftoc(apte.pg_pfnum+1)) - sizeof(arguutl), 0);
	if (read(mem, (char *)arguutl, sizeof(arguutl)) != sizeof(arguutl)) {
		seterr("can't read page table for u of pid %d from %s",
		    p->p_pid, memf);
		return (NULL);
	}
	/* remember where the argument pages live, when the ptes are valid */
	if (arguutl[0].pg_fod == 0 && arguutl[0].pg_pfnum)
		argaddr0 = ctob(pftoc(arguutl[0].pg_pfnum));
	else
		argaddr0 = 0;
	if (arguutl[CLSIZE*1].pg_fod == 0 && arguutl[CLSIZE*1].pg_pfnum)
		argaddr1 = ctob(pftoc(arguutl[CLSIZE*1].pg_pfnum));
	else
		argaddr1 = 0;
	pcbpf = arguutl[CLSIZE*2].pg_pfnum;
	/* copy the u. pages, one cluster at a time, highest first */
	ncl = (sizeof (struct user) + CLBYTES - 1) / CLBYTES;
	while (--ncl >= 0) {
		i = ncl * CLSIZE;
		lseek(mem,
		      (long)ctob(pftoc(arguutl[(CLSIZE*2)+i].pg_pfnum)), 0);
		if (read(mem, user.upages[i], CLBYTES) != CLBYTES) {
			seterr("can't read page %d of u of pid %d from %s",
			    arguutl[(CLSIZE*2)+i].pg_pfnum, p->p_pid, memf);
			return(NULL);
		}
	}
	return (&user.user);
}
884 #endif
885 
886 char *
887 kvm_getargs(p, up)
888 	const struct proc *p;
889 	const struct user *up;
890 {
891 #ifdef i386
892 	/* See also ./sys/kern/kern_execve.c */
893 	static char cmdbuf[ARGSIZE];
894 	static union {
895 		char	argc[ARGSIZE];
896 		int	argi[ARGSIZE/sizeof (int)];
897 	} argspac;
898 #else
899 	static char cmdbuf[CLBYTES*2];
900 	static union {
901 		char	argc[CLBYTES*2];
902 		int	argi[CLBYTES*2/sizeof (int)];
903 	} argspac;
904 #endif
905 	register char *cp;
906 	register int *ip;
907 	char c;
908 	int nbad;
909 #ifndef NEWVM
910 	struct dblock db;
911 #endif
912 	const char *file;
913 	int stkoff = 0;
914 
915 #if defined(NEWVM) && defined(hp300)
916 	stkoff = 20;			/* XXX for sigcode */
917 #endif
918 	if (up == NULL || p->p_pid == 0 || p->p_pid == 2)
919 		goto retucomm;
920 	if ((p->p_flag & SLOAD) == 0 || argaddr1 == 0) {
921 #ifdef NEWVM
922 		if (swaddr == 0)
923 			goto retucomm;	/* XXX for now */
924 #ifdef i386
925 		(void) lseek(swap, swaddr, 0);
926 		if (read(swap, &argspac.argc[0], ARGSIZE) != ARGSIZE)
927 			goto bad;
928 #else
929 		if (argaddr0) {
930 			lseek(swap, (long)argaddr0, 0);
931 			if (read(swap, (char *)&argspac, CLBYTES) != CLBYTES)
932 				goto bad;
933 		} else
934 			bzero(&argspac, CLBYTES);
935 		lseek(swap, (long)argaddr1, 0);
936 		if (read(swap, &argspac.argc[CLBYTES], CLBYTES) != CLBYTES)
937 			goto bad;
938 #endif
939 #else
940 		if (swap < 0 || p->p_ssize == 0)
941 			goto retucomm;
942 		vstodb(0, CLSIZE, &up->u_smap, &db, 1);
943 		(void) lseek(swap, (long)dtob(db.db_base), 0);
944 		if (read(swap, (char *)&argspac.argc[CLBYTES], CLBYTES)
945 			!= CLBYTES)
946 			goto bad;
947 		vstodb(1, CLSIZE, &up->u_smap, &db, 1);
948 		(void) lseek(swap, (long)dtob(db.db_base), 0);
949 		if (read(swap, (char *)&argspac.argc[0], CLBYTES) != CLBYTES)
950 			goto bad;
951 		file = swapf;
952 #endif
953 	} else {
954 #ifdef i386
955 		lseek(mem, (long)argaddr1, 0);
956 		if (read(mem, &argspac.argc[0], ARGSIZE) != ARGSIZE)
957 			goto bad;
958 #else
959 		if (argaddr0) {
960 			lseek(mem, (long)argaddr0, 0);
961 			if (read(mem, (char *)&argspac, CLBYTES) != CLBYTES)
962 				goto bad;
963 		} else
964 			bzero(&argspac, CLBYTES);
965 		lseek(mem, (long)argaddr1, 0);
966 		if (read(mem, &argspac.argc[CLBYTES], CLBYTES) != CLBYTES)
967 			goto bad;
968 #endif
969 		file = (char *) memf;
970 	}
971 
972 	nbad = 0;
973 #ifdef i386
974 	ip = &argspac.argi[(ARGSIZE-ARG_MAX)/sizeof (int)];
975 
976 	for (cp = (char *)ip; cp < &argspac.argc[ARGSIZE-stkoff]; cp++) {
977 #else
978 	ip = &argspac.argi[CLBYTES*2/sizeof (int)];
979 	ip -= 2;                /* last arg word and .long 0 */
980 	ip -= stkoff / sizeof (int);
981 	while (*--ip) {
982 		if (ip == argspac.argi)
983 			goto retucomm;
984 	}
985 	*(char *)ip = ' ';
986 	ip++;
987 
988 	for (cp = (char *)ip; cp < &argspac.argc[CLBYTES*2-stkoff]; cp++) {
989 #endif
990 		c = *cp;
991 		if (c == 0) {	/* convert null between arguments to space */
992 			*cp = ' ';
993 			if (*(cp+1) == 0) break;	/* if null argument follows then no more args */
994 			}
995 		else if (c < ' ' || c > 0176) {
996 			if (++nbad >= 5*(0+1)) {	/* eflg -> 0 XXX */ /* limit number of bad chars to 5 */
997 				*cp++ = '?';
998 				break;
999 			}
1000 			*cp = '?';
1001 		}
1002 		else if (0 == 0 && c == '=') {		/* eflg -> 0 XXX */
1003 			while (*--cp != ' ')
1004 				if (cp <= (char *)ip)
1005 					break;
1006 			break;
1007 		}
1008 	}
1009 	*cp = 0;
1010 	while (*--cp == ' ')
1011 		*cp = 0;
1012 	cp = (char *)ip;
1013 	(void) strcpy(cmdbuf, cp);
1014 	if (cp[0] == '-' || cp[0] == '?' || cp[0] <= ' ') {
1015 		(void) strcat(cmdbuf, " (");
1016 		(void) strncat(cmdbuf, p->p_comm, sizeof(p->p_comm));
1017 		(void) strcat(cmdbuf, ")");
1018 	}
1019 	return (cmdbuf);
1020 
1021 bad:
1022 	seterr("error locating command name for pid %d from %s",
1023 	    p->p_pid, file);
1024 retucomm:
1025 	(void) strcpy(cmdbuf, " (");
1026 	(void) strncat(cmdbuf, p->p_comm, sizeof (p->p_comm));
1027 	(void) strcat(cmdbuf, ")");
1028 	return (cmdbuf);
1029 }
1030 
1031 
1032 static
1033 getkvars()
1034 {
1035 	if (kvm_nlist(nl) == -1)
1036 		return (-1);
1037 	if (deadkernel) {
1038 		/* We must do the sys map first because klseek uses it */
1039 		long	addr;
1040 
1041 #ifndef NEWVM
1042 		Syssize = nl[X_SYSSIZE].n_value;
1043 		Sysmap = (struct pte *)
1044 			calloc((unsigned) Syssize, sizeof (struct pte));
1045 		if (Sysmap == NULL) {
1046 			seterr("out of space for Sysmap");
1047 			return (-1);
1048 		}
1049 		addr = (long) nl[X_SYSMAP].n_value;
1050 		addr &= ~KERNBASE;
1051 		(void) lseek(kmem, addr, 0);
1052 		if (read(kmem, (char *) Sysmap, Syssize * sizeof (struct pte))
1053 		    != Syssize * sizeof (struct pte)) {
1054 			seterr("can't read Sysmap");
1055 			return (-1);
1056 		}
1057 #endif
1058 #if defined(hp300)
1059 		addr = (long) nl[X_LOWRAM].n_value;
1060 		(void) lseek(kmem, addr, 0);
1061 		if (read(kmem, (char *) &lowram, sizeof (lowram))
1062 		    != sizeof (lowram)) {
1063 			seterr("can't read lowram");
1064 			return (-1);
1065 		}
1066 		lowram = btop(lowram);
1067 		Sysseg = (struct ste *) malloc(NBPG);
1068 		if (Sysseg == NULL) {
1069 			seterr("out of space for Sysseg");
1070 			return (-1);
1071 		}
1072 		addr = (long) nl[X_SYSSEG].n_value;
1073 		(void) lseek(kmem, addr, 0);
1074 		read(kmem, (char *)&addr, sizeof(addr));
1075 		(void) lseek(kmem, (long)addr, 0);
1076 		if (read(kmem, (char *) Sysseg, NBPG) != NBPG) {
1077 			seterr("can't read Sysseg");
1078 			return (-1);
1079 		}
1080 #endif
1081 #if defined(i386)
1082 		PTD = (struct pde *) malloc(NBPG);
1083 		if (PTD == NULL) {
1084 			seterr("out of space for PTD");
1085 			return (-1);
1086 		}
1087 		addr = (long) nl[X_IdlePTD].n_value;
1088 		(void) lseek(kmem, addr, 0);
1089 		read(kmem, (char *)&addr, sizeof(addr));
1090 		(void) lseek(kmem, (long)addr, 0);
1091 		if (read(kmem, (char *) PTD, NBPG) != NBPG) {
1092 			seterr("can't read PTD");
1093 			return (-1);
1094 		}
1095 #endif
1096 	}
1097 #ifndef NEWVM
1098 	usrpt = (struct pte *)nl[X_USRPT].n_value;
1099 	Usrptmap = (struct pte *)nl[X_USRPTMAP].n_value;
1100 #endif
1101 	if (kvm_read((void *) nl[X_NSWAP].n_value, &nswap, sizeof (long)) !=
1102 	    sizeof (long)) {
1103 		seterr("can't read nswap");
1104 		return (-1);
1105 	}
1106 	if (kvm_read((void *) nl[X_DMMIN].n_value, &dmmin, sizeof (long)) !=
1107 	    sizeof (long)) {
1108 		seterr("can't read dmmin");
1109 		return (-1);
1110 	}
1111 	if (kvm_read((void *) nl[X_DMMAX].n_value, &dmmax, sizeof (long)) !=
1112 	    sizeof (long)) {
1113 		seterr("can't read dmmax");
1114 		return (-1);
1115 	}
1116 	return (0);
1117 }
1118 
1119 kvm_read(loc, buf, len)
1120 	void *loc;
1121 	void *buf;
1122 {
1123 	if (kvmfilesopen == 0 && kvm_openfiles(NULL, NULL, NULL) == -1)
1124 		return (-1);
1125 	if (iskva(loc)) {
1126 		klseek(kmem, (off_t) loc, 0);
1127 		if (read(kmem, buf, len) != len) {
1128 			seterr("error reading kmem at %x", loc);
1129 			return (-1);
1130 		}
1131 	} else {
1132 		lseek(mem, (off_t) loc, 0);
1133 		if (read(mem, buf, len) != len) {
1134 			seterr("error reading mem at %x", loc);
1135 			return (-1);
1136 		}
1137 	}
1138 	return (len);
1139 }
1140 
1141 static void
1142 klseek(fd, loc, off)
1143 	int fd;
1144 	off_t loc;
1145 	int off;
1146 {
1147 
1148 	if (deadkernel) {
1149 		if ((loc = Vtophys(loc)) == -1)
1150 			return;
1151 	}
1152 	(void) lseek(fd, (off_t)loc, off);
1153 }
1154 
#ifndef NEWVM
/*
 * Given a base/size pair in virtual swap area,
 * return a physical base/size pair which is the
 * (largest) initial, physically contiguous block.
 *
 * vsbase/vssize arrive in clicks; dmp is the per-process swap map.
 * When `rev' is set the block is addressed from the high end of its
 * swap-map chunk.  NOTE(review): `rev' has no declaration and so
 * defaults to int under K&R rules — confirm that is intentional.
 */
static void
vstodb(vsbase, vssize, dmp, dbp, rev)
	register int vsbase;
	int vssize;
	struct dmap *dmp;
	register struct dblock *dbp;
{
	register int blk = dmmin;
	register swblk_t *ip = dmp->dm_map;

	vsbase = ctod(vsbase);
	vssize = ctod(vssize);
	/* sanity checks are disabled: a library cannot panic() */
	if (vsbase < 0 || vsbase + vssize > dmp->dm_size)
		/*panic("vstodb")*/;
	/* walk the map: chunk sizes double from dmmin up to dmmax */
	while (vsbase >= blk) {
		vsbase -= blk;
		if (blk < dmmax)
			blk *= 2;
		ip++;
	}
	if (*ip <= 0 || *ip + blk > nswap)
		/*panic("vstodb")*/;
	dbp->db_size = MIN(vssize, blk - vsbase);
	dbp->db_base = *ip + (rev ? blk - (vsbase + dbp->db_size) : vsbase);
}
#endif
1187 
#ifdef NEWVM
/*
 * Translate kernel virtual address loc into an offset suitable for
 * reading the physical-memory file, by walking the machine-dependent
 * page tables previously loaded into Sysseg (hp300) or PTD (i386).
 * Returns the translated offset, or (off_t)-1 on failure with a
 * message recorded via seterr().  If neither hp300 nor i386 is
 * defined, always returns (off_t)-1.
 */
static off_t
Vtophys(loc)
	u_long	loc;
{
	off_t newloc = (off_t) -1;
#ifdef hp300
	int p, ste, pte;

	/* Segment table entry covering loc; SG_V marks it valid. */
	ste = *(int *)&Sysseg[loc >> SG_ISHIFT];
	if ((ste & SG_V) == 0) {
		seterr("vtophys: segment not valid");
		return((off_t) -1);
	}
	/* Page index within the segment -> file offset of its pte. */
	p = btop(loc & SG_PMASK);
	newloc = (ste & SG_FRAME) + (p * sizeof(struct pte));
	/* kmem offsets are biased by the first RAM page (lowram). */
	(void) lseek(kmem, (long)(newloc-(off_t)ptob(lowram)), 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot locate pte");
		return((off_t) -1);
	}
	newloc = pte & PG_FRAME;
	if (pte == PG_NV || newloc < (off_t)ptob(lowram)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
	/* Frame base minus lowram bias, plus offset within the page. */
	newloc = (newloc - (off_t)ptob(lowram)) + (loc & PGOFSET);
#endif
#ifdef i386
	struct pde pde;
	struct pte pte;
	int p;

	/* Page directory entry for loc; PTD was read at open time. */
	pde = PTD[loc >> PD_SHIFT];
	if (pde.pd_v == 0) {
		seterr("vtophys: page directory entry not valid");
		return((off_t) -1);
	}
	p = btop(loc & PT_MASK);
	/*
	 * NOTE(review): pd_pfnum is used directly as a byte base here
	 * (no ptob()) — presumably correct for this layout, but confirm
	 * against the i386 pmap headers.
	 */
	newloc = pde.pd_pfnum + (p * sizeof(struct pte));
	(void) lseek(kmem, (long)newloc, 0);
	if (read(kmem, (char *)&pte, sizeof pte) != sizeof pte) {
		seterr("vtophys: cannot obtain desired pte");
		return((off_t) -1);
	}
	newloc = pte.pg_pfnum;
	if (pte.pg_v == 0) {
		seterr("vtophys: page table entry not valid");
		return((off_t) -1);
	}
	newloc += (loc & PGOFSET);
#endif
	return((off_t) newloc);
}
#else
/*
 * Translate kernel virtual address loc to a physical byte offset
 * using the kernel's Sysmap page table (pre-NEWVM VM system).
 * Returns the offset, or (off_t)-1 on failure with a message
 * recorded via seterr().
 */
static off_t
vtophys(loc)
	long loc;
{
	int p;
	off_t newloc;
	register struct pte *pte;

	/* Strip the kernel base to get a page index into Sysmap. */
	newloc = loc & ~KERNBASE;
	p = btop(newloc);
#if defined(vax) || defined(tahoe)
	/* On these machines every kernel address has KERNBASE set. */
	if ((loc & KERNBASE) == 0) {
		seterr("vtophys: translating non-kernel address");
		return((off_t) -1);
	}
#endif
	if (p >= Syssize) {
		seterr("vtophys: page out of bound (%d>=%d)", p, Syssize);
		return((off_t) -1);
	}
	pte = &Sysmap[p];
	/* An invalid pte is still usable unless fill-on-demand or frame 0. */
	if (pte->pg_v == 0 && (pte->pg_fod || pte->pg_pfnum == 0)) {
		seterr("vtophys: page not valid");
		return((off_t) -1);
	}
#if defined(hp300)
	/* Frames below lowram are not RAM on hp300. */
	if (pte->pg_pfnum < lowram) {
		seterr("vtophys: non-RAM page (%d<%d)", pte->pg_pfnum, lowram);
		return((off_t) -1);
	}
#endif
	/* Frame number -> byte offset, plus the offset within the page. */
	loc = (long) (ptob(pftoc(pte->pg_pfnum)) + (loc & PGOFSET));
	return(loc);
}
#endif
1278 
1279 
#ifdef NEWVM
/*
 * locate address of unwired or swapped page
 */

#define DEBUG 0

/* Read len bytes of kernel memory at off into addr; true on success. */
#define KREAD(off, addr, len) \
	(kvm_read((void *)(off), (char *)(addr), (len)) == (len))


/*
 * Translate user virtual address vaddr of process p into either a
 * physical memory address (*maddr, via the page hash in findpage())
 * or a swap location (*swb), by walking the process's vm_map, the
 * shadow-object chain, and finally the swap pager's block array —
 * all read out of kernel memory with KREAD().
 * Returns 1 on success, 0 on failure (message via seterr/setsyserr).
 */
static int
vatosw(p, vaddr, maddr, swb)
struct proc	*p ;
vm_offset_t	vaddr;
vm_offset_t	*maddr;
struct swapblk	*swb;
{
	register struct kinfo_proc *kp = (struct kinfo_proc *)p;
	vm_map_t		mp = &kp->kp_eproc.e_vm.vm_map;
	struct vm_object	vm_object;
	struct vm_map_entry	vm_entry;
	struct pager_struct	pager;
	struct swpager		swpager;
	struct swblock		swblock;
	long			addr, off;
	int			i;

	/* pids 0 and 2 are skipped — presumably system processes
	 * (swapper/pagedaemon) with no pageable user map; confirm. */
	if (p->p_pid == 0 || p->p_pid == 2)
		return 0;

	addr = (long)mp->header.next;
	for (i = 0; i < mp->nentries; i++) {
		/* Weed through map entries until vaddr in range */
		if (!KREAD(addr, &vm_entry, sizeof(vm_entry))) {
			setsyserr("vatosw: read vm_map_entry");
			return 0;
		}
		if ((vaddr >= vm_entry.start) && (vaddr <= vm_entry.end) &&
				(vm_entry.object.vm_object != 0))
			break;

		addr = (long)vm_entry.next;
	}
	if (i == mp->nentries) {
		seterr("%u: map not found\n", p->p_pid);
		return 0;
	}

	/* Share/sub maps require a nested lookup, not handled here. */
	if (vm_entry.is_a_map || vm_entry.is_sub_map) {
		seterr("%u: Is a map\n", p->p_pid);
		return 0;
	}

	/* Locate memory object */
	off = (vaddr - vm_entry.start) + vm_entry.offset;
	addr = (long)vm_entry.object.vm_object;
	/* Follow the shadow chain until the page is found in core. */
	while (1) {
		if (!KREAD(addr, &vm_object, sizeof vm_object)) {
			setsyserr("vatosw: read vm_object");
			return 0;
		}

#if DEBUG
		fprintf(stderr, "%u: find page: object %#x offset %x\n",
				p->p_pid, addr, off);
#endif

		/* Lookup in page queue */
		if (findpage(addr, off, maddr))
			return 1;

		if (vm_object.shadow == 0)
			break;

#if DEBUG
		fprintf(stderr, "%u: shadow obj at %x: offset %x+%x\n",
				p->p_pid, addr, off, vm_object.shadow_offset);
#endif

		addr = (long)vm_object.shadow;
		off += vm_object.shadow_offset;
	}

	/* Not resident anywhere in the chain: must be paged out. */
	if (!vm_object.pager) {
		seterr("%u: no pager\n", p->p_pid);
		return 0;
	}

	/* Find address in swap space */
	if (!KREAD(vm_object.pager, &pager, sizeof pager)) {
		setsyserr("vatosw: read pager");
		return 0;
	}
	if (pager.pg_type != PG_SWAP) {
		seterr("%u: weird pager\n", p->p_pid);
		return 0;
	}

	/* Get swap pager data */
	if (!KREAD(pager.pg_data, &swpager, sizeof swpager)) {
		setsyserr("vatosw: read swpager");
		return 0;
	}

	off += vm_object.paging_offset;

	/* Read swap block array */
	if (!KREAD((long)swpager.sw_blocks +
			(off/dbtob(swpager.sw_bsize)) * sizeof swblock,
			&swblock, sizeof swblock)) {
		setsyserr("vatosw: read swblock");
		return 0;
	}
	/* Swap offset within the block, and bytes remaining to its end. */
	swb->offset = dbtob(swblock.swb_block)+ (off % dbtob(swpager.sw_bsize));
	swb->size = dbtob(swpager.sw_bsize) - (off % dbtob(swpager.sw_bsize));
	return 1;
}
1398 
1399 
/* Address-to-page-number shift, using page_shift read from the kernel. */
#define atop(x)		(((unsigned)(x)) >> page_shift)
/* Must mirror the kernel's vm_page hash so we probe the same bucket. */
#define vm_page_hash(object, offset) \
        (((unsigned)object+(unsigned)atop(offset))&vm_page_hash_mask)

/*
 * Look up the physical address of the page belonging to (object, offset)
 * by probing the kernel's vm_page hash buckets through kvm_read().
 * On success stores the physical address in *maddr and returns 1;
 * returns 0 if the page is not resident or a kernel read fails.
 * The hash parameters are read from the kernel once and cached in
 * function-local statics.
 */
static int
findpage(object, offset, maddr)
long			object;
long			offset;
vm_offset_t		*maddr;
{
static	long		vm_page_hash_mask;
static	long		vm_page_buckets;
static	long		page_shift;
	queue_head_t	bucket;
	struct vm_page	mem;
	long		addr, baddr;

	if (vm_page_hash_mask == 0 && !KREAD(nl[X_VM_PAGE_HASH_MASK].n_value,
			&vm_page_hash_mask, sizeof (long))) {
		seterr("can't read vm_page_hash_mask");
		return 0;
	}
	if (page_shift == 0 && !KREAD(nl[X_PAGE_SHIFT].n_value,
			&page_shift, sizeof (long))) {
		seterr("can't read page_shift");
		return 0;
	}
	if (vm_page_buckets == 0 && !KREAD(nl[X_VM_PAGE_BUCKETS].n_value,
			&vm_page_buckets, sizeof (long))) {
		seterr("can't read vm_page_buckets");
		return 0;
	}

	/* Kernel address of the hash bucket for (object, offset). */
	baddr = vm_page_buckets + vm_page_hash(object,offset) * sizeof(queue_head_t);
	if (!KREAD(baddr, &bucket, sizeof (bucket))) {
		seterr("can't read vm_page_bucket");
		return 0;
	}

	/* Walk the circular hash chain; it terminates back at the bucket. */
	addr = (long)bucket.next;
	while (addr != baddr) {
		if (!KREAD(addr, &mem, sizeof (mem))) {
			seterr("can't read vm_page");
			return 0;
		}
		if ((long)mem.object == object && mem.offset == offset) {
			*maddr = (long)mem.phys_addr;
			return 1;
		}
		addr = (long)mem.hashq.next;
	}
	return 0;
}
#endif	/* NEWVM */
1454 
1455 #include <varargs.h>
1456 static char errbuf[_POSIX2_LINE_MAX];
1457 
1458 static void
1459 seterr(va_alist)
1460 	va_dcl
1461 {
1462 	char *fmt;
1463 	va_list ap;
1464 
1465 	va_start(ap);
1466 	fmt = va_arg(ap, char *);
1467 	(void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1468 #if DEBUG
1469 	(void) vfprintf(stderr, fmt, ap);
1470 #endif
1471 	va_end(ap);
1472 }
1473 
1474 static void
1475 setsyserr(va_alist)
1476 	va_dcl
1477 {
1478 	char *fmt, *cp;
1479 	va_list ap;
1480 	extern int errno;
1481 
1482 	va_start(ap);
1483 	fmt = va_arg(ap, char *);
1484 	(void) vsnprintf(errbuf, _POSIX2_LINE_MAX, fmt, ap);
1485 	for (cp=errbuf; *cp; cp++)
1486 		;
1487 	snprintf(cp, _POSIX2_LINE_MAX - (cp - errbuf), ": %s", strerror(errno));
1488 	va_end(ap);
1489 }
1490 
1491 char *
1492 kvm_geterr()
1493 {
1494 	return (errbuf);
1495 }
1496