1 /*	$OpenBSD: kvm.c,v 1.61 2016/05/14 14:24:54 kettenis Exp $ */
2 /*	$NetBSD: kvm.c,v 1.43 1996/05/05 04:31:59 gwr Exp $	*/
3 
4 /*-
5  * Copyright (c) 1989, 1992, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software developed by the Computer Systems
9  * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
10  * BG 91-66 and contributed to Berkeley.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  */
36 
37 #include <sys/param.h>	/* MAXCOMLEN MID_MACHINE */
38 #include <sys/proc.h>
39 #include <sys/ioctl.h>
40 #include <sys/stat.h>
41 #include <sys/sysctl.h>
42 
43 #include <sys/core.h>
44 #include <sys/exec.h>
45 #include <sys/kcore.h>
46 
47 #include <errno.h>
48 #include <ctype.h>
49 #include <db.h>
50 #include <fcntl.h>
51 #include <libgen.h>
52 #include <limits.h>
53 #include <nlist.h>
54 #include <paths.h>
55 #include <stdio.h>
56 #include <stdlib.h>
57 #include <string.h>
58 #include <unistd.h>
59 #include <kvm.h>
60 #include <stdarg.h>
61 
62 #include "kvm_private.h"
63 
64 extern int __fdnlist(int, struct nlist *);
65 
66 static int	kvm_dbopen(kvm_t *, const char *);
67 static int	kvm_opennamelist(kvm_t *, const char *);
68 static int	_kvm_get_header(kvm_t *);
69 static kvm_t	*_kvm_open(kvm_t *, const char *, const char *, const char *,
70 		     int, char *);
71 static int	clear_gap(kvm_t *, FILE *, int);
72 
73 char *
74 kvm_geterr(kvm_t *kd)
75 {
76 	return (kd->errbuf);
77 }
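
/*
 * Illustrative usage sketch (not part of the library): after a kvm call
 * fails, the buffered reason is fetched with kvm_geterr().  The handle
 * "kd", the address "addr" and the object "val" are assumed to be
 * supplied by the caller.
 *
 *	if (kvm_read(kd, addr, &val, sizeof(val)) != sizeof(val))
 *		warnx("kvm_read: %s", kvm_geterr(kd));
 */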
78 
79 /*
80  * Wrapper around pread.
81  */
82 ssize_t
83 _kvm_pread(kvm_t *kd, int fd, void *buf, size_t nbytes, off_t offset)
84 {
85 	ssize_t rval;
86 
87 	errno = 0;
88 	rval = pread(fd, buf, nbytes, offset);
89 	if (rval == -1 || errno != 0) {
90 		_kvm_syserr(kd, kd->program, "pread");
91 	}
92 	return (rval);
93 }
94 
95 /*
96  * Wrapper around pwrite.
97  */
98 ssize_t
99 _kvm_pwrite(kvm_t *kd, int fd, const void *buf, size_t nbytes, off_t offset)
100 {
101 	ssize_t rval;
102 
103 	errno = 0;
104 	rval = pwrite(fd, buf, nbytes, offset);
105 	if (rval == -1 || errno != 0) {
106 		_kvm_syserr(kd, kd->program, "pwrite");
107 	}
108 	return (rval);
109 }
110 
111 /*
112  * Report an error using printf style arguments.  "program" is kd->program
113  * on hard errors, and 0 on soft errors, so that under sun error emulation,
114  * only hard errors are printed out (otherwise, programs like gdb will
115  * generate tons of error messages when trying to access bogus pointers).
116  */
117 void
118 _kvm_err(kvm_t *kd, const char *program, const char *fmt, ...)
119 {
120 	va_list ap;
121 
122 	va_start(ap, fmt);
123 	if (program != NULL) {
124 		(void)fprintf(stderr, "%s: ", program);
125 		(void)vfprintf(stderr, fmt, ap);
126 		(void)fputc('\n', stderr);
127 	} else
128 		(void)vsnprintf(kd->errbuf,
129 		    sizeof(kd->errbuf), fmt, ap);
130 
131 	va_end(ap);
132 }
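
/*
 * Two calls taken from elsewhere in this file illustrate the convention
 * described above: passing kd->program reports a hard error on stderr,
 * while passing 0 records a soft error in kd->errbuf for kvm_geterr().
 *
 *	_kvm_err(kd, kd->program, "%s: not physical memory device", mf);
 *	_kvm_err(kd, 0, "invalid address (%lx)", kva);
 */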
133 
134 void
135 _kvm_syserr(kvm_t *kd, const char *program, const char *fmt, ...)
136 {
137 	va_list ap;
138 	size_t n;
139 
140 	va_start(ap, fmt);
141 	if (program != NULL) {
142 		(void)fprintf(stderr, "%s: ", program);
143 		(void)vfprintf(stderr, fmt, ap);
144 		(void)fprintf(stderr, ": %s\n", strerror(errno));
145 	} else {
146 		char *cp = kd->errbuf;
147 
148 		(void)vsnprintf(cp, sizeof(kd->errbuf), fmt, ap);
149 		n = strlen(cp);
150 		(void)snprintf(&cp[n], sizeof(kd->errbuf) - n, ": %s",
151 		    strerror(errno));
152 	}
153 	va_end(ap);
154 }
155 
156 void *
157 _kvm_malloc(kvm_t *kd, size_t n)
158 {
159 	void *p;
160 
161 	if ((p = malloc(n)) == NULL)
162 		_kvm_err(kd, kd->program, "%s", strerror(errno));
163 	return (p);
164 }
165 
166 void *
167 _kvm_realloc(kvm_t *kd, void *p, size_t n)
168 {
169 	if ((p = realloc(p, n)) == NULL)
170 		_kvm_err(kd, kd->program, "%s", strerror(errno));
171 	return (p);
172 }
173 
174 static kvm_t *
175 _kvm_open(kvm_t *kd, const char *uf, const char *mf, const char *sf,
176     int flag, char *errout)
177 {
178 	struct stat st;
179 
180 	kd->db = 0;
181 	kd->pmfd = -1;
182 	kd->vmfd = -1;
183 	kd->swfd = -1;
184 	kd->nlfd = -1;
185 	kd->alive = 0;
186 	kd->filebase = NULL;
187 	kd->procbase = NULL;
188 	kd->nbpg = getpagesize();
189 	kd->swapspc = 0;
190 	kd->argspc = 0;
191 	kd->argbuf = 0;
192 	kd->argv = 0;
193 	kd->vmst = NULL;
194 	kd->vm_page_buckets = 0;
195 	kd->kcore_hdr = 0;
196 	kd->cpu_dsize = 0;
197 	kd->cpu_data = 0;
198 	kd->dump_off = 0;
199 
200 	if (flag & KVM_NO_FILES) {
201 		kd->alive = 1;
202 		return (kd);
203 	}
204 
205 	if (uf && strlen(uf) >= PATH_MAX) {
206 		_kvm_err(kd, kd->program, "exec file name too long");
207 		goto failed;
208 	}
209 	if (flag != O_RDONLY && flag != O_WRONLY && flag != O_RDWR) {
210 		_kvm_err(kd, kd->program, "bad flags arg");
211 		goto failed;
212 	}
213 	flag |= O_CLOEXEC;
214 
215 	if (mf == 0)
216 		mf = _PATH_MEM;
217 
218 	if ((kd->pmfd = open(mf, flag)) < 0) {
219 		_kvm_syserr(kd, kd->program, "%s", mf);
220 		goto failed;
221 	}
222 	if (fstat(kd->pmfd, &st) < 0) {
223 		_kvm_syserr(kd, kd->program, "%s", mf);
224 		goto failed;
225 	}
226 	if (S_ISCHR(st.st_mode)) {
227 		/*
228 		 * If this is a character special device, then check that
229 		 * it's /dev/mem.  If so, open kmem too.  (Maybe we should
230 		 * make it work for either /dev/mem or /dev/kmem -- in either
231 		 * case you're working with a live kernel.)
232 		 */
233 		if (strcmp(mf, _PATH_MEM) != 0) {	/* XXX */
234 			_kvm_err(kd, kd->program,
235 				 "%s: not physical memory device", mf);
236 			goto failed;
237 		}
238 		if ((kd->vmfd = open(_PATH_KMEM, flag)) < 0) {
239 			_kvm_syserr(kd, kd->program, "%s", _PATH_KMEM);
240 			goto failed;
241 		}
242 		kd->alive = 1;
243 		if (sf != NULL && (kd->swfd = open(sf, flag)) < 0) {
244 			_kvm_syserr(kd, kd->program, "%s", sf);
245 			goto failed;
246 		}
247 		/*
248 		 * Open the kvm nlist database.  The database name is
249 		 * derived from the basename of the namelist file (or
250 		 * _PATH_UNIX if none was given).  If it cannot or should
251 		 * not be opened, open the namelist argument so we
252 		 * revert to slow nlist() calls.
253 		 * If no file is specified, try opening _PATH_KSYMS and
254 		 * fall back to _PATH_UNIX.
255 		 */
256 		if (kvm_dbopen(kd, uf ? uf : _PATH_UNIX) == -1 &&
257 		    kvm_opennamelist(kd, uf))
258 			goto failed;
259 	} else {
260 		/*
261 		 * This is a crash dump.
262 		 * Initialize the virtual address translation machinery,
263 		 * but first setup the namelist fd.
264 		 * If no file is specified, try opening _PATH_KSYMS and
265 		 * fall back to _PATH_UNIX.
266 		 */
267 		if (kvm_opennamelist(kd, uf))
268 			goto failed;
269 
270 		/*
271 		 * If there is no valid core header, fail silently here.
272 		 * Address translation, however, will fail without a
273 		 * header.  Things can be made to run by calling
274 		 * kvm_dump_mkheader() before doing any translation.
275 		 */
276 		if (_kvm_get_header(kd) == 0) {
277 			if (_kvm_initvtop(kd) < 0)
278 				goto failed;
279 		}
280 	}
281 	return (kd);
282 failed:
283 	/*
284 	 * Copy out the error if doing sane error semantics.
285 	 */
286 	if (errout != 0)
287 		(void)strlcpy(errout, kd->errbuf, _POSIX2_LINE_MAX);
288 	(void)kvm_close(kd);
289 	return (0);
290 }
291 
292 static int
293 kvm_opennamelist(kvm_t *kd, const char *uf)
294 {
295 	int fd;
296 
297 	if (uf != NULL)
298 		fd = open(uf, O_RDONLY | O_CLOEXEC);
299 	else {
300 		fd = open(_PATH_KSYMS, O_RDONLY | O_CLOEXEC);
301 		uf = _PATH_UNIX;
302 		if (fd == -1)
303 			fd = open(uf, O_RDONLY | O_CLOEXEC);
304 	}
305 	if (fd == -1) {
306 		_kvm_syserr(kd, kd->program, "%s", uf);
307 		return (-1);
308 	}
309 
310 	kd->nlfd = fd;
311 	return (0);
312 }
313 
314 /*
315  * The kernel dump file (from savecore) contains:
316  *    kcore_hdr_t kcore_hdr;
317  *    kcore_seg_t cpu_hdr;
318  *    (opaque)    cpu_data; (size is cpu_hdr.c_size)
319  *    kcore_seg_t mem_hdr;
320  *    (memory)    mem_data; (size is mem_hdr.c_size)
321  *
322  * Note: kcore_hdr is padded to kcore_hdr.c_hdrsize;
323  * cpu_hdr and mem_hdr are padded to kcore_hdr.c_seghdrsize.
324  */
325 static int
326 _kvm_get_header(kvm_t *kd)
327 {
328 	kcore_hdr_t	kcore_hdr;
329 	kcore_seg_t	cpu_hdr;
330 	kcore_seg_t	mem_hdr;
331 	size_t		offset;
332 	ssize_t		sz;
333 
334 	/*
335 	 * Read the kcore_hdr_t
336 	 */
337 	sz = _kvm_pread(kd, kd->pmfd, &kcore_hdr, sizeof(kcore_hdr), (off_t)0);
338 	if (sz != sizeof(kcore_hdr)) {
339 		return (-1);
340 	}
341 
342 	/*
343 	 * Currently, we only support dump-files made by the current
344 	 * architecture...
345 	 */
346 	if ((CORE_GETMAGIC(kcore_hdr) != KCORE_MAGIC) ||
347 	    (CORE_GETMID(kcore_hdr) != MID_MACHINE))
348 		return (-1);
349 
350 	/*
351 	 * Currently, we only support exactly 2 segments: cpu-segment
352 	 * and data-segment in exactly that order.
353 	 */
354 	if (kcore_hdr.c_nseg != 2)
355 		return (-1);
356 
357 	/*
358 	 * Save away the kcore_hdr.  All errors after this
359 	 * should do a "goto fail" to deallocate things.
360 	 */
361 	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr));
362 	if (kd->kcore_hdr == NULL)
363 		goto fail;
364 	memcpy(kd->kcore_hdr, &kcore_hdr, sizeof(kcore_hdr));
365 	offset = kcore_hdr.c_hdrsize;
366 
367 	/*
368 	 * Read the CPU segment header
369 	 */
370 	sz = _kvm_pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)offset);
371 	if (sz != sizeof(cpu_hdr)) {
372 		goto fail;
373 	}
374 
375 	if ((CORE_GETMAGIC(cpu_hdr) != KCORESEG_MAGIC) ||
376 	    (CORE_GETFLAG(cpu_hdr) != CORE_CPU))
377 		goto fail;
378 	offset += kcore_hdr.c_seghdrsize;
379 
380 	/*
381 	 * Read the CPU segment DATA.
382 	 */
383 	kd->cpu_dsize = cpu_hdr.c_size;
384 	kd->cpu_data = _kvm_malloc(kd, (size_t)cpu_hdr.c_size);
385 	if (kd->cpu_data == NULL)
386 		goto fail;
387 
388 	sz = _kvm_pread(kd, kd->pmfd, kd->cpu_data, (size_t)cpu_hdr.c_size,
389 	    (off_t)offset);
390 	if (sz != (ssize_t)cpu_hdr.c_size) {
391 		goto fail;
392 	}
393 
394 	offset += cpu_hdr.c_size;
395 
396 	/*
397 	 * Read the next segment header: data segment
398 	 */
399 	sz = _kvm_pread(kd, kd->pmfd, &mem_hdr, sizeof(mem_hdr), (off_t)offset);
400 	if (sz != sizeof(mem_hdr)) {
401 		goto fail;
402 	}
403 
404 	offset += kcore_hdr.c_seghdrsize;
405 
406 	if ((CORE_GETMAGIC(mem_hdr) != KCORESEG_MAGIC) ||
407 	    (CORE_GETFLAG(mem_hdr) != CORE_DATA))
408 		goto fail;
409 
410 	kd->dump_off = offset;
411 	return (0);
412 
413 fail:
414 	free(kd->kcore_hdr);
415 	kd->kcore_hdr = NULL;
416 	if (kd->cpu_data != NULL) {
417 		free(kd->cpu_data);
418 		kd->cpu_data = NULL;
419 		kd->cpu_dsize = 0;
420 	}
421 
422 	return (-1);
423 }
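
/*
 * Worked example of the offsets computed above (the header sizes are
 * assumed, purely for illustration): with c_hdrsize == 512 and
 * c_seghdrsize == 512, the sections of a savecore(8) file start at
 *
 *	kcore_hdr	0
 *	cpu_hdr		512			(c_hdrsize)
 *	cpu_data	1024			(c_hdrsize + c_seghdrsize)
 *	mem_hdr		1024 + cpu_hdr.c_size
 *	mem_data	1536 + cpu_hdr.c_size	(this becomes kd->dump_off)
 */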
424 
425 /*
426  * The format while on the dump device is: (new format)
427  *    kcore_seg_t cpu_hdr;
428  *    (opaque)    cpu_data; (size is cpu_hdr.c_size)
429  *    kcore_seg_t mem_hdr;
430  *    (memory)    mem_data; (size is mem_hdr.c_size)
431  */
432 int
433 kvm_dump_mkheader(kvm_t *kd, off_t dump_off)
434 {
435 	kcore_seg_t	cpu_hdr;
436 	int	hdr_size;
437 	ssize_t sz;
438 
439 	if (kd->kcore_hdr != NULL) {
440 		_kvm_err(kd, kd->program, "already has a dump header");
441 		return (-1);
442 	}
443 	if (ISALIVE(kd)) {
444 		_kvm_err(kd, kd->program, "don't use on live kernel");
445 		return (-1);
446 	}
447 
448 	/*
449 	 * Validate new format crash dump
450 	 */
451 	sz = _kvm_pread(kd, kd->pmfd, &cpu_hdr, sizeof(cpu_hdr), (off_t)dump_off);
452 	if (sz != sizeof(cpu_hdr)) {
453 		return (-1);
454 	}
455 	if ((CORE_GETMAGIC(cpu_hdr) != KCORE_MAGIC) ||
456 	    (CORE_GETMID(cpu_hdr) != MID_MACHINE)) {
457 		_kvm_err(kd, 0, "invalid magic in cpu_hdr");
458 		return (-1);
459 	}
460 	hdr_size = ALIGN(sizeof(cpu_hdr));
461 
462 	/*
463 	 * Read the CPU segment.
464 	 */
465 	kd->cpu_dsize = cpu_hdr.c_size;
466 	kd->cpu_data = _kvm_malloc(kd, kd->cpu_dsize);
467 	if (kd->cpu_data == NULL)
468 		goto fail;
469 
470 	sz = _kvm_pread(kd, kd->pmfd, kd->cpu_data, (size_t)cpu_hdr.c_size,
471 	    (off_t)dump_off+hdr_size);
472 	if (sz != (ssize_t)cpu_hdr.c_size) {
473 		_kvm_err(kd, 0, "invalid size in cpu_hdr");
474 		goto fail;
475 	}
476 	hdr_size += kd->cpu_dsize;
477 
478 	/*
479 	 * Leave phys mem pointer at beginning of memory data
480 	 */
481 	kd->dump_off = dump_off + hdr_size;
482 	errno = 0;
483 	if (lseek(kd->pmfd, kd->dump_off, SEEK_SET) != kd->dump_off && errno != 0) {
484 		_kvm_err(kd, 0, "invalid dump offset - lseek");
485 		goto fail;
486 	}
487 
488 	/*
489 	 * Create a kcore_hdr.
490 	 */
491 	kd->kcore_hdr = _kvm_malloc(kd, sizeof(kcore_hdr_t));
492 	if (kd->kcore_hdr == NULL)
493 		goto fail;
494 
495 	kd->kcore_hdr->c_hdrsize    = ALIGN(sizeof(kcore_hdr_t));
496 	kd->kcore_hdr->c_seghdrsize = ALIGN(sizeof(kcore_seg_t));
497 	kd->kcore_hdr->c_nseg       = 2;
498 	CORE_SETMAGIC(*(kd->kcore_hdr), KCORE_MAGIC, MID_MACHINE,0);
499 
500 	/*
501 	 * Now that we have a valid header, enable translations.
502 	 */
503 	if (_kvm_initvtop(kd) == 0)
504 		/* Success */
505 		return (hdr_size);
506 
507 fail:
508 	free(kd->kcore_hdr);
509 	kd->kcore_hdr = NULL;
510 	if (kd->cpu_data != NULL) {
511 		free(kd->cpu_data);
512 		kd->cpu_data = NULL;
513 		kd->cpu_dsize = 0;
514 	}
515 	return (-1);
516 }
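
/*
 * Usage sketch (illustrative; the kernel path, device path and dump
 * offset are assumptions of the example, not values provided by this
 * file): a dump still sitting on the dump device carries no kcore_hdr,
 * so a savecore-style consumer opens the block device as the "core"
 * file and synthesizes the header before doing any reads.
 *
 *	kd = kvm_openfiles("/bsd", "/dev/sd0b", NULL, O_RDONLY, errbuf);
 *	if (kd == NULL)
 *		errx(1, "%s", errbuf);
 *	if (kvm_dump_mkheader(kd, dump_off) == -1)
 *		errx(1, "%s", kvm_geterr(kd));
 */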
517 
518 static int
519 clear_gap(kvm_t *kd, FILE *fp, int size)
520 {
521 	if (size <= 0) /* XXX - < 0 should never happen */
522 		return (0);
523 	while (size-- > 0) {
524 		if (fputc(0, fp) == EOF) {
525 			_kvm_syserr(kd, kd->program, "clear_gap");
526 			return (-1);
527 		}
528 	}
529 	return (0);
530 }
531 
532 /*
533  * Write the dump header info to 'fp'. Note that we can't use fseek(3) here
534  * because 'fp' might be a file pointer obtained by zopen().
535  */
536 int
537 kvm_dump_wrtheader(kvm_t *kd, FILE *fp, int dumpsize)
538 {
539 	kcore_seg_t	seghdr;
540 	long		offset;
541 	int		gap;
542 
543 	if (kd->kcore_hdr == NULL || kd->cpu_data == NULL) {
544 		_kvm_err(kd, kd->program, "no valid dump header(s)");
545 		return (-1);
546 	}
547 
548 	/*
549 	 * Write the generic header
550 	 */
551 	offset = 0;
552 	if (fwrite(kd->kcore_hdr, sizeof(kcore_hdr_t), 1, fp) < 1) {
553 		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
554 		return (-1);
555 	}
556 	offset += kd->kcore_hdr->c_hdrsize;
557 	gap     = kd->kcore_hdr->c_hdrsize - sizeof(kcore_hdr_t);
558 	if (clear_gap(kd, fp, gap) == -1)
559 		return (-1);
560 
561 	/*
562 	 * Write the cpu header
563 	 */
564 	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_CPU);
565 	seghdr.c_size = (u_long)ALIGN(kd->cpu_dsize);
566 	if (fwrite(&seghdr, sizeof(seghdr), 1, fp) < 1) {
567 		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
568 		return (-1);
569 	}
570 	offset += kd->kcore_hdr->c_seghdrsize;
571 	gap     = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
572 	if (clear_gap(kd, fp, gap) == -1)
573 		return (-1);
574 
575 	if (fwrite(kd->cpu_data, kd->cpu_dsize, 1, fp) < 1) {
576 		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
577 		return (-1);
578 	}
579 	offset += seghdr.c_size;
580 	gap     = seghdr.c_size - kd->cpu_dsize;
581 	if (clear_gap(kd, fp, gap) == -1)
582 		return (-1);
583 
584 	/*
585 	 * Write the actual dump data segment header
586 	 */
587 	CORE_SETMAGIC(seghdr, KCORESEG_MAGIC, 0, CORE_DATA);
588 	seghdr.c_size = dumpsize;
589 	if (fwrite(&seghdr, sizeof(seghdr), 1, fp) < 1) {
590 		_kvm_syserr(kd, kd->program, "kvm_dump_wrtheader");
591 		return (-1);
592 	}
593 	offset += kd->kcore_hdr->c_seghdrsize;
594 	gap     = kd->kcore_hdr->c_seghdrsize - sizeof(seghdr);
595 	if (clear_gap(kd, fp, gap) == -1)
596 		return (-1);
597 
598 	return (offset);
599 }
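
/*
 * Usage sketch (illustrative; kd, fp and dumpsize are assumed to have
 * been set up by a savecore-style caller): write the composite header,
 * then append the memory image behind it.
 *
 *	if ((hdrsz = kvm_dump_wrtheader(kd, fp, dumpsize)) == -1)
 *		errx(1, "%s", kvm_geterr(kd));
 *	... then copy dumpsize bytes of memory data into fp ...
 */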
600 
601 kvm_t *
602 kvm_openfiles(const char *uf, const char *mf, const char *sf,
603     int flag, char *errout)
604 {
605 	kvm_t *kd;
606 
607 	if ((kd = malloc(sizeof(*kd))) == NULL) {
608 		(void)strlcpy(errout, strerror(errno), _POSIX2_LINE_MAX);
609 		return (0);
610 	}
611 	kd->program = 0;
612 	return (_kvm_open(kd, uf, mf, sf, flag, errout));
613 }
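
/*
 * Usage sketch (illustrative, not part of the library; assumes <kvm.h>,
 * <fcntl.h>, <limits.h> and <err.h>): errout must be at least
 * _POSIX2_LINE_MAX bytes, since open failures are reported there before
 * any kvm_t exists for kvm_geterr().  NULL file names select the
 * defaults: _PATH_KSYMS/_PATH_UNIX, _PATH_MEM and no swap file.
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	kvm_t *kd;
 *
 *	if ((kd = kvm_openfiles(NULL, NULL, NULL, O_RDONLY, errbuf)) == NULL)
 *		errx(1, "kvm_openfiles: %s", errbuf);
 *	...
 *	(void)kvm_close(kd);
 */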
614 
615 kvm_t *
616 kvm_open(const char *uf, const char *mf, const char *sf, int flag,
617     const char *program)
618 {
619 	kvm_t *kd;
620 
621 	if ((kd = malloc(sizeof(*kd))) == NULL) {
622 		if (program != NULL)
623 			(void)fprintf(stderr, "%s: %s\n", program, strerror(errno));
		return (0);
624 	}
625 	kd->program = program;
626 	return (_kvm_open(kd, uf, mf, sf, flag, NULL));
627 }
628 
629 int
630 kvm_close(kvm_t *kd)
631 {
632 	int error = 0;
633 
634 	if (kd->pmfd >= 0)
635 		error |= close(kd->pmfd);
636 	if (kd->vmfd >= 0)
637 		error |= close(kd->vmfd);
638 	kd->alive = 0;
639 	if (kd->nlfd >= 0)
640 		error |= close(kd->nlfd);
641 	if (kd->swfd >= 0)
642 		error |= close(kd->swfd);
643 	if (kd->db != 0)
644 		error |= (kd->db->close)(kd->db);
645 	if (kd->vmst)
646 		_kvm_freevtop(kd);
647 	kd->cpu_dsize = 0;
648 	if (kd->cpu_data != NULL)
649 		free((void *)kd->cpu_data);
650 	if (kd->kcore_hdr != NULL)
651 		free((void *)kd->kcore_hdr);
652 	free(kd->filebase);
653 	free(kd->procbase);
654 	if (kd->swapspc != 0)
655 		free((void *)kd->swapspc);
656 	if (kd->argspc != 0)
657 		free((void *)kd->argspc);
658 	if (kd->argbuf != 0)
659 		free((void *)kd->argbuf);
660 	if (kd->argv != 0)
661 		free((void *)kd->argv);
662 	free((void *)kd);
663 
664 	return (error);
665 }
666 
667 /*
668  * Set up state necessary to do queries on the kernel namelist
669  * data base.  If the data base is out-of-date or incompatible with the
670  * given executable, set up things so we revert to the standard nlist call.
671  * Only called for live kernels.  Return 0 on success, -1 on failure.
672  */
673 static int
674 kvm_dbopen(kvm_t *kd, const char *uf)
675 {
676 	char dbversion[_POSIX2_LINE_MAX], kversion[_POSIX2_LINE_MAX];
677 	char dbname[PATH_MAX];
678 	struct nlist nitem;
679 	size_t dbversionlen;
680 	DBT rec;
681 
682 	uf = basename(uf);
683 
684 	(void)snprintf(dbname, sizeof(dbname), "%skvm_%s.db", _PATH_VARDB, uf);
685 	kd->db = dbopen(dbname, O_RDONLY, 0, DB_HASH, NULL);
686 	if (kd->db == NULL) {
687 		switch (errno) {
688 		case ENOENT:
689 			/* No kvm_bsd.db, fall back to /bsd silently */
690 			break;
691 		case EFTYPE:
692 			_kvm_err(kd, kd->program,
693 			    "file %s is incorrectly formatted", dbname);
694 			break;
695 		case EINVAL:
696 			_kvm_err(kd, kd->program,
697 			    "invalid argument to dbopen()");
698 			break;
699 		default:
700 			_kvm_err(kd, kd->program, "unknown dbopen() error");
701 			break;
702 		}
703 		return (-1);
704 	}
705 
706 	/*
707 	 * read version out of database
708 	 */
709 	rec.data = VRS_KEY;
710 	rec.size = sizeof(VRS_KEY) - 1;
711 	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
712 		goto close;
713 	if (rec.data == 0 || rec.size > sizeof(dbversion))
714 		goto close;
715 
716 	bcopy(rec.data, dbversion, rec.size);
717 	dbversionlen = rec.size;
718 
719 	/*
720 	 * Read version string from kernel memory.
721 	 * Since we are dealing with a live kernel, we can call kvm_read()
722 	 * at this point.
723 	 */
724 	rec.data = VRS_SYM;
725 	rec.size = sizeof(VRS_SYM) - 1;
726 	if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
727 		goto close;
728 	if (rec.data == 0 || rec.size != sizeof(struct nlist))
729 		goto close;
730 	bcopy(rec.data, &nitem, sizeof(nitem));
731 	if (kvm_read(kd, (u_long)nitem.n_value, kversion, dbversionlen) !=
732 	    dbversionlen)
733 		goto close;
734 	/*
735 	 * If they match, we win - otherwise clear out kd->db so
736 	 * we revert to slow nlist().
737 	 */
738 	if (bcmp(dbversion, kversion, dbversionlen) == 0)
739 		return (0);
740 close:
741 	(void)(kd->db->close)(kd->db);
742 	kd->db = 0;
743 
744 	return (-1);
745 }
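
/*
 * Sketch of the record layout the check above relies on (the database
 * itself is produced by kvm_mkdb(8)):
 *
 *	key				data
 *	VRS_KEY				kernel version string at build time
 *	VRS_SYM				struct nlist of the version symbol
 *	symbol name (as passed in)	struct nlist for that symbol
 *
 * The database is only trusted when the string stored under VRS_KEY
 * matches what kvm_read() returns for the address recorded under VRS_SYM.
 */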
746 
747 int
748 kvm_nlist(kvm_t *kd, struct nlist *nl)
749 {
750 	struct nlist *p;
751 	int nvalid, rv;
752 
753 	/*
754 	 * If we can't use the data base, revert to the
755 	 * slow library call.
756 	 */
757 	if (kd->db == 0) {
758 		rv = __fdnlist(kd->nlfd, nl);
759 		if (rv == -1)
760 			_kvm_err(kd, 0, "bad namelist");
761 		return (rv);
762 	}
763 
764 	/*
765 	 * We can use the kvm data base.  Go through each nlist entry
766 	 * and look it up with a db query.
767 	 */
768 	nvalid = 0;
769 	for (p = nl; p->n_name && p->n_name[0]; ++p) {
770 		size_t len;
771 		DBT rec;
772 
773 		if ((len = strlen(p->n_name)) > 4096) {
774 			/* sanity */
775 			_kvm_err(kd, kd->program, "symbol too large");
776 			return (-1);
777 		}
778 		rec.data = p->n_name;
779 		rec.size = len;
780 
781 		/*
782 		 * Make sure that n_value = 0 when the symbol isn't found
783 		 */
784 		p->n_value = 0;
785 
786 		if ((kd->db->get)(kd->db, (DBT *)&rec, (DBT *)&rec, 0))
787 			continue;
788 		if (rec.data == 0 || rec.size != sizeof(struct nlist))
789 			continue;
790 		++nvalid;
791 		/*
792 		 * Avoid alignment issues.
793 		 */
794 		bcopy(&((struct nlist *)rec.data)->n_type,
795 		    &p->n_type, sizeof(p->n_type));
796 		bcopy(&((struct nlist *)rec.data)->n_value,
797 		    &p->n_value, sizeof(p->n_value));
798 	}
799 	/*
800 	 * Return the number of entries that weren't found.
801 	 */
802 	return ((p - nl) - nvalid);
803 }
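
/*
 * Usage sketch (illustrative; "_nprocs" is only an assumed symbol name):
 * the array is terminated by a null n_name, and entries that were not
 * found come back with n_value == 0.
 *
 *	struct nlist nl[2];
 *
 *	nl[0].n_name = "_nprocs";
 *	nl[1].n_name = NULL;
 *	if (kvm_nlist(kd, nl) == -1)
 *		errx(1, "kvm_nlist: %s", kvm_geterr(kd));
 *	if (nl[0].n_value == 0)
 *		errx(1, "%s: symbol not found", nl[0].n_name);
 */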
804 
805 int
806 kvm_dump_inval(kvm_t *kd)
807 {
808 	struct nlist	nl[2];
809 	u_long		x;
810 	paddr_t		pa;
811 
812 	if (ISALIVE(kd)) {
813 		_kvm_err(kd, kd->program, "clearing dump on live kernel");
814 		return (-1);
815 	}
816 	nl[0].n_name = "_dumpmag";
817 	nl[1].n_name = NULL;
818 
819 	if (kvm_nlist(kd, nl) == -1) {
820 		_kvm_err(kd, 0, "bad namelist");
821 		return (-1);
822 	}
823 
824 	if (nl[0].n_value == 0) {
825 		_kvm_err(kd, nl[0].n_name, "not in name list");
826 		return (-1);
827 	}
828 
829 	if (_kvm_kvatop(kd, (u_long)nl[0].n_value, &pa) == 0)
830 		return (-1);
831 
832 	x = 0;
833 	if (_kvm_pwrite(kd, kd->pmfd, &x, sizeof(x),
834 	    (off_t)_kvm_pa2off(kd, pa)) != sizeof(x)) {
835 		_kvm_err(kd, 0, "cannot invalidate dump");
836 		return (-1);
837 	}
838 	return (0);
839 }
840 
841 ssize_t
842 kvm_read(kvm_t *kd, u_long kva, void *buf, size_t len)
843 {
844 	ssize_t cc;
845 	void *cp;
846 
847 	if (ISALIVE(kd)) {
848 		/*
849 		 * We're using /dev/kmem.  Just read straight from the
850 		 * device and let the active kernel do the address translation.
851 		 */
852 		cc = _kvm_pread(kd, kd->vmfd, buf, len, (off_t)kva);
853 		if (cc == -1) {
854 			_kvm_err(kd, 0, "invalid address (%lx)", kva);
855 			return (-1);
856 		} else if (cc < len)
857 			_kvm_err(kd, kd->program, "short read");
858 		return (cc);
859 	} else {
860 		if ((kd->kcore_hdr == NULL) || (kd->cpu_data == NULL)) {
861 			_kvm_err(kd, kd->program, "no valid dump header");
862 			return (-1);
863 		}
864 		cp = buf;
865 		while (len > 0) {
866 			paddr_t	pa;
867 
868 			/* In case of error, _kvm_kvatop sets the err string */
869 			cc = _kvm_kvatop(kd, kva, &pa);
870 			if (cc == 0)
871 				return (-1);
872 			if (cc > len)
873 				cc = len;
874 			cc = _kvm_pread(kd, kd->pmfd, cp, (size_t)cc,
875 			    (off_t)_kvm_pa2off(kd, pa));
876 			if (cc == -1) {
877 				_kvm_syserr(kd, 0, _PATH_MEM);
878 				break;
879 			}
880 			/*
881 			 * If kvm_kvatop returns a bogus value or our core
882 			 * file is truncated, we might wind up seeking beyond
883 			 * the end of the core file in which case the read will
884 			 * return 0 (EOF).
885 			 */
886 			if (cc == 0)
887 				break;
888 			cp = (char *)cp + cc;
889 			kva += cc;
890 			len -= cc;
891 		}
892 		return ((char *)cp - (char *)buf);
893 	}
894 	/* NOTREACHED */
895 }
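
/*
 * Usage sketch (illustrative; continues the kvm_nlist() example above
 * and assumes the symbol names an int-sized kernel variable):
 *
 *	int nprocs;
 *
 *	if (kvm_read(kd, nl[0].n_value, &nprocs, sizeof(nprocs)) !=
 *	    sizeof(nprocs))
 *		errx(1, "kvm_read: %s", kvm_geterr(kd));
 */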
896 
897 ssize_t
898 kvm_write(kvm_t *kd, u_long kva, const void *buf, size_t len)
899 {
900 	ssize_t cc;
901 
902 	if (ISALIVE(kd)) {
903 		/*
904 		 * Just like kvm_read, only we write.
905 		 */
906 		cc = _kvm_pwrite(kd, kd->vmfd, buf, len, (off_t)kva);
907 		if (cc == -1) {
908 			_kvm_err(kd, 0, "invalid address (%lx)", kva);
909 			return (-1);
910 		} else if (cc < len)
911 			_kvm_err(kd, kd->program, "short write");
912 		return (cc);
913 	} else {
914 		_kvm_err(kd, kd->program,
915 		    "kvm_write not implemented for dead kernels");
916 		return (-1);
917 	}
918 	/* NOTREACHED */
919 }
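
/*
 * Usage sketch (illustrative; assumes a handle opened O_RDWR on the live
 * kernel and an address obtained with kvm_nlist() as above):
 *
 *	int one = 1;
 *
 *	if (kvm_write(kd, nl[0].n_value, &one, sizeof(one)) != sizeof(one))
 *		errx(1, "kvm_write: %s", kvm_geterr(kd));
 */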
920