/*	$OpenBSD: kvm_alpha.c,v 1.14 2006/03/20 15:11:48 mickey Exp $	*/
/*	$NetBSD: kvm_alpha.c,v 1.5 1996/10/01 21:12:05 cgd Exp $	*/

/*
 * Copyright (c) 1994, 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#define	__KVM_ALPHA_PRIVATE		/* see <machine/pte.h> */

#include <sys/param.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/kcore.h>
#include <machine/kcore.h>
#include <unistd.h>
#include <stdlib.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>
#include <machine/vmparam.h>
#include <machine/pmap.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

struct vmstate {
	vsize_t		page_shift;
};

void
_kvm_freevtop(kvm_t *kd)
{

	/* Not actually used for anything right now, but safe. */
	if (kd->vmst != NULL) {
		free(kd->vmst);
		kd->vmst = NULL;
	}
}

int
_kvm_initvtop(kvm_t *kd)
{
	cpu_kcore_hdr_t *cpu_kh;
	struct vmstate *vm;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL)
		return (-1);

	cpu_kh = kd->cpu_data;

	/* Compute page_shift. */
	for (vm->page_shift = 0; (1L << vm->page_shift) < cpu_kh->page_size;
	    vm->page_shift++)
		/* nothing */ ;
	if ((1L << vm->page_shift) != cpu_kh->page_size) {
		free(vm);
		return (-1);
	}

	kd->vmst = vm;
	return (0);
}

int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	struct vmstate *vm;
	int rv, page_off;
	alpha_pt_entry_t pte;
	off_t pteoff;

	if (!kd->vmst) {
		_kvm_err(kd, 0, "vatop called before initvtop");
		return (0);
	}

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	cpu_kh = kd->cpu_data;
	vm = kd->vmst;
	page_off = va & (cpu_kh->page_size - 1);

#ifndef	PAGE_SHIFT
#define	PAGE_SHIFT	vm->page_shift
#endif

	if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) {
		/*
		 * Direct-mapped address: just convert it.
		 */

		*pa = ALPHA_K0SEG_TO_PHYS(va);
		rv = cpu_kh->page_size - page_off;
	} else if (va >= ALPHA_K1SEG_BASE && va <= ALPHA_K1SEG_END) {
		/*
		 * Real kernel virtual address: do the translation.
		 */

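		/*
		 * Three-level walk, sketched here for the usual 8 KB
		 * Alpha page size (the real size comes from the dump
		 * header): an 8 KB page of 8-byte PTEs holds 1024
		 * entries, so the VA splits into bits <12:0> page
		 * offset, <22:13> L3 index, <32:23> L2 index and
		 * <42:33> L1 index.  The machine-dependent
		 * l1pte_index(), l2pte_index() and l3pte_index()
		 * macros extract those indices, and each PTE read
		 * below must have ALPHA_PTE_VALID set before its PFN
		 * is followed to the next level.
		 */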
		/* Find and read the L1 PTE. */
		pteoff = cpu_kh->lev1map_pa +
		    l1pte_index(va) * sizeof(alpha_pt_entry_t);
		if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
		    (off_t)_kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L1 PTE");
			goto lose;
		}

		/* Find and read the L2 PTE. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L1 PTE)");
			goto lose;
		}
		pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
		    l2pte_index(va) * sizeof(alpha_pt_entry_t);
		if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
		    (off_t)_kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L2 PTE");
			goto lose;
		}

		/* Find and read the L3 PTE. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L2 PTE)");
			goto lose;
		}
		pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
		    l3pte_index(va) * sizeof(alpha_pt_entry_t);
		if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
		    (off_t)_kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L3 PTE");
			goto lose;
		}

		/* Fill in the PA. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L3 PTE)");
			goto lose;
		}
		*pa = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size + page_off;
		rv = cpu_kh->page_size - page_off;
	} else {
		/*
		 * Bogus address (not in KV space): punt.
		 */

		_kvm_err(kd, 0, "invalid kernel virtual address");
lose:
		*pa = -1;
		rv = 0;
	}

	return (rv);
}

/*
 * Translate a physical address to a file-offset in the crash-dump.
 */
off_t
_kvm_pa2off(kvm_t *kd, paddr_t pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	phys_ram_seg_t *ramsegs;
	off_t off;
	int i;

	cpu_kh = kd->cpu_data;
	ramsegs = (phys_ram_seg_t *)((char *)cpu_kh + ALIGN(sizeof *cpu_kh));

	off = 0;
	for (i = 0; i < cpu_kh->nmemsegs; i++) {
		if (pa >= ramsegs[i].start &&
		    (pa - ramsegs[i].start) < ramsegs[i].size) {
			off += (pa - ramsegs[i].start);
			break;
		}
		off += ramsegs[i].size;
	}
	return (kd->dump_off + off);
}
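
/*
 * Worked example of the segment arithmetic above, assuming a hypothetical
 * dump with two RAM segments, 8 MB at 0x0000000 and 16 MB at 0x8000000:
 * a physical address of 0x8001000 fails the first segment's range check,
 * so off first accumulates the 8 MB of segment 0 and then adds
 * (0x8001000 - 0x8000000), yielding kd->dump_off + 0x801000.  An address
 * that falls in no segment accumulates every segment's size and simply
 * maps past the last segment's data; no error is flagged here, so the
 * caller's read at that offset is left to fail on its own.
 */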