/*	$OpenBSD: kvm_sparc64.c,v 1.7 2008/03/30 14:49:45 kettenis Exp $	*/
/*	$NetBSD: kvm_sparc64.c,v 1.7 2001/08/05 03:33:15 matt Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

/*
 * Sparc machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/pmap.h>
#include <machine/kcore.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

/*
 * UltraSPARC T1 & T2 implement only a 40-bit real address range, just
 * like older UltraSPARC CPUs.
 */
#define TLB_PA_MASK	SUN4U_TLB_PA_MASK

int _kvm_kvatop(kvm_t *, u_long, u_long *);

void
_kvm_freevtop(kvm_t *kd)
{
	if (kd->vmst != NULL) {
		_kvm_err(kd, kd->program, "_kvm_freevtop: internal error");
		kd->vmst = NULL;
	}
}

/*
 * Prepare for translation of kernel virtual addresses into offsets
 * into crash dump files.  We use the MMU specific goop written at the
 * front of the crash dump by pmap_dumpmmu().
 *
 * We should read in and cache the ksegs here to speed up operations...
 */
int
_kvm_initvtop(kvm_t *kd)
{
	kd->nbpg = 0x2000;

	return (0);
}

/*
 * Translate a kernel virtual address to a physical address using the
 * mapping information in kd->vm.  Returns the result in pa, and returns
 * the number of bytes that are contiguously available from this
 * physical address.  This routine is used only for crash dumps.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	u_long kernbase = cpup->kernbase;
	uint64_t *pseg, *pdir, *ptbl;
	int64_t data;

	if (va < kernbase)
		goto lose;

	/* Handle the wired 4MB TTEs */
	if (va > cpup->ktextbase && va < (cpup->ktextbase + cpup->ktextsz)) {
		u_long vaddr;

		vaddr = va - cpup->ktextbase;
		*pa = cpup->ktextp + vaddr;
		return (cpup->ktextsz - vaddr);
	}

	if (va > cpup->kdatabase && va < (cpup->kdatabase + cpup->kdatasz)) {
		u_long vaddr;

		vaddr = va - cpup->kdatabase;
		*pa = cpup->kdatap + vaddr;
		return (cpup->kdatasz - vaddr);
	}

	/*
	 * Parse kernel page table.
	 */
	pseg = (uint64_t *)(u_long)cpup->segmapoffset;
	if (pread(kd->pmfd, &pdir, sizeof(pdir),
	    _kvm_pa2off(kd, (u_long)&pseg[va_to_seg(va)])) != sizeof(pdir)) {
		_kvm_syserr(kd, 0, "could not read L1 PTE");
		goto lose;
	}

	if (!pdir) {
		_kvm_err(kd, 0, "invalid L1 PTE");
		goto lose;
	}

	if (pread(kd->pmfd, &ptbl, sizeof(ptbl),
	    _kvm_pa2off(kd, (u_long)&pdir[va_to_dir(va)])) != sizeof(ptbl)) {
		_kvm_syserr(kd, 0, "could not read L2 PTE");
		goto lose;
	}

	if (!ptbl) {
		_kvm_err(kd, 0, "invalid L2 PTE");
		goto lose;
	}

	if (pread(kd->pmfd, &data, sizeof(data),
	    _kvm_pa2off(kd, (u_long)&ptbl[va_to_pte(va)])) != sizeof(data)) {
		_kvm_syserr(kd, 0, "could not read TTE");
		goto lose;
	}

	if (data >= 0) {
		_kvm_err(kd, 0, "invalid L2 TTE");
		goto lose;
	}

	/*
	 * Calculate page offsets and things.
	 *
	 * XXXX -- We could support multiple page sizes.
	 */
	va = va & (kd->nbpg - 1);
	data &= TLB_PA_MASK;
	*pa = data + va;

	/*
	 * Parse and translate our TTE.
	 */
	return (kd->nbpg - va);

lose:
	*pa = -1;
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

/*
 * Translate a physical address to a file-offset in the crash-dump.
 */
off_t
_kvm_pa2off(kvm_t *kd, paddr_t pa)
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	phys_ram_seg_t *mp;
	off_t off;
	int nmem;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 */
	mp = (phys_ram_seg_t *)((long)kd->cpu_data + cpup->memsegoffset);
	off = 0;

	/* Translate (sparse) pfnum to (packed) dump offset */
	for (nmem = cpup->nmemseg; --nmem >= 0; mp++) {
		if (mp->start <= pa && pa < mp->start + mp->size)
			break;
		off += mp->size;
	}
	if (nmem < 0) {
		_kvm_err(kd, 0, "invalid address (%lx)", pa);
		return (-1);
	}

	return (kd->dump_off + off + pa - mp->start);
}
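
The two routines above are libkvm internals; applications reach them through the public kvm(3) API, which calls _kvm_kvatop() and then _kvm_pa2off() whenever it reads kernel memory from a crash dump rather than a live kernel. Below is a minimal, stand-alone sketch of that path. It is not part of this file; the program name, the "_nprocs" symbol, and the assumption that the symbol holds an int are illustrative only.

/*
 * kvmnprocs.c -- illustrative sketch, not part of libkvm: read one
 * kernel variable out of a crash dump via the public kvm(3) interface.
 * The symbol "_nprocs" and its assumed int type are placeholders.
 */
#include <sys/types.h>

#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <nlist.h>
#include <stdio.h>

int
main(int argc, char *argv[])
{
	char errbuf[_POSIX2_LINE_MAX];
	struct nlist nl[] = { { "_nprocs" }, { NULL } };
	kvm_t *kd;
	int nprocs;

	if (argc != 3) {
		fprintf(stderr, "usage: kvmnprocs kernel dumpfile\n");
		return (1);
	}

	/* Open the kernel image and the crash dump read-only. */
	if ((kd = kvm_openfiles(argv[1], argv[2], NULL, O_RDONLY,
	    errbuf)) == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}

	/* Resolve the symbol to its kernel virtual address. */
	if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0) {
		fprintf(stderr, "kvm_nlist: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}

	/*
	 * On a dead kernel, kvm_read() translates this virtual address
	 * with _kvm_kvatop() and seeks to the dump offset computed by
	 * _kvm_pa2off() before reading the bytes.
	 */
	if (kvm_read(kd, nl[0].n_value, &nprocs, sizeof(nprocs)) !=
	    sizeof(nprocs)) {
		fprintf(stderr, "kvm_read: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}

	printf("nprocs = %d\n", nprocs);
	kvm_close(kd);
	return (0);
}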