/*	$NetBSD: kvm_sparc64.c,v 1.6 2000/10/10 20:44:17 he Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_sparc.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: kvm_sparc64.c,v 1.6 2000/10/10 20:44:17 he Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Sparc machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>

#include <machine/kcore.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

int _kvm_kvatop __P((kvm_t *, u_long, u_long *));
off_t _kvm_pa2off __P((kvm_t *, u_long));

void
_kvm_freevtop(kd)
	kvm_t *kd;
{
	if (kd->vmst != 0) {
		_kvm_err(kd, kd->program, "_kvm_freevtop: internal error");
		kd->vmst = 0;
	}
}

/*
 * Prepare for translation of kernel virtual addresses into offsets
 * into crash dump files.  We use the MMU specific goop written at the
 * front of the crash dump by pmap_dumpmmu().
 *
 * We should read in and cache the ksegs here to speed up operations...
 */
int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	kd->nbpg = 0x2000;

	return (0);
}

/*
 * Translate a kernel virtual address to a physical address using the
 * mapping information in kd->vm.  Returns the result in pa, and returns
 * the number of bytes that are contiguously available from this
 * physical address.  This routine is used only for crash dumps.
 */
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	u_long kernbase = cpup->kernbase;
	uint64_t *pseg, *pdir, *ptbl;
	int64_t data;

	if (va < kernbase)
		goto lose;

	/* Handle the wired 4MB TTEs */
	if (va > cpup->ktextbase && va < (cpup->ktextbase + cpup->ktextsz)) {
		u_long vaddr;

		vaddr = va - cpup->ktextbase;
		*pa = cpup->ktextp + vaddr;
		return (cpup->ktextsz - vaddr);
	}

	if (va > cpup->kdatabase && va < (cpup->kdatabase + cpup->kdatasz)) {
		u_long vaddr;

		vaddr = va - cpup->kdatabase;
		*pa = cpup->kdatap + vaddr;
		return (cpup->kdatasz - vaddr);
	}

	/*
	 * Parse the kernel page table: read the L1 segment map entry,
	 * then the L2 directory entry, then the TTE itself, each time
	 * following the physical address found in the previous level.
	 */
	pseg = (uint64_t *)(u_long)cpup->segmapoffset;
	if (pread(kd->pmfd, &pdir, sizeof(pdir),
		_kvm_pa2off(kd, (u_long)&pseg[va_to_seg(va)]))
		!= sizeof(pdir)) {
		_kvm_syserr(kd, 0, "could not read L1 PTE");
		goto lose;
	}

	if (!pdir) {
		_kvm_err(kd, 0, "invalid L1 PTE");
		goto lose;
	}

	if (pread(kd->pmfd, &ptbl, sizeof(ptbl),
		_kvm_pa2off(kd, (u_long)&pdir[va_to_dir(va)]))
		!= sizeof(ptbl)) {
		_kvm_syserr(kd, 0, "could not read L2 PTE");
		goto lose;
	}

	if (!ptbl) {
		_kvm_err(kd, 0, "invalid L2 PTE");
		goto lose;
	}

	if (pread(kd->pmfd, &data, sizeof(data),
		_kvm_pa2off(kd, (u_long)&ptbl[va_to_pte(va)]))
		!= sizeof(data)) {
		_kvm_syserr(kd, 0, "could not read TTE");
		goto lose;
	}

	/* A valid TTE has the valid bit (bit 63) set, i.e. it is negative. */
	if (data >= 0) {
		_kvm_err(kd, 0, "invalid L2 TTE");
		goto lose;
	}

	/*
	 * Parse and translate our TTE: the page offset comes from the
	 * virtual address, the physical page from the TTE itself.
	 *
	 * XXXX -- We could support multiple page sizes.
	 */
	va = va & (kd->nbpg - 1);
	data &= TLB_PA_MASK;
	*pa = data + va;

	return (kd->nbpg - va);

lose:
	*pa = -1;
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

/*
 * Translate a physical address to a file offset in the crash dump.
 */
off_t
_kvm_pa2off(kd, pa)
	kvm_t *kd;
	u_long pa;
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	phys_ram_seg_t *mp;
	off_t off;
	int nmem;

	/*
	 * Layout of the CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 */
	mp = (phys_ram_seg_t *)((long)kd->cpu_data + cpup->memsegoffset);
	off = 0;

	/* Translate (sparse) pfnum to (packed) dump offset */
	for (nmem = cpup->nmemseg; --nmem >= 0; mp++) {
		if (mp->start <= pa && pa < mp->start + mp->size)
			break;
		off += mp->size;
	}
	if (nmem < 0) {
		_kvm_err(kd, 0, "invalid address (%lx)", pa);
		return (-1);
	}

	return (kd->dump_off + off + pa - mp->start);
}
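/*
 * Worked example (hypothetical segment layout, for illustration only):
 * with two RAM segments { start 0x0, size 0x4000000 } and
 * { start 0x1f0000000, size 0x2000000 }, a physical address of
 * 0x1f0001000 falls in the second segment.  The loop above first skips
 * the 0x4000000 bytes of the first segment, so the returned file offset
 * is kd->dump_off + 0x4000000 + (0x1f0001000 - 0x1f0000000), i.e.
 * kd->dump_off + 0x4001000.
 */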
/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (i.e. m68k)
 */
int
_kvm_mdopen(kd)
	kvm_t *kd;
{
	u_long max_uva;
	extern struct ps_strings *__ps_strings;

	max_uva = (u_long) (__ps_strings + 1);
	kd->usrstack = max_uva;
	kd->max_uva = max_uva;
	kd->min_uva = 0;

	return (0);
}
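/*
 * Illustrative consumer sketch (not part of this module, kept under
 * "#if 0"): a minimal, hypothetical program that opens a kernel image
 * and a sparc64 crash dump through the public libkvm interface and
 * reads the kernel's "hz" variable.  kvm_read() on a dump ends up in
 * _kvm_kvatop() and _kvm_pa2off() above to turn the kernel virtual
 * address into a file offset.  The symbol name and the command line
 * handling are assumptions made for the example only.
 */
#if 0
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <nlist.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
	char errbuf[_POSIX2_LINE_MAX];
	struct nlist nl[2];
	kvm_t *kd;
	int hz;

	if (argc < 3) {
		fprintf(stderr, "usage: %s kernel dump\n", argv[0]);
		return (1);
	}

	nl[0].n_name = "_hz";		/* assumed symbol name */
	nl[1].n_name = NULL;

	kd = kvm_openfiles(argv[1], argv[2], NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		fprintf(stderr, "kvm_openfiles: %s\n", errbuf);
		return (1);
	}
	if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0) {
		fprintf(stderr, "kvm_nlist: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}

	/* Read sizeof(hz) bytes at the kernel virtual address of "hz". */
	if (kvm_read(kd, nl[0].n_value, &hz, sizeof(hz)) != sizeof(hz)) {
		fprintf(stderr, "kvm_read: %s\n", kvm_geterr(kd));
		kvm_close(kd);
		return (1);
	}
	printf("hz = %d\n", hz);
	kvm_close(kd);
	return (0);
}
#endif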