/*	$NetBSD: kvm_sparc.c,v 1.18 1998/06/30 20:29:40 thorpej Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char sccsid[] = "@(#)kvm_sparc.c	8.1 (Berkeley) 6/4/93";
#else
__RCSID("$NetBSD: kvm_sparc.c,v 1.18 1998/06/30 20:29:40 thorpej Exp $");
#endif
#endif /* LIBC_SCCS and not lint */

/*
 * Sparc machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>
#include <sys/exec.h>
#include <sys/user.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <machine/kcore.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"


static int cputyp = -1;
static int pgshift;
static int nptesg;	/* [sun4/sun4c] only */

#define VA_VPG(va)	((cputyp == CPU_SUN4C || cputyp == CPU_SUN4M) \
			    ? VA_SUN4C_VPG(va) \
			    : VA_SUN4_VPG(va))

#define VA_OFF(va)	(va & (kd->nbpg - 1))

int _kvm_kvatop44c __P((kvm_t *, u_long, u_long *));
int _kvm_kvatop4m  __P((kvm_t *, u_long, u_long *));

void
_kvm_freevtop(kd)
	kvm_t *kd;
{
	if (kd->vmst != 0) {
		_kvm_err(kd, kd->program, "_kvm_freevtop: internal error");
		kd->vmst = 0;
	}
}

/*
 * Prepare for translation of kernel virtual addresses into offsets
 * into crash dump files.  We use the MMU specific goop written at the
 * front of the crash dump by pmap_dumpmmu().
 */
int
_kvm_initvtop(kd)
	kvm_t *kd;
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;

	switch (cputyp = cpup->cputype) {
	case CPU_SUN4:
		kd->nbpg = 8192;	/* 1 << 13; must be a power of two */
		pgshift = 13;
		break;
	case CPU_SUN4C:
	case CPU_SUN4M:
		kd->nbpg = 4096;	/* 1 << 12 */
		pgshift = 12;
		break;
	default:
		_kvm_err(kd, kd->program, "Unsupported CPU type");
		return (-1);
	}
	nptesg = NBPSG / kd->nbpg;
	return (0);
}

/*
 * Translate a kernel virtual address to a physical address using the
 * mapping information in kd->cpu_data.  Returns the result in pa, and
 * returns the number of bytes that are contiguously available from this
 * physical address.  This routine is used only for crash dumps.
 */
int
_kvm_kvatop(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	if (cputyp == -1)
		if (_kvm_initvtop(kd) != 0)
			return (-1);

	return ((cputyp == CPU_SUN4M)
		? _kvm_kvatop4m(kd, va, pa)
		: _kvm_kvatop44c(kd, va, pa));
}

/*
 * (note: sun4 3-level MMU not yet supported)
 */
int
_kvm_kvatop44c(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	int vr, vs, pte;
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	struct segmap *sp, *segmaps;
	int *ptes;
	int nkreg, nureg;
	u_long kernbase = cpup->kernbase;

	if (va < kernbase)
		goto err;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 *	segmap[cpup->nsegmap];
	 *	ptes[cpup->npmegs];
	 */
	segmaps = (struct segmap *)((long)kd->cpu_data + cpup->segmapoffset);
	ptes = (int *)((long)kd->cpu_data + cpup->pmegoffset);
	nkreg = ((int)((-(unsigned)kernbase) / NBPRG));
	nureg = 256 - nkreg;

	vr = VA_VREG(va);
	vs = VA_VSEG(va);

	sp = &segmaps[(vr-nureg)*NSEGRG + vs];
	if (sp->sg_npte == 0)
		goto err;
	if (sp->sg_pmeg == cpup->npmeg - 1)	/* =seginval */
		goto err;
	pte = ptes[sp->sg_pmeg * nptesg + VA_VPG(va)];
	if ((pte & PG_V) != 0) {
		long p, off = VA_OFF(va);

		p = (pte & PG_PFNUM) << pgshift;
		*pa = p + off;
		return (kd->nbpg - off);
	}
err:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

int
_kvm_kvatop4m(kd, va, pa)
	kvm_t *kd;
	u_long va;
	u_long *pa;
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	int vr, vs;
	int pte;
	off_t foff;
	struct segmap *sp, *segmaps;
	int nkreg, nureg;
	u_long kernbase = cpup->kernbase;

	if (va < kernbase)
		goto err;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 *	segmap[cpup->nsegmap];
	 */
	segmaps = (struct segmap *)((long)kd->cpu_data + cpup->segmapoffset);
	nkreg = ((int)((-(unsigned)kernbase) / NBPRG));
	nureg = 256 - nkreg;

	vr = VA_VREG(va);
	vs = VA_VSEG(va);

	sp = &segmaps[(vr-nureg)*NSEGRG + vs];
	if (sp->sg_npte == 0)
		goto err;

	/* XXX - assume page tables in initial kernel DATA or BSS. */
	foff = _kvm_pa2off(kd, (u_long)&sp->sg_pte[VA_VPG(va)] - kernbase);
	if (foff == (off_t)-1)
		return (0);

	if (pread(kd->pmfd, &pte, sizeof(pte), foff) != sizeof(pte)) {
		_kvm_syserr(kd, kd->program, "cannot read pte for %lx", va);
		return (0);
	}

	if ((pte & SRMMU_TETYPE) == SRMMU_TEPTE) {
		long p, off = VA_OFF(va);

		p = (pte & SRMMU_PPNMASK) << SRMMU_PPNPASHIFT;
		*pa = p + off;
		return (kd->nbpg - off);
	}
err:
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}

/*
 * Translate a physical address to a file offset in the crash dump.
 */
off_t
_kvm_pa2off(kd, pa)
	kvm_t *kd;
	u_long pa;
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	phys_ram_seg_t *mp;
	off_t off;
	int nmem;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 */
	mp = (phys_ram_seg_t *)((long)kd->cpu_data + cpup->memsegoffset);
	off = 0;

	/* Translate (sparse) pfnum to (packed) dump offset */
	for (nmem = cpup->nmemseg; --nmem >= 0; mp++) {
		if (mp->start <= pa && pa < mp->start + mp->size)
			break;
		off += mp->size;
	}
	if (nmem < 0) {
		_kvm_err(kd, 0, "invalid address (%lx)", pa);
		return (-1);
	}

	return (kd->dump_off + off + pa - mp->start);
}

/*
 * Machine-dependent initialization for ALL open kvm descriptors,
 * not just those for a kernel crash dump.  Some architectures
 * have to deal with these NOT being constants!  (e.g. m68k)
 */
int
_kvm_mdopen(kd)
	kvm_t *kd;
{
	u_long max_uva;
	extern struct ps_strings *__ps_strings;

	max_uva = (u_long)(__ps_strings + 1);
	kd->usrstack = max_uva;
	kd->max_uva = max_uva;
	kd->min_uva = 0;

	return (0);
}
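
/*
 * Example (a minimal sketch only, not compiled into the library): the MD
 * hooks in this file are not called directly by applications; they are
 * reached through the public kvm(3) interface.  A kvm_read() on a crash
 * dump ends up in _kvm_kvatop() and _kvm_pa2off() above to turn a kernel
 * virtual address into a file offset within the dump.  The symbol name
 * "_hz" and the helper read_hz() below are illustrative only.
 */
#if 0
#include <sys/types.h>
#include <err.h>
#include <fcntl.h>
#include <kvm.h>
#include <limits.h>
#include <nlist.h>
#include <string.h>

int
read_hz(kernel, dump)
	const char *kernel, *dump;
{
	char errbuf[_POSIX2_LINE_MAX];
	struct nlist nl[2];
	kvm_t *kd;
	int hz;

	/* Open the named kernel image and crash dump read-only. */
	kd = kvm_openfiles(kernel, dump, NULL, O_RDONLY, errbuf);
	if (kd == NULL) {
		warnx("%s", errbuf);
		return (-1);
	}

	/* Look up the kernel symbol, then read its value from the dump. */
	memset(nl, 0, sizeof(nl));
	nl[0].n_name = "_hz";
	nl[1].n_name = NULL;
	if (kvm_nlist(kd, nl) != 0 || nl[0].n_value == 0 ||
	    kvm_read(kd, nl[0].n_value, &hz, sizeof(hz)) != sizeof(hz)) {
		warnx("%s", kvm_geterr(kd));
		(void)kvm_close(kd);
		return (-1);
	}
	(void)kvm_close(kd);
	return (hz);
}
#endif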