/*	$OpenBSD: kvm_sparc64.c,v 1.11 2021/10/06 00:42:47 deraadt Exp $	*/
/*	$NetBSD: kvm_sparc64.c,v 1.7 2001/08/05 03:33:15 matt Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software developed by the Computer Systems
 * Engineering group at Lawrence Berkeley Laboratory under DARPA contract
 * BG 91-66 and contributed to Berkeley.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Sparc machine dependent routines for kvm.  Hopefully, the forthcoming
 * vm code will one day obsolete this module.
 */

#include <sys/param.h>	/* MAXCOMLEN PAGE_SIZE */
#include <sys/types.h>
#include <sys/signal.h>
#include <sys/exec.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/core.h>
#include <sys/kcore.h>
#include <unistd.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>

#include <machine/vmparam.h>
#include <machine/pmap.h>
#include <machine/kcore.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

/*
 * UltraSPARC T1 & T2 implement only a 40-bit real address range, just
 * like older UltraSPARC CPUs.
 */
#define TLB_PA_MASK	SUN4U_TLB_PA_MASK

int _kvm_kvatop(kvm_t *, u_long, u_long *);

void
_kvm_freevtop(kvm_t *kd)
{
	if (kd->vmst != NULL) {
		_kvm_err(kd, kd->program, "_kvm_freevtop: internal error");
		kd->vmst = NULL;
	}
}

/*
 * Prepare for translation of kernel virtual addresses into offsets
 * into crash dump files.  We use the MMU specific goop written at the
 * front of the crash dump by pmap_dumpmmu().
 *
 * We should read in and cache the ksegs here to speed up operations...
 */
int
_kvm_initvtop(kvm_t *kd)
{
	return (0);
}

/*
 * Translate a kernel virtual address to a physical address using the
 * mapping information in kd->vm.  Returns the result in pa, and returns
 * the number of bytes that are contiguously available from this
 * physical address.  This routine is used only for crashdumps.
 */
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	u_long kernbase = cpup->kernbase;
	uint64_t *pseg, *pdir, *ptbl;
	int64_t data;

	if (va < kernbase)
		goto lose;

	/* Handle the wired 4MB TTEs */
	if (va > cpup->ktextbase && va < (cpup->ktextbase + cpup->ktextsz)) {
		u_long vaddr;

		vaddr = va - cpup->ktextbase;
		*pa = cpup->ktextp + vaddr;
		return (cpup->ktextsz - vaddr);
	}

	if (va > cpup->kdatabase && va < (cpup->kdatabase + cpup->kdatasz)) {
		u_long vaddr;

		vaddr = va - cpup->kdatabase;
		*pa = cpup->kdatap + vaddr;
		return (cpup->kdatasz - vaddr);
	}


	/*
	 * Parse kernel page table.
	 */
	pseg = (uint64_t *)(u_long)cpup->segmapoffset;
	if (pread(kd->pmfd, &pdir, sizeof(pdir),
	    _kvm_pa2off(kd, (u_long)&pseg[va_to_seg(va)])) != sizeof(pdir)) {
		_kvm_syserr(kd, 0, "could not read L1 PTE");
		goto lose;
	}

	if (!pdir) {
		_kvm_err(kd, 0, "invalid L1 PTE");
		goto lose;
	}

	if (pread(kd->pmfd, &ptbl, sizeof(ptbl),
	    _kvm_pa2off(kd, (u_long)&pdir[va_to_dir(va)])) != sizeof(ptbl)) {
		_kvm_syserr(kd, 0, "could not read L2 PTE");
		goto lose;
	}

	if (!ptbl) {
		_kvm_err(kd, 0, "invalid L2 PTE");
		goto lose;
	}

	if (pread(kd->pmfd, &data, sizeof(data),
	    _kvm_pa2off(kd, (u_long)&ptbl[va_to_pte(va)])) != sizeof(data)) {
		_kvm_syserr(kd, 0, "could not read TTE");
		goto lose;
	}

	if (data >= 0) {
		_kvm_err(kd, 0, "invalid L2 TTE");
		goto lose;
	}

	/*
	 * Calculate page offsets and things.
	 *
	 * XXXX -- We could support multiple page sizes.
	 */
	va = va & (kd->nbpg - 1);
	data &= TLB_PA_MASK;
	*pa = data + va;

	/*
	 * Parse and translate our TTE.
	 */
	return (kd->nbpg - va);

lose:
	*pa = -1;
	_kvm_err(kd, 0, "invalid address (%lx)", va);
	return (0);
}


/*
 * Translate a physical address to a file-offset in the crash-dump.
 */
off_t
_kvm_pa2off(kvm_t *kd, paddr_t pa)
{
	cpu_kcore_hdr_t *cpup = kd->cpu_data;
	phys_ram_seg_t *mp;
	off_t off;
	int nmem;

	/*
	 * Layout of CPU segment:
	 *	cpu_kcore_hdr_t;
	 *	[alignment]
	 *	phys_ram_seg_t[cpup->nmemseg];
	 */
	mp = (phys_ram_seg_t *)((long)kd->cpu_data + cpup->memsegoffset);
	off = 0;

	/* Translate (sparse) pfnum to (packed) dump offset */
	for (nmem = cpup->nmemseg; --nmem >= 0; mp++) {
		if (mp->start <= pa && pa < mp->start + mp->size)
			break;
		off += mp->size;
	}
	if (nmem < 0) {
		_kvm_err(kd, 0, "invalid address (%lx)", pa);
		return (-1);
	}

	return (kd->dump_off + off + pa - mp->start);
}
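
/*
 * Illustrative sketch only, not part of the original sources: how a
 * libkvm consumer ends up exercising the routines above when examining
 * a crash dump.  kvm_read(3) on a dead kernel goes through
 * _kvm_kvatop() to turn a kernel virtual address into a physical
 * address, and through _kvm_pa2off() to locate that physical address
 * in the dump file.  The dump path and the symbol name "_nprocs" are
 * assumptions for the example; the address would normally be obtained
 * from kvm_nlist(3) against the matching kernel image.
 *
 *	char errbuf[_POSIX2_LINE_MAX];
 *	struct nlist nl[] = { { "_nprocs" }, { NULL } };
 *	int nprocs;
 *	kvm_t *kd;
 *
 *	kd = kvm_openfiles("/bsd", "/var/crash/bsd.0.core", NULL,
 *	    O_RDONLY, errbuf);
 *	if (kd != NULL && kvm_nlist(kd, nl) == 0)
 *		kvm_read(kd, nl[0].n_value, &nprocs, sizeof(nprocs));
 */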