/*	$OpenBSD: kvm_alpha.c,v 1.17 2021/12/01 16:53:28 deraadt Exp $	*/
/*	$NetBSD: kvm_alpha.c,v 1.5 1996/10/01 21:12:05 cgd Exp $	*/

/*
 * Copyright (c) 1994, 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
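
/*
 * Alpha machine-dependent virtual-to-physical address translation
 * routines for libkvm, used when examining kernel crash dumps.
 */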

#define __KVM_ALPHA_PRIVATE		/* see <machine/pte.h> */

#include <sys/types.h>
#include <sys/signal.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/kcore.h>
#include <machine/kcore.h>
#include <unistd.h>
#include <stdlib.h>
#include <nlist.h>
#include <kvm.h>

#include <uvm/uvm_extern.h>
#include <machine/vmparam.h>
#include <machine/pmap.h>

#include <limits.h>
#include <db.h>

#include "kvm_private.h"

struct vmstate {
	vsize_t page_shift;		/* log2 of the dump's page size */
};

void
_kvm_freevtop(kvm_t *kd)
{

	/* Not actually used for anything right now, but safe. */
	free(kd->vmst);
	kd->vmst = NULL;
}

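/*
 * Initialize the private virtual-to-physical translation state from the
 * crash dump's cpu_kcore_hdr.  Returns 0 on success, -1 on failure.
 */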
int
_kvm_initvtop(kvm_t *kd)
{
	cpu_kcore_hdr_t *cpu_kh;
	struct vmstate *vm;

	vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
	if (vm == NULL)
		return (-1);

	cpu_kh = kd->cpu_data;

	/* Compute page_shift. */
	for (vm->page_shift = 0; (1L << vm->page_shift) < cpu_kh->page_size;
	    vm->page_shift++)
		/* nothing */ ;
	/* Reject page sizes that are not a power of two. */
	if ((1L << vm->page_shift) != cpu_kh->page_size) {
		free(vm);
		return (-1);
	}

	kd->vmst = vm;
	return (0);
}

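/*
 * Translate a kernel virtual address into a physical address using the
 * page tables saved in the crash dump.  On success, *pa is filled in and
 * the number of bytes remaining in the containing page is returned; on
 * failure, 0 is returned.
 */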
int
_kvm_kvatop(kvm_t *kd, u_long va, paddr_t *pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	struct vmstate *vm;
	int rv, page_off;
	alpha_pt_entry_t pte;
	off_t pteoff;

	if (!kd->vmst) {
		_kvm_err(kd, 0, "vatop called before initvtop");
		return (0);
	}

	if (ISALIVE(kd)) {
		_kvm_err(kd, 0, "vatop called in live kernel!");
		return (0);
	}

	cpu_kh = kd->cpu_data;
	vm = kd->vmst;
	page_off = va & (cpu_kh->page_size - 1);

#ifndef PAGE_SHIFT
#define PAGE_SHIFT	vm->page_shift
#endif

	if (va >= ALPHA_K0SEG_BASE && va <= ALPHA_K0SEG_END) {
		/*
		 * Direct-mapped address: just convert it.
		 */

		*pa = ALPHA_K0SEG_TO_PHYS(va);
		rv = cpu_kh->page_size - page_off;
	} else if (va >= ALPHA_K1SEG_BASE && va <= ALPHA_K1SEG_END) {
		/*
		 * Real kernel virtual address: do the translation.
		 */

		/* Find and read the L1 PTE. */
		pteoff = cpu_kh->lev1map_pa +
		    l1pte_index(va) * sizeof(alpha_pt_entry_t);
		if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
		    (off_t)_kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L1 PTE");
			goto lose;
		}

		/* Find and read the L2 PTE. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L1 PTE)");
			goto lose;
		}
		pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
		    l2pte_index(va) * sizeof(alpha_pt_entry_t);
		if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
		    (off_t)_kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L2 PTE");
			goto lose;
		}

		/* Find and read the L3 PTE. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L2 PTE)");
			goto lose;
		}
		pteoff = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size +
		    l3pte_index(va) * sizeof(alpha_pt_entry_t);
		if (_kvm_pread(kd, kd->pmfd, (char *)&pte, sizeof(pte),
		    (off_t)_kvm_pa2off(kd, pteoff)) != sizeof(pte)) {
			_kvm_syserr(kd, 0, "could not read L3 PTE");
			goto lose;
		}

		/* Fill in the PA. */
		if ((pte & ALPHA_PTE_VALID) == 0) {
			_kvm_err(kd, 0, "invalid translation (invalid L3 PTE)");
			goto lose;
		}
		*pa = ALPHA_PTE_TO_PFN(pte) * cpu_kh->page_size + page_off;
		rv = cpu_kh->page_size - page_off;
	} else {
		/*
		 * Bogus address (not in KV space): punt.
		 */

		_kvm_err(kd, 0, "invalid kernel virtual address");
lose:
		*pa = -1;
		rv = 0;
	}

	return (rv);
}

/*
 * Translate a physical address to a file-offset in the crash-dump.
 */
off_t
_kvm_pa2off(kvm_t *kd, paddr_t pa)
{
	cpu_kcore_hdr_t *cpu_kh;
	phys_ram_seg_t *ramsegs;
	off_t off;
	int i;

	cpu_kh = kd->cpu_data;
	ramsegs = (phys_ram_seg_t *)((char *)cpu_kh + _ALIGN(sizeof *cpu_kh));

	/*
	 * Walk the RAM segments recorded in the kcore header; off
	 * accumulates the sizes of the segments preceding the one
	 * that contains pa.
	 */
	off = 0;
	for (i = 0; i < cpu_kh->nmemsegs; i++) {
		if (pa >= ramsegs[i].start &&
		    (pa - ramsegs[i].start) < ramsegs[i].size) {
			off += (pa - ramsegs[i].start);
			break;
		}
		off += ramsegs[i].size;
	}
	return (kd->dump_off + off);
}