xref: /netbsd-src/sys/arch/aarch64/aarch64/db_interface.c (revision 90313c06e62e910bf0d1bb24faa9d17dcefd0ab6)
1 /* $NetBSD: db_interface.c,v 1.24 2024/02/07 04:20:26 msaitoh Exp $ */
2 
3 /*
4  * Copyright (c) 2017 Ryo Shimizu
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
20  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
24  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
25  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.24 2024/02/07 04:20:26 msaitoh Exp $");
31 
32 #include <sys/param.h>
33 #include <sys/types.h>
34 
35 #include <uvm/uvm.h>
36 #include <uvm/uvm_ddb.h>
37 #include <uvm/uvm_prot.h>
38 #ifdef __HAVE_PMAP_PV_TRACK
39 #include <uvm/pmap/pmap_pvt.h>
40 #endif
41 
42 #include <aarch64/armreg.h>
43 #include <aarch64/db_machdep.h>
44 #include <aarch64/locore.h>
45 #include <aarch64/machdep.h>
46 #include <aarch64/pmap.h>
47 
48 #include <arm/cpufunc.h>
49 
50 #include <ddb/db_access.h>
51 #include <ddb/db_command.h>
52 #include <ddb/db_output.h>
53 #include <ddb/db_variables.h>
54 #include <ddb/db_sym.h>
55 #include <ddb/db_extern.h>
56 #include <ddb/db_interface.h>
57 
58 #include <dev/cons.h>
59 
db_regs_t ddb_regs;	/* register state presented to ddb on debugger entry */
61 
62 static bool
db_accessible_address(vaddr_t addr,bool readonly)63 db_accessible_address(vaddr_t addr, bool readonly)
64 {
65 	register_t s;
66 	uint64_t par;
67 	int space;
68 
69 	space = aarch64_addressspace(addr);
70 	if (space != AARCH64_ADDRSPACE_LOWER &&
71 	    space != AARCH64_ADDRSPACE_UPPER)
72 		return false;
73 
74 	s = daif_disable(DAIF_I|DAIF_F);
75 
76 	switch (aarch64_addressspace(addr)) {
77 	case AARCH64_ADDRSPACE_LOWER:
78 		if (readonly)
79 			reg_s1e0r_write(addr);
80 		else
81 			reg_s1e0w_write(addr);
82 		break;
83 	case AARCH64_ADDRSPACE_UPPER:
84 		if (readonly)
85 			reg_s1e1r_write(addr);
86 		else
87 			reg_s1e1w_write(addr);
88 		break;
89 	}
90 	isb();
91 	par = reg_par_el1_read();
92 
93 	reg_daif_write(s);
94 
95 	return ((par & PAR_F) == 0);
96 }
97 
/*
 * Read "size" bytes from virtual address "addr" into "data" on behalf of
 * ddb.  Accessibility is verified (once per page) before touching memory;
 * an inaccessible address is reported and the remainder zero-filled
 * instead of faulting.
 */
void
db_read_bytes(vaddr_t addr, size_t size, char *data)
{
	vaddr_t lastpage = -1;	/* page number of the last validated access */
	const char *src;

	for (src = (const char *)addr; size > 0;) {
		const vaddr_t va = (vaddr_t)src;
		uintptr_t tmp;

		/* re-validate only when crossing into a new page */
		if (lastpage != atop(va) && !db_accessible_address(va, true)) {
			db_printf("address %p is invalid\n", src);
			memset(data, 0, size);	/* stubs are filled by zero */
			return;
		}
		lastpage = atop(va);

		if (aarch64_pan_enabled)
			reg_pan_write(0); /* disable PAN */

		/* copy using the widest unit both pointers are aligned for */
		tmp = (uintptr_t)src | (uintptr_t)data;
		if (size >= 8 && (tmp & 7) == 0) {
			*(uint64_t *)data = *(const uint64_t *)src;
			src += 8;
			data += 8;
			size -= 8;
		} else if (size >= 4 && (tmp & 3) == 0) {
			*(uint32_t *)data = *(const uint32_t *)src;
			src += 4;
			data += 4;
			size -= 4;
		} else if (size >= 2 && (tmp & 1) == 0) {
			*(uint16_t *)data = *(const uint16_t *)src;
			src += 2;
			data += 2;
			size -= 2;
		} else {
			*data++ = *src++;
			size--;
		}

		if (aarch64_pan_enabled)
			reg_pan_write(1); /* enable PAN */
	}
}
143 
144 static void
db_write_text(vaddr_t addr,size_t size,const char * data)145 db_write_text(vaddr_t addr, size_t size, const char *data)
146 {
147 	pt_entry_t *ptep, pte;
148 	size_t s;
149 
150 	/*
151 	 * consider page boundary, and
152 	 * it works even if kernel_text is mapped with L2 or L3.
153 	 */
154 	if (atop(addr) != atop(addr + size - 1)) {
155 		s = PAGE_SIZE - (addr & PAGE_MASK);
156 		db_write_text(addr, s, data);
157 		addr += s;
158 		size -= s;
159 		data += s;
160 	}
161 	while (size > 0) {
162 		ptep = kvtopte(addr);
163 		KASSERT(ptep != NULL);
164 
165 		/*
166 		 * change to writable.  it is required to keep execute permission.
167 		 * because if the block/page to which the target address belongs is
168 		 * the same as the block/page to which this function belongs, then
169 		 * if PROT_EXECUTE is dropped and TLB is invalidated, the program
170 		 * will stop...
171 		 */
172 		/* old pte is returned by pmap_kvattr */
173 		pte = pmap_kvattr(ptep, VM_PROT_EXECUTE | VM_PROT_READ | VM_PROT_WRITE);
174 		/* dsb(ishst) included in aarch64_tlbi_by_va */
175 		aarch64_tlbi_by_va(addr);
176 
177 		s = size;
178 		if (size > PAGE_SIZE)
179 			s = PAGE_SIZE;
180 
181 		memcpy((void *)addr, data, s);
182 		cpu_icache_sync_range(addr, size);
183 
184 		/* restore pte */
185 		*ptep = pte;
186 		/* dsb(ishst) included in aarch64_tlbi_by_va */
187 		aarch64_tlbi_by_va(addr);
188 
189 		addr += s;
190 		size -= s;
191 		data += s;
192 	}
193 }
194 
195 void
db_write_bytes(vaddr_t addr,size_t size,const char * data)196 db_write_bytes(vaddr_t addr, size_t size, const char *data)
197 {
198 	vaddr_t kernstart, datastart;
199 	vaddr_t lastpage = -1;
200 	char *dst;
201 
202 	/* if readonly page, require changing attribute to write */
203 	extern char __kernel_text[], __data_start[];
204 	kernstart = trunc_page((vaddr_t)__kernel_text);
205 	datastart = trunc_page((vaddr_t)__data_start);
206 	if (kernstart <= addr && addr < datastart) {
207 		size_t s;
208 
209 		s = datastart - addr;
210 		if (s > size)
211 			s = size;
212 		db_write_text(addr, s, data);
213 		addr += s;
214 		size -= s;
215 		data += s;
216 	}
217 
218 	for (dst = (char *)addr; size > 0;) {
219 		const vaddr_t va = (vaddr_t)dst;
220 		uintptr_t tmp;
221 
222 		if (lastpage != atop(va) && !db_accessible_address(va, false)) {
223 			db_printf("address %p is invalid\n", dst);
224 			return;
225 		}
226 		lastpage = atop(va);
227 
228 		if (aarch64_pan_enabled)
229 			reg_pan_write(0); /* disable PAN */
230 
231 		tmp = (uintptr_t)dst | (uintptr_t)data;
232 		if (size >= 8 && (tmp & 7) == 0) {
233 			*(uint64_t *)dst = *(const uint64_t *)data;
234 			dst += 8;
235 			data += 8;
236 			size -= 8;
237 		} else if (size >= 4 && (tmp & 3) == 0) {
238 			*(uint32_t *)dst = *(const uint32_t *)data;
239 			dst += 4;
240 			data += 4;
241 			size -= 4;
242 		} else if (size >= 2 && (tmp & 1) == 0) {
243 			*(uint16_t *)dst = *(const uint16_t *)data;
244 			dst += 2;
245 			data += 2;
246 			size -= 2;
247 		} else {
248 			*dst++ = *data++;
249 			size--;
250 		}
251 
252 		if (aarch64_pan_enabled)
253 			reg_pan_write(1); /* enable PAN */
254 	}
255 }
256 
257 /*
258  * return register value of $X0..$X30, $SP or 0($XZR)
259  */
260 static uint64_t
db_fetch_reg(unsigned int reg,db_regs_t * regs,bool use_sp)261 db_fetch_reg(unsigned int reg, db_regs_t *regs, bool use_sp)
262 {
263 	if (reg >= 32)
264 		panic("db_fetch_reg: botch");
265 
266 	if (reg == 31) {
267 		/* $SP or $XZR */
268 		return use_sp ? regs->tf_sp : 0;
269 	}
270 	return regs->tf_reg[reg];
271 }
272 
/*
 * Sign-extend the "bitwidth"-bit immediate "imm" to 64 bits and scale
 * it by "multiply" (4 for A64 word-granular branch offsets).
 */
static inline uint64_t
SignExtend(int bitwidth, uint64_t imm, unsigned int multiply)
{
	const uint64_t sign = (uint64_t)1 << (bitwidth - 1);

	/* two's complement: subtract 2^bitwidth when the sign bit is set */
	if ((imm & sign) != 0)
		imm -= sign << 1;
	return imm * multiply;
}
283 
/*
 * Return the destination address of branch instruction "inst" located at
 * "pc".  Register-indirect forms read the target from "regs"; immediate
 * forms add the sign-extended, word-scaled offset to "pc".  Panics if
 * "inst" is not one of the recognized branch encodings.
 */
db_addr_t
db_branch_taken(db_expr_t inst, db_addr_t pc, db_regs_t *regs)
{
	LE32TOH(inst);

/* field extractors for the A64 branch encodings below */
#define INSN_FMT_RN(insn)		(((insn) >> 5) & 0x1f)
#define INSN_FMT_IMM26(insn)	((insn) & 0x03ffffff)
#define INSN_FMT_IMM19(insn)	(((insn) >> 5) & 0x7ffff)
#define INSN_FMT_IMM14(insn)	(((insn) >> 5) & 0x3fff)

	/* register-indirect: target is the Rn register value */
	if ((inst & 0xfffffc1f) == 0xd65f0000 ||	/* ret xN */
	    (inst & 0xfffffc1f) == 0xd63f0000 ||	/* blr xN */
	    (inst & 0xfffffc1f) == 0xd61f0000) {	/* br xN */
		return db_fetch_reg(INSN_FMT_RN(inst), regs, false);
	}

	/* pc-relative with a 26-bit offset */
	if ((inst & 0xfc000000) == 0x94000000 ||	/* bl imm */
	    (inst & 0xfc000000) == 0x14000000) {	/* b imm */
		return SignExtend(26, INSN_FMT_IMM26(inst), 4) + pc;
	}

	/* conditional / compare-and-branch with a 19-bit offset */
	if ((inst & 0xff000010) == 0x54000000 ||	/* b.cond */
	    (inst & 0x7f000000) == 0x35000000 ||	/* cbnz */
	    (inst & 0x7f000000) == 0x34000000) {	/* cbz */
		return SignExtend(19, INSN_FMT_IMM19(inst), 4) + pc;
	}

	/* test-bit-and-branch with a 14-bit offset */
	if ((inst & 0x7f000000) == 0x37000000 ||	/* tbnz */
	    (inst & 0x7f000000) == 0x36000000) {	/* tbz */
		return SignExtend(14, INSN_FMT_IMM14(inst), 4) + pc;
	}

	panic("branch_taken: botch");
}
318 
/*
 * Return true if "inst" always transfers control (ret/bl/blr/b/br, or a
 * b.cond whose condition field is AL, i.e. "always").
 */
bool
db_inst_unconditional_flow_transfer(db_expr_t inst)
{
	LE32TOH(inst);

	if ((inst & 0xfffffc1f) == 0xd65f0000 ||	/* ret xN */
	    (inst & 0xfc000000) == 0x94000000 ||	/* bl */
	    (inst & 0xfffffc1f) == 0xd63f0000 ||	/* blr */
	    (inst & 0xfc000000) == 0x14000000 ||	/* b imm */
	    (inst & 0xfffffc1f) == 0xd61f0000)		/* br */
		return true;

/* condition field (bits [3:0]) of a b.cond instruction */
#define INSN_FMT_COND(insn)	((insn) & 0xf)
#define CONDITION_AL	14

	if ((inst & 0xff000010) == 0x54000000 &&	/* b.cond */
	    INSN_FMT_COND(inst) == CONDITION_AL)	/* always? */
		return true;

	return false;
}
340 
/*
 * Pretty-print one translation table entry "pte" of the given level
 * (0..3) through the printf-like callback "pr".  Distinguishes table
 * entries (L0-L2), block/page entries (L1-L3), and illegal encodings,
 * and decodes the permission/attribute bits of leaf entries.
 */
void
db_pte_print(pt_entry_t pte, int level,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
	if (pte == 0) {
		pr(" UNUSED\n");
		return;
	}

	pr(" %s", (pte & LX_VALID) ? "VALID" : "**INVALID**");

	if (level == 0 ||
	    (level == 1 && l1pde_is_table(pte)) ||
	    (level == 2 && l2pde_is_table(pte))) {

		/* L0/L1/L2 TABLE */
		if (level == 0 && (pte & LX_TYPE) != LX_TYPE_TBL)
			pr(" **ILLEGAL TYPE**"); /* L0 doesn't support block */
		else
			pr(" L%d-TABLE", level);

		pr(", PA=%lx", l0pde_pa(pte));

		/* hierarchical attributes applied to the next-level table */
		if (pte & LX_TBL_NSTABLE)
			pr(", NSTABLE");
		if (pte & LX_TBL_APTABLE)
			pr(", APTABLE");
		if (pte & LX_TBL_UXNTABLE)
			pr(", UXNTABLE");
		if (pte & LX_TBL_PXNTABLE)
			pr(", PXNTABLE");

	} else if ((level == 1 && l1pde_is_block(pte)) ||
	    (level == 2 && l2pde_is_block(pte)) ||
	    level == 3) {

		/* L1/L2 BLOCK or L3 PAGE */
		switch (level) {
		case 1:
			pr(" L1(1G)-BLOCK");
			break;
		case 2:
			pr(" L2(2M)-BLOCK");
			break;
		case 3:
			pr(" %s", l3pte_is_page(pte) ?
			    "L3(4K)-PAGE" : "**ILLEGAL TYPE**");
			break;
		}

		pr(", PA=%lx", l3pte_pa(pte));

		/* execute-never bits: UXN for EL0, PXN for EL1 */
		pr(", %s", (pte & LX_BLKPAG_UXN) ? "UXN" : "UX");
		pr(", %s", (pte & LX_BLKPAG_PXN) ? "PXN" : "PX");

		if (pte & LX_BLKPAG_CONTIG)
			pr(", CONTIG");

		/* global vs. ASID-tagged, and access-flag state */
		pr(", %s", (pte & LX_BLKPAG_NG) ? "nG" : "G");
		pr(", %s", (pte & LX_BLKPAG_AF) ?
		    "accessible" :
		    "**fault** ");

		/* shareability domain */
		switch (pte & LX_BLKPAG_SH) {
		case LX_BLKPAG_SH_NS:
			pr(", SH_NS");
			break;
		case LX_BLKPAG_SH_OS:
			pr(", SH_OS");
			break;
		case LX_BLKPAG_SH_IS:
			pr(", SH_IS");
			break;
		default:
			pr(", SH_??");
			break;
		}

		/* access permissions and security state */
		pr(", %s", (pte & LX_BLKPAG_AP_RO) ? "RO" : "RW");
		pr(", %s", (pte & LX_BLKPAG_APUSER) ? "EL0" : "EL1");
		pr(", %s", (pte & LX_BLKPAG_NS) ? "NS" : "secure");

		/* memory attribute index (cacheability / device type) */
		switch (pte & LX_BLKPAG_ATTR_MASK) {
		case LX_BLKPAG_ATTR_NORMAL_WB:
			pr(", WB");
			break;
		case LX_BLKPAG_ATTR_NORMAL_NC:
			pr(", NC");
			break;
		case LX_BLKPAG_ATTR_NORMAL_WT:
			pr(", WT");
			break;
		case LX_BLKPAG_ATTR_DEVICE_MEM:
			pr(", DEV");
			break;
		case LX_BLKPAG_ATTR_DEVICE_MEM_NP:
			pr(", DEV(NP)");
			break;
		default:
			pr(", ATTR(%lu)", __SHIFTOUT(pte, LX_BLKPAG_ATTR_INDX));
			break;
		}

		/* software-defined (OS-reserved) bits */
		if (pte & LX_BLKPAG_OS_0)
			pr(", " PMAP_PTE_OS0);
		if (pte & LX_BLKPAG_OS_1)
			pr(", " PMAP_PTE_OS1);
		if (pte & LX_BLKPAG_OS_2)
			pr(", " PMAP_PTE_OS2);
		if (pte & LX_BLKPAG_OS_3)
			pr(", " PMAP_PTE_OS3);
	} else {
		pr(" **ILLEGAL TYPE**");
	}
	pr("\n");
}
457 
/*
 * Walk the L0->L1->L2->L3 translation tables for virtual address "va"
 * (choosing TTBR0 or TTBR1 by address space) and print each entry on the
 * way down, stopping at the first invalid or block entry.  For a final
 * L3 mapping, also print the backing vm_page (or PV-tracked state).
 */
void
db_pteinfo(vaddr_t va, void (*pr)(const char *, ...) __printflike(1, 2))
{
	struct vm_page *pg;
	bool user;
	pd_entry_t *l0, *l1, *l2, *l3;
	pd_entry_t pde;
	pt_entry_t pte;
	uint64_t ttbr;
	paddr_t pa;
	unsigned int idx;

	/* upper addresses translate via TTBR1 (kernel), lower via TTBR0 */
	switch (aarch64_addressspace(va)) {
	case AARCH64_ADDRSPACE_UPPER:
		user = false;
		ttbr = reg_ttbr1_el1_read();
		break;
	case AARCH64_ADDRSPACE_LOWER:
		user = true;
		ttbr = reg_ttbr0_el1_read();
		break;
	default:
		pr("illegal address space\n");
		return;
	}
	pa = ttbr & TTBR_BADDR;
	l0 = (pd_entry_t *)AARCH64_PA_TO_KVA(pa);

	/*
	 * traverse L0 -> L1 -> L2 -> L3 table
	 */
	pr("TTBR%d=%016"PRIx64", pa=%016"PRIxPADDR", va=%p",
	    user ? 0 : 1, ttbr, pa, l0);
	pr(", input-va=%016"PRIxVADDR
	    ", L0-index=%ld, L1-index=%ld, L2-index=%ld, L3-index=%ld\n",
	    va,
	    (va & L0_ADDR_BITS) >> L0_SHIFT,
	    (va & L1_ADDR_BITS) >> L1_SHIFT,
	    (va & L2_ADDR_BITS) >> L2_SHIFT,
	    (va & L3_ADDR_BITS) >> L3_SHIFT);

	idx = l0pde_index(va);
	pde = l0[idx];

	pr("L0[%3d]=%016"PRIx64":", idx, pde);
	db_pte_print(pde, 0, pr);

	if (!l0pde_valid(pde))
		return;

	l1 = (pd_entry_t *)AARCH64_PA_TO_KVA(l0pde_pa(pde));
	idx = l1pde_index(va);
	pde = l1[idx];

	pr(" L1[%3d]=%016"PRIx64":", idx, pde);
	db_pte_print(pde, 1, pr);

	/* a block entry is a leaf: nothing further to descend into */
	if (!l1pde_valid(pde) || l1pde_is_block(pde))
		return;

	l2 = (pd_entry_t *)AARCH64_PA_TO_KVA(l1pde_pa(pde));
	idx = l2pde_index(va);
	pde = l2[idx];

	pr("  L2[%3d]=%016"PRIx64":", idx, pde);
	db_pte_print(pde, 2, pr);

	if (!l2pde_valid(pde) || l2pde_is_block(pde))
		return;

	l3 = (pd_entry_t *)AARCH64_PA_TO_KVA(l2pde_pa(pde));
	idx = l3pte_index(va);
	pte = l3[idx];

	pr("   L3[%3d]=%016"PRIx64":", idx, pte);
	db_pte_print(pte, 3, pr);

	/* describe the physical page behind the final mapping, if managed */
	pa = l3pte_pa(pte);
	pg = PHYS_TO_VM_PAGE(pa);

	if (pg != NULL) {
		uvm_page_printit(pg, false, pr);

		pmap_db_mdpg_print(pg, pr);
	} else {
#ifdef __HAVE_PMAP_PV_TRACK
		if (pmap_pv_tracked(pa))
			pr("PV tracked");
		else
			pr("No VM_PAGE or PV tracked");
#else
		pr("no VM_PAGE\n");
#endif
	}
}
553 
/*
 * Recursively print (or, in countmode, only count) the valid entries of
 * the level-"level" translation table at "pdp".  "lnindex" is this
 * table's index within its parent and "va" the first virtual address it
 * maps; both are used only for the report.
 */
static void
dump_ln_table(bool countmode, pd_entry_t *pdp, int level, int lnindex,
    vaddr_t va, void (*pr)(const char *, ...) __printflike(1, 2))
{
	struct vm_page *pg;
	pd_entry_t pde;
	paddr_t pa;
	int i, n;
	const char *spaces[4] = { " ", "  ", "   ", "    " };	/* indent per level */
	const char *spc = spaces[level];

	pa = AARCH64_KVA_TO_PA((vaddr_t)pdp);
	pg = PHYS_TO_VM_PAGE(pa);

	if (pg == NULL) {
		pr("%sL%d: pa=%lx pg=NULL\n", spc, level, pa);
	} else {
		pr("%sL%d: pa=%lx pg=%p\n", spc, level, pa, pg);
	}

	for (i = n = 0; i < Ln_ENTRIES; i++) {
		/* fetch through db_read_bytes so a bad table cannot fault us */
		db_read_bytes((db_addr_t)&pdp[i], sizeof(pdp[i]), (char *)&pde);
		if (lxpde_valid(pde)) {
			if (!countmode)
				pr("%sL%d[%3d] %3dth, va=%016lx, pte=%016lx:",
				    spc, level, i, n, va, pde);
			n++;

			if ((level != 0 && level != 3 && l1pde_is_block(pde)) ||
			    (level == 3 && l3pte_is_page(pde))) {
				/* leaf (block or page) entry */
				if (!countmode)
					db_pte_print(pde, level, pr);
			} else if (level != 3 && l1pde_is_table(pde)) {
				/* table entry: descend into the next level */
				if (!countmode)
					db_pte_print(pde, level, pr);
				pa = l0pde_pa(pde);
				dump_ln_table(countmode,
				    (pd_entry_t *)AARCH64_PA_TO_KVA(pa),
				    level + 1, i, va, pr);
			} else {
				/* valid bit set, but not a legal type */
				if (!countmode)
					db_pte_print(pde, level, pr);
			}
		}

		/* advance va by the span covered by one entry at this level */
		switch (level) {
		case 0:
			va += L0_SIZE;
			break;
		case 1:
			va += L1_SIZE;
			break;
		case 2:
			va += L2_SIZE;
			break;
		case 3:
			va += L3_SIZE;
			break;
		}
	}

	if (level == 0)
		pr("L0 has %d entries\n", n);
	else
		pr("%sL%d[%3d] has %d L%d entries\n", spaces[level - 1],
		    level - 1, lnindex, n, level);
}
621 
/* Dump (or just count, in countmode) everything reachable from L0 table "pdp". */
static void
db_dump_l0table(bool countmode, pd_entry_t *pdp, vaddr_t va_base,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
	dump_ln_table(countmode, pdp, 0, 0, va_base, pr);
}
628 
/*
 * ddb helper: "va" is the address of a struct pmap; print it and dump
 * (or count, in countmode) its L0 translation table.
 */
void
db_ttbrdump(bool countmode, vaddr_t va,
    void (*pr)(const char *, ...) __printflike(1, 2))
{
	struct pmap *pm, _pm;

	pm = (struct pmap *)va;
	/* copy via db_read_bytes so an invalid pointer cannot fault ddb */
	db_read_bytes((db_addr_t)va, sizeof(_pm), (char *)&_pm);

	pr("pmap=%p\n", pm);
	pmap_db_pmap_print(&_pm, pr);

	/* the kernel pmap maps the upper VA range; user pmaps start at 0 */
	db_dump_l0table(countmode, pmap_l0table(pm),
	    (pm == pmap_kernel()) ? 0xffff000000000000UL : 0, pr);
}
644 
/*
 * Enter the debugger by executing a BRK instruction; the resulting
 * breakpoint exception is what drops the kernel into ddb.
 */
void
cpu_Debugger(void)
{
	__asm __volatile ("brk #0xffff");
}
650