/*	$NetBSD: db_interface.c,v 1.52 2014/03/30 08:00:34 skrll Exp $	*/

/*
 * Copyright (c) 1996 Scott K. Stevens
 *
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * From: db_interface.c,v 2.4 1991/02/05 17:11:13 mrt (CMU)
 */

/*
 * Interface to new debugger.
35 */ 36 37 #include <sys/cdefs.h> 38 __KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.52 2014/03/30 08:00:34 skrll Exp $"); 39 40 #include "opt_ddb.h" 41 #include "opt_kgdb.h" 42 43 #include <sys/param.h> 44 #include <sys/proc.h> 45 #include <sys/reboot.h> 46 #include <sys/systm.h> /* just for boothowto */ 47 #include <sys/exec.h> 48 #include <sys/atomic.h> 49 #include <sys/intr.h> 50 51 #include <uvm/uvm_extern.h> 52 53 #include <arm/arm32/db_machdep.h> 54 #include <arm/arm32/katelib.h> 55 #include <arm/undefined.h> 56 #include <ddb/db_access.h> 57 #include <ddb/db_command.h> 58 #include <ddb/db_output.h> 59 #include <ddb/db_variables.h> 60 #include <ddb/db_sym.h> 61 #include <ddb/db_extern.h> 62 #include <ddb/db_interface.h> 63 #include <dev/cons.h> 64 65 #if defined(KGDB) || !defined(DDB) 66 #define db_printf printf 67 #endif 68 69 u_int db_fetch_reg(int, db_regs_t *); 70 71 int db_trapper(u_int, u_int, trapframe_t *, int); 72 73 int db_active = 0; 74 db_regs_t ddb_regs; /* register state */ 75 db_regs_t *ddb_regp; 76 77 #ifdef MULTIPROCESSOR 78 volatile struct cpu_info *db_onproc; 79 volatile struct cpu_info *db_newcpu; 80 #endif 81 82 83 84 85 #ifdef DDB 86 /* 87 * kdb_trap - field a TRACE or BPT trap 88 */ 89 int 90 kdb_trap(int type, db_regs_t *regs) 91 { 92 struct cpu_info * const ci = curcpu(); 93 db_regs_t dbreg; 94 int s; 95 96 switch (type) { 97 case T_BREAKPOINT: /* breakpoint */ 98 case -1: /* keyboard interrupt */ 99 break; 100 #ifdef MULTIPROCESSOR 101 case -2: 102 /* 103 * We called to enter ddb from another process but by the time 104 * we got here, no one was in ddb. So ignore the request. 105 */ 106 if (db_onproc == NULL) 107 return 1; 108 break; 109 #endif 110 default: 111 if (db_recover != 0) { 112 /* This will longjmp back into db_command_loop() */ 113 db_error("Faulted in DDB; continuing...\n"); 114 /*NOTREACHED*/ 115 } 116 } 117 118 /* Should switch to kdb`s own stack here. 
*/ 119 120 #ifdef MULTIPROCESSOR 121 const bool is_mp_p = ncpu > 1; 122 if (is_mp_p) { 123 /* 124 * Try to take ownership of DDB. If we do, tell all other 125 * CPUs to enter DDB too. 126 */ 127 if (atomic_cas_ptr(&db_onproc, NULL, ci) == NULL) { 128 intr_ipi_send(NULL, IPI_DDB); 129 } 130 } 131 for (;;) { 132 if (is_mp_p) { 133 /* 134 * While we aren't the master, wait until the master 135 * gives control to us or exits. If it exited, we 136 * just exit to. Otherwise this cpu will enter DDB. 137 */ 138 membar_consumer(); 139 while (db_onproc != ci) { 140 if (db_onproc == NULL) 141 return 1; 142 #ifdef _ARM_ARCH_6 143 __asm __volatile("wfe"); 144 membar_consumer(); 145 #endif 146 if (db_onproc == ci) { 147 printf("%s: switching to %s\n", 148 __func__, ci->ci_cpuname); 149 } 150 } 151 } 152 #endif 153 154 s = splhigh(); 155 ci->ci_ddb_regs = &dbreg; 156 ddb_regp = &dbreg; 157 ddb_regs = *regs; 158 159 atomic_inc_32(&db_active); 160 cnpollc(true); 161 db_trap(type, 0/*code*/); 162 cnpollc(false); 163 atomic_dec_32(&db_active); 164 165 ci->ci_ddb_regs = NULL; 166 ddb_regp = &dbreg; 167 *regs = ddb_regs; 168 splx(s); 169 170 #ifdef MULTIPROCESSOR 171 if (is_mp_p && db_newcpu != NULL) { 172 db_onproc = db_newcpu; 173 db_newcpu = NULL; 174 #ifdef _ARM_ARCH_6 175 membar_producer(); 176 __asm __volatile("sev; sev"); 177 #endif 178 continue; 179 } 180 break; 181 } 182 183 if (is_mp_p) { 184 /* 185 * We are exiting DDB so there is noone onproc. Tell 186 * the other CPUs to exit. 
187 */ 188 db_onproc = NULL; 189 #ifdef _ARM_ARCH_6 190 __asm __volatile("sev; sev"); 191 #endif 192 } 193 #endif 194 195 return (1); 196 } 197 #endif 198 199 int 200 db_validate_address(vaddr_t addr) 201 { 202 struct proc *p = curproc; 203 struct pmap *pmap; 204 205 if (!p || !p->p_vmspace || !p->p_vmspace->vm_map.pmap || 206 #ifndef ARM32_NEW_VM_LAYOUT 207 addr >= VM_MAXUSER_ADDRESS 208 #else 209 addr >= VM_MIN_KERNEL_ADDRESS 210 #endif 211 ) 212 pmap = pmap_kernel(); 213 else 214 pmap = p->p_vmspace->vm_map.pmap; 215 216 return (pmap_extract(pmap, addr, NULL) == false); 217 } 218 219 /* 220 * Read bytes from kernel address space for debugger. 221 */ 222 void 223 db_read_bytes(vaddr_t addr, size_t size, char *data) 224 { 225 char *src = (char *)addr; 226 227 if (db_validate_address((u_int)src)) { 228 db_printf("address %p is invalid\n", src); 229 return; 230 } 231 232 if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0) { 233 *((int*)data) = *((int*)src); 234 return; 235 } 236 237 if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0) { 238 *((short*)data) = *((short*)src); 239 return; 240 } 241 242 while (size-- > 0) { 243 if (db_validate_address((u_int)src)) { 244 db_printf("address %p is invalid\n", src); 245 return; 246 } 247 *data++ = *src++; 248 } 249 } 250 251 static void 252 db_write_text(vaddr_t addr, size_t size, const char *data) 253 { 254 struct pmap *pmap = pmap_kernel(); 255 pd_entry_t *pde, oldpde, tmppde; 256 pt_entry_t *pte, oldpte, tmppte; 257 vaddr_t pgva; 258 size_t limit, savesize; 259 char *dst; 260 261 /* XXX: gcc */ 262 oldpte = 0; 263 264 if ((savesize = size) == 0) 265 return; 266 267 dst = (char *) addr; 268 269 do { 270 /* Get the PDE of the current VA. 
*/ 271 if (pmap_get_pde_pte(pmap, (vaddr_t) dst, &pde, &pte) == false) 272 goto no_mapping; 273 switch ((oldpde = *pde) & L1_TYPE_MASK) { 274 case L1_TYPE_S: 275 pgva = (vaddr_t)dst & L1_S_FRAME; 276 limit = L1_S_SIZE - ((vaddr_t)dst & L1_S_OFFSET); 277 278 tmppde = l1pte_set_writable(oldpde); 279 *pde = tmppde; 280 PTE_SYNC(pde); 281 break; 282 283 case L1_TYPE_C: 284 pgva = (vaddr_t)dst & L2_S_FRAME; 285 limit = L2_S_SIZE - ((vaddr_t)dst & L2_S_OFFSET); 286 287 if (pte == NULL) 288 goto no_mapping; 289 oldpte = *pte; 290 tmppte = l2pte_set_writable(oldpte); 291 *pte = tmppte; 292 PTE_SYNC(pte); 293 break; 294 295 default: 296 no_mapping: 297 printf(" address 0x%08lx not a valid page\n", 298 (vaddr_t) dst); 299 return; 300 } 301 cpu_tlb_flushD_SE(pgva); 302 cpu_cpwait(); 303 304 if (limit > size) 305 limit = size; 306 size -= limit; 307 308 /* 309 * Page is now writable. Do as much access as we 310 * can in this page. 311 */ 312 for (; limit > 0; limit--) 313 *dst++ = *data++; 314 315 /* 316 * Restore old mapping permissions. 317 */ 318 switch (oldpde & L1_TYPE_MASK) { 319 case L1_TYPE_S: 320 *pde = oldpde; 321 PTE_SYNC(pde); 322 break; 323 324 case L1_TYPE_C: 325 *pte = oldpte; 326 PTE_SYNC(pte); 327 break; 328 } 329 cpu_tlb_flushD_SE(pgva); 330 cpu_cpwait(); 331 } while (size != 0); 332 333 /* Sync the I-cache. */ 334 cpu_icache_sync_range(addr, savesize); 335 } 336 337 /* 338 * Write bytes to kernel address space for debugger. 
339 */ 340 void 341 db_write_bytes(vaddr_t addr, size_t size, const char *data) 342 { 343 extern char kernel_text[]; 344 extern char etext[]; 345 char *dst; 346 size_t loop; 347 348 /* If any part is in kernel text, use db_write_text() */ 349 if (addr >= (vaddr_t) kernel_text && addr < (vaddr_t) etext) { 350 db_write_text(addr, size, data); 351 return; 352 } 353 354 dst = (char *)addr; 355 if (db_validate_address((u_int)dst)) { 356 db_printf("address %p is invalid\n", dst); 357 return; 358 } 359 360 if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0) 361 *((int*)dst) = *((const int *)data); 362 else 363 if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0) 364 *((short*)dst) = *((const short *)data); 365 else { 366 loop = size; 367 while (loop-- > 0) { 368 if (db_validate_address((u_int)dst)) { 369 db_printf("address %p is invalid\n", dst); 370 return; 371 } 372 *dst++ = *data++; 373 } 374 } 375 376 /* make sure the caches and memory are in sync */ 377 cpu_icache_sync_range(addr, size); 378 379 /* In case the current page tables have been modified ... */ 380 cpu_tlb_flushID(); 381 cpu_cpwait(); 382 } 383 384 #ifdef DDB 385 void 386 cpu_Debugger(void) 387 { 388 __asm(".word 0xe7ffffff"); 389 } 390 391 int 392 db_trapper(u_int addr, u_int inst, trapframe_t *frame, int fault_code) 393 { 394 395 if (fault_code == 0) { 396 if ((inst & ~INSN_COND_MASK) == (BKPT_INST & ~INSN_COND_MASK)) 397 kdb_trap(T_BREAKPOINT, frame); 398 else 399 kdb_trap(-1, frame); 400 } else 401 return (1); 402 return (0); 403 } 404 405 extern u_int esym; 406 extern u_int end; 407 408 static struct undefined_handler db_uh; 409 410 void 411 db_machine_init(void) 412 { 413 414 /* 415 * We get called before malloc() is available, so supply a static 416 * struct undefined_handler. 
417 */ 418 db_uh.uh_handler = db_trapper; 419 install_coproc_handler_static(CORE_UNKNOWN_HANDLER, &db_uh); 420 } 421 #endif 422 423 u_int 424 db_fetch_reg(int reg, db_regs_t *regs) 425 { 426 427 switch (reg) { 428 case 0: 429 return (regs->tf_r0); 430 case 1: 431 return (regs->tf_r1); 432 case 2: 433 return (regs->tf_r2); 434 case 3: 435 return (regs->tf_r3); 436 case 4: 437 return (regs->tf_r4); 438 case 5: 439 return (regs->tf_r5); 440 case 6: 441 return (regs->tf_r6); 442 case 7: 443 return (regs->tf_r7); 444 case 8: 445 return (regs->tf_r8); 446 case 9: 447 return (regs->tf_r9); 448 case 10: 449 return (regs->tf_r10); 450 case 11: 451 return (regs->tf_r11); 452 case 12: 453 return (regs->tf_r12); 454 case 13: 455 return (regs->tf_svc_sp); 456 case 14: 457 return (regs->tf_svc_lr); 458 case 15: 459 return (regs->tf_pc); 460 default: 461 panic("db_fetch_reg: botch"); 462 } 463 } 464 465 u_int 466 branch_taken(u_int insn, u_int pc, db_regs_t *regs) 467 { 468 u_int addr, nregs; 469 470 switch ((insn >> 24) & 0xf) { 471 case 0xa: /* b ... */ 472 case 0xb: /* bl ... 
*/ 473 addr = ((insn << 2) & 0x03ffffff); 474 if (addr & 0x02000000) 475 addr |= 0xfc000000; 476 return (pc + 8 + addr); 477 case 0x7: /* ldr pc, [pc, reg, lsl #2] */ 478 addr = db_fetch_reg(insn & 0xf, regs); 479 addr = pc + 8 + (addr << 2); 480 db_read_bytes(addr, 4, (char *)&addr); 481 return (addr); 482 case 0x5: /* ldr pc, [reg] */ 483 addr = db_fetch_reg((insn >> 16) & 0xf, regs); 484 db_read_bytes(addr, 4, (char *)&addr); 485 return (addr); 486 case 0x1: /* mov pc, reg */ 487 addr = db_fetch_reg(insn & 0xf, regs); 488 return (addr); 489 case 0x8: /* ldmxx reg, {..., pc} */ 490 case 0x9: 491 addr = db_fetch_reg((insn >> 16) & 0xf, regs); 492 nregs = (insn & 0x5555) + ((insn >> 1) & 0x5555); 493 nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333); 494 nregs = (nregs + (nregs >> 4)) & 0x0f0f; 495 nregs = (nregs + (nregs >> 8)) & 0x001f; 496 switch ((insn >> 23) & 0x3) { 497 case 0x0: /* ldmda */ 498 addr = addr - 0; 499 break; 500 case 0x1: /* ldmia */ 501 addr = addr + 0 + ((nregs - 1) << 2); 502 break; 503 case 0x2: /* ldmdb */ 504 addr = addr - 4; 505 break; 506 case 0x3: /* ldmib */ 507 addr = addr + 4 + ((nregs - 1) << 2); 508 break; 509 } 510 db_read_bytes(addr, 4, (char *)&addr); 511 return (addr); 512 default: 513 panic("branch_taken: botch"); 514 } 515 } 516