1 /* Blackfin Memory Management Unit (MMU) model. 2 3 Copyright (C) 2010-2024 Free Software Foundation, Inc. 4 Contributed by Analog Devices, Inc. 5 6 This file is part of simulators. 7 8 This program is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License as published by 10 the Free Software Foundation; either version 3 of the License, or 11 (at your option) any later version. 12 13 This program is distributed in the hope that it will be useful, 14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 GNU General Public License for more details. 17 18 You should have received a copy of the GNU General Public License 19 along with this program. If not, see <http://www.gnu.org/licenses/>. */ 20 21 /* This must come before any other includes. */ 22 #include "defs.h" 23 24 #include "sim-main.h" 25 #include "sim-options.h" 26 #include "devices.h" 27 #include "dv-bfin_mmu.h" 28 #include "dv-bfin_cec.h" 29 30 /* XXX: Should this really be two blocks of registers ? PRM describes 31 these as two Content Addressable Memory (CAM) blocks. */ 32 33 struct bfin_mmu 34 { 35 bu32 base; 36 37 /* Order after here is important -- matches hardware MMR layout. */ 38 bu32 sram_base_address; 39 40 bu32 dmem_control, dcplb_fault_status, dcplb_fault_addr; 41 char _dpad0[0x100 - 0x0 - (4 * 4)]; 42 bu32 dcplb_addr[16]; 43 char _dpad1[0x200 - 0x100 - (4 * 16)]; 44 bu32 dcplb_data[16]; 45 char _dpad2[0x300 - 0x200 - (4 * 16)]; 46 bu32 dtest_command; 47 char _dpad3[0x400 - 0x300 - (4 * 1)]; 48 bu32 dtest_data[2]; 49 50 char _dpad4[0x1000 - 0x400 - (4 * 2)]; 51 52 bu32 idk; /* Filler MMR; hardware simply ignores. */ 53 bu32 imem_control, icplb_fault_status, icplb_fault_addr; 54 char _ipad0[0x100 - 0x0 - (4 * 4)]; 55 bu32 icplb_addr[16]; 56 char _ipad1[0x200 - 0x100 - (4 * 16)]; 57 bu32 icplb_data[16]; 58 char _ipad2[0x300 - 0x200 - (4 * 16)]; 59 bu32 itest_command; 60 char _ipad3[0x400 - 0x300 - (4 * 1)]; 61 bu32 itest_data[2]; 62 }; 63 #define mmr_base() offsetof(struct bfin_mmu, sram_base_address) 64 #define mmr_offset(mmr) (offsetof(struct bfin_mmu, mmr) - mmr_base()) 65 #define mmr_idx(mmr) (mmr_offset (mmr) / 4) 66 67 static const char * const mmr_names[BFIN_COREMMR_MMU_SIZE / 4] = 68 { 69 "SRAM_BASE_ADDRESS", "DMEM_CONTROL", "DCPLB_FAULT_STATUS", "DCPLB_FAULT_ADDR", 70 [mmr_idx (dcplb_addr[0])] = "DCPLB_ADDR0", 71 "DCPLB_ADDR1", "DCPLB_ADDR2", "DCPLB_ADDR3", "DCPLB_ADDR4", "DCPLB_ADDR5", 72 "DCPLB_ADDR6", "DCPLB_ADDR7", "DCPLB_ADDR8", "DCPLB_ADDR9", "DCPLB_ADDR10", 73 "DCPLB_ADDR11", "DCPLB_ADDR12", "DCPLB_ADDR13", "DCPLB_ADDR14", "DCPLB_ADDR15", 74 [mmr_idx (dcplb_data[0])] = "DCPLB_DATA0", 75 "DCPLB_DATA1", "DCPLB_DATA2", "DCPLB_DATA3", "DCPLB_DATA4", "DCPLB_DATA5", 76 "DCPLB_DATA6", "DCPLB_DATA7", "DCPLB_DATA8", "DCPLB_DATA9", "DCPLB_DATA10", 77 "DCPLB_DATA11", "DCPLB_DATA12", "DCPLB_DATA13", "DCPLB_DATA14", "DCPLB_DATA15", 78 [mmr_idx (dtest_command)] = "DTEST_COMMAND", 79 [mmr_idx (dtest_data[0])] = "DTEST_DATA0", "DTEST_DATA1", 80 [mmr_idx (imem_control)] = "IMEM_CONTROL", "ICPLB_FAULT_STATUS", "ICPLB_FAULT_ADDR", 81 [mmr_idx (icplb_addr[0])] = "ICPLB_ADDR0", 82 "ICPLB_ADDR1", "ICPLB_ADDR2", "ICPLB_ADDR3", "ICPLB_ADDR4", "ICPLB_ADDR5", 83 "ICPLB_ADDR6", "ICPLB_ADDR7", "ICPLB_ADDR8", "ICPLB_ADDR9", "ICPLB_ADDR10", 84 "ICPLB_ADDR11", "ICPLB_ADDR12", "ICPLB_ADDR13", "ICPLB_ADDR14", "ICPLB_ADDR15", 85 [mmr_idx (icplb_data[0])] = "ICPLB_DATA0", 86 "ICPLB_DATA1", "ICPLB_DATA2", "ICPLB_DATA3", "ICPLB_DATA4", "ICPLB_DATA5", 87 "ICPLB_DATA6", "ICPLB_DATA7", "ICPLB_DATA8", "ICPLB_DATA9", "ICPLB_DATA10", 88 "ICPLB_DATA11", "ICPLB_DATA12", "ICPLB_DATA13", "ICPLB_DATA14", "ICPLB_DATA15", 89 [mmr_idx (itest_command)] = "ITEST_COMMAND", 90 [mmr_idx (itest_data[0])] = "ITEST_DATA0", "ITEST_DATA1", 91 }; 92 #define mmr_name(off) (mmr_names[(off) / 4] ? : "<INV>") 93 94 static bool bfin_mmu_skip_cplbs = false; 95 96 static unsigned 97 bfin_mmu_io_write_buffer (struct hw *me, const void *source, 98 int space, address_word addr, unsigned nr_bytes) 99 { 100 struct bfin_mmu *mmu = hw_data (me); 101 bu32 mmr_off; 102 bu32 value; 103 bu32 *valuep; 104 105 /* Invalid access mode is higher priority than missing register. */ 106 if (!dv_bfin_mmr_require_32 (me, addr, nr_bytes, true)) 107 return 0; 108 109 value = dv_load_4 (source); 110 111 mmr_off = addr - mmu->base; 112 valuep = (void *)((uintptr_t)mmu + mmr_base() + mmr_off); 113 114 HW_TRACE_WRITE (); 115 116 switch (mmr_off) 117 { 118 case mmr_offset(dmem_control): 119 case mmr_offset(imem_control): 120 /* XXX: IMC/DMC bit should add/remove L1 cache regions ... */ 121 case mmr_offset(dtest_data[0]) ... mmr_offset(dtest_data[1]): 122 case mmr_offset(itest_data[0]) ... mmr_offset(itest_data[1]): 123 case mmr_offset(dcplb_addr[0]) ... mmr_offset(dcplb_addr[15]): 124 case mmr_offset(dcplb_data[0]) ... mmr_offset(dcplb_data[15]): 125 case mmr_offset(icplb_addr[0]) ... mmr_offset(icplb_addr[15]): 126 case mmr_offset(icplb_data[0]) ... mmr_offset(icplb_data[15]): 127 *valuep = value; 128 break; 129 case mmr_offset(sram_base_address): 130 case mmr_offset(dcplb_fault_status): 131 case mmr_offset(dcplb_fault_addr): 132 case mmr_offset(idk): 133 case mmr_offset(icplb_fault_status): 134 case mmr_offset(icplb_fault_addr): 135 /* Discard writes to these. */ 136 break; 137 case mmr_offset(itest_command): 138 /* XXX: Not supported atm. */ 139 if (value) 140 hw_abort (me, "ITEST_COMMAND unimplemented"); 141 break; 142 case mmr_offset(dtest_command): 143 /* Access L1 memory indirectly. */ 144 *valuep = value; 145 if (value) 146 { 147 bu32 sram_addr = mmu->sram_base_address | 148 ((value >> (26 - 11)) & (1 << 11)) | /* addr bit 11 (Way0/Way1) */ 149 ((value >> (24 - 21)) & (1 << 21)) | /* addr bit 21 (Data/Inst) */ 150 ((value >> (23 - 15)) & (1 << 15)) | /* addr bit 15 (Data Bank) */ 151 ((value >> (16 - 12)) & (3 << 12)) | /* addr bits 13:12 (Subbank) */ 152 (value & 0x47F8); /* addr bits 14 & 10:3 */ 153 154 if (!(value & TEST_DATA_ARRAY)) 155 hw_abort (me, "DTEST_COMMAND tag array unimplemented"); 156 if (value & 0xfa7cb801) 157 hw_abort (me, "DTEST_COMMAND bits undefined"); 158 159 if (value & TEST_WRITE) 160 sim_write (hw_system (me), sram_addr, mmu->dtest_data, 8); 161 else 162 sim_read (hw_system (me), sram_addr, mmu->dtest_data, 8); 163 } 164 break; 165 default: 166 dv_bfin_mmr_invalid (me, addr, nr_bytes, true); 167 return 0; 168 } 169 170 return nr_bytes; 171 } 172 173 static unsigned 174 bfin_mmu_io_read_buffer (struct hw *me, void *dest, 175 int space, address_word addr, unsigned nr_bytes) 176 { 177 struct bfin_mmu *mmu = hw_data (me); 178 bu32 mmr_off; 179 bu32 *valuep; 180 181 /* Invalid access mode is higher priority than missing register. */ 182 if (!dv_bfin_mmr_require_32 (me, addr, nr_bytes, false)) 183 return 0; 184 185 mmr_off = addr - mmu->base; 186 valuep = (void *)((uintptr_t)mmu + mmr_base() + mmr_off); 187 188 HW_TRACE_READ (); 189 190 switch (mmr_off) 191 { 192 case mmr_offset(dmem_control): 193 case mmr_offset(imem_control): 194 case mmr_offset(dtest_command): 195 case mmr_offset(dtest_data[0]) ... mmr_offset(dtest_data[2]): 196 case mmr_offset(itest_command): 197 case mmr_offset(itest_data[0]) ... mmr_offset(itest_data[2]): 198 /* XXX: should do something here. */ 199 case mmr_offset(dcplb_addr[0]) ... mmr_offset(dcplb_addr[15]): 200 case mmr_offset(dcplb_data[0]) ... mmr_offset(dcplb_data[15]): 201 case mmr_offset(icplb_addr[0]) ... mmr_offset(icplb_addr[15]): 202 case mmr_offset(icplb_data[0]) ... mmr_offset(icplb_data[15]): 203 case mmr_offset(sram_base_address): 204 case mmr_offset(dcplb_fault_status): 205 case mmr_offset(dcplb_fault_addr): 206 case mmr_offset(idk): 207 case mmr_offset(icplb_fault_status): 208 case mmr_offset(icplb_fault_addr): 209 dv_store_4 (dest, *valuep); 210 break; 211 default: 212 dv_bfin_mmr_invalid (me, addr, nr_bytes, false); 213 return 0; 214 } 215 216 return nr_bytes; 217 } 218 219 static void 220 attach_bfin_mmu_regs (struct hw *me, struct bfin_mmu *mmu) 221 { 222 address_word attach_address; 223 int attach_space; 224 unsigned attach_size; 225 reg_property_spec reg; 226 227 if (hw_find_property (me, "reg") == NULL) 228 hw_abort (me, "Missing \"reg\" property"); 229 230 if (!hw_find_reg_array_property (me, "reg", 0, ®)) 231 hw_abort (me, "\"reg\" property must contain three addr/size entries"); 232 233 hw_unit_address_to_attach_address (hw_parent (me), 234 ®.address, 235 &attach_space, &attach_address, me); 236 hw_unit_size_to_attach_size (hw_parent (me), ®.size, &attach_size, me); 237 238 if (attach_size != BFIN_COREMMR_MMU_SIZE) 239 hw_abort (me, "\"reg\" size must be %#x", BFIN_COREMMR_MMU_SIZE); 240 241 hw_attach_address (hw_parent (me), 242 0, attach_space, attach_address, attach_size, me); 243 244 mmu->base = attach_address; 245 } 246 247 static void 248 bfin_mmu_finish (struct hw *me) 249 { 250 struct bfin_mmu *mmu; 251 252 mmu = HW_ZALLOC (me, struct bfin_mmu); 253 254 set_hw_data (me, mmu); 255 set_hw_io_read_buffer (me, bfin_mmu_io_read_buffer); 256 set_hw_io_write_buffer (me, bfin_mmu_io_write_buffer); 257 258 attach_bfin_mmu_regs (me, mmu); 259 260 /* Initialize the MMU. */ 261 mmu->sram_base_address = 0xff800000 - 0; 262 /*(4 * 1024 * 1024 * CPU_INDEX (hw_system_cpu (me)));*/ 263 mmu->dmem_control = 0x00000001; 264 mmu->imem_control = 0x00000001; 265 } 266 267 const struct hw_descriptor dv_bfin_mmu_descriptor[] = 268 { 269 {"bfin_mmu", bfin_mmu_finish,}, 270 {NULL, NULL}, 271 }; 272 273 /* Device option parsing. */ 274 275 static DECLARE_OPTION_HANDLER (bfin_mmu_option_handler); 276 277 enum { 278 OPTION_MMU_SKIP_TABLES = OPTION_START, 279 }; 280 281 static const OPTION bfin_mmu_options[] = 282 { 283 { {"mmu-skip-cplbs", no_argument, NULL, OPTION_MMU_SKIP_TABLES }, 284 '\0', NULL, "Skip parsing of CPLB tables (big speed increase)", 285 bfin_mmu_option_handler, NULL }, 286 287 { {NULL, no_argument, NULL, 0}, '\0', NULL, NULL, NULL, NULL } 288 }; 289 290 static SIM_RC 291 bfin_mmu_option_handler (SIM_DESC sd, sim_cpu *current_cpu, int opt, 292 char *arg, int is_command) 293 { 294 switch (opt) 295 { 296 case OPTION_MMU_SKIP_TABLES: 297 bfin_mmu_skip_cplbs = true; 298 return SIM_RC_OK; 299 300 default: 301 sim_io_eprintf (sd, "Unknown Blackfin MMU option %d\n", opt); 302 return SIM_RC_FAIL; 303 } 304 } 305 306 /* Provide a prototype to silence -Wmissing-prototypes. */ 307 extern MODULE_INIT_FN sim_install_bfin_mmu; 308 309 SIM_RC 310 sim_install_bfin_mmu (SIM_DESC sd) 311 { 312 SIM_ASSERT (STATE_MAGIC (sd) == SIM_MAGIC_NUMBER); 313 return sim_add_option_table (sd, NULL, bfin_mmu_options); 314 } 315 316 #define MMU_STATE(cpu) DV_STATE_CACHED (cpu, mmu) 317 318 static void 319 _mmu_log_ifault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 pc, bool supv) 320 { 321 mmu->icplb_fault_addr = pc; 322 mmu->icplb_fault_status = supv << 17; 323 } 324 325 void 326 mmu_log_ifault (SIM_CPU *cpu) 327 { 328 _mmu_log_ifault (cpu, MMU_STATE (cpu), PCREG, cec_get_ivg (cpu) >= 0); 329 } 330 331 static void 332 _mmu_log_fault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 addr, bool write, 333 bool inst, bool miss, bool supv, bool dag1, bu32 faults) 334 { 335 bu32 *fault_status, *fault_addr; 336 337 /* No logging in non-OS mode. */ 338 if (!mmu) 339 return; 340 341 fault_status = inst ? &mmu->icplb_fault_status : &mmu->dcplb_fault_status; 342 fault_addr = inst ? &mmu->icplb_fault_addr : &mmu->dcplb_fault_addr; 343 /* ICPLB regs always get updated. */ 344 if (!inst) 345 _mmu_log_ifault (cpu, mmu, PCREG, supv); 346 347 *fault_addr = addr; 348 *fault_status = 349 (miss << 19) | 350 (dag1 << 18) | 351 (supv << 17) | 352 (write << 16) | 353 faults; 354 } 355 356 static void 357 _mmu_process_fault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 addr, bool write, 358 bool inst, bool unaligned, bool miss, bool supv, bool dag1) 359 { 360 int excp; 361 362 /* See order in mmu_check_addr() */ 363 if (unaligned) 364 excp = inst ? VEC_MISALI_I : VEC_MISALI_D; 365 else if (addr >= BFIN_SYSTEM_MMR_BASE) 366 excp = VEC_ILL_RES; 367 else if (!mmu) 368 excp = inst ? VEC_CPLB_I_M : VEC_CPLB_M; 369 else 370 { 371 /* Misses are hardware errors. */ 372 cec_hwerr (cpu, HWERR_EXTERN_ADDR); 373 return; 374 } 375 376 _mmu_log_fault (cpu, mmu, addr, write, inst, miss, supv, dag1, 0); 377 cec_exception (cpu, excp); 378 } 379 380 void 381 mmu_process_fault (SIM_CPU *cpu, bu32 addr, bool write, bool inst, 382 bool unaligned, bool miss) 383 { 384 SIM_DESC sd = CPU_STATE (cpu); 385 struct bfin_mmu *mmu; 386 387 if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT) 388 mmu = NULL; 389 else 390 mmu = MMU_STATE (cpu); 391 392 _mmu_process_fault (cpu, mmu, addr, write, inst, unaligned, miss, 393 cec_is_supervisor_mode (cpu), 394 BFIN_CPU_STATE.multi_pc == PCREG + 6); 395 } 396 397 /* Return values: 398 -2: no known problems 399 -1: valid 400 0: miss 401 1: protection violation 402 2: multiple hits 403 3: unaligned 404 4: miss; hwerr */ 405 static int 406 mmu_check_implicit_addr (SIM_CPU *cpu, bu32 addr, bool inst, int size, 407 bool supv, bool dag1) 408 { 409 bool l1 = ((addr & 0xFF000000) == 0xFF000000); 410 bu32 amask = (addr & 0xFFF00000); 411 412 if (addr & (size - 1)) 413 return 3; 414 415 /* MMRs may never be executable or accessed from usermode. */ 416 if (addr >= BFIN_SYSTEM_MMR_BASE) 417 { 418 if (inst) 419 return 0; 420 else if (!supv || dag1) 421 return 1; 422 else 423 return -1; 424 } 425 else if (inst) 426 { 427 /* Some regions are not executable. */ 428 /* XXX: Should this be in the model data ? Core B 561 ? */ 429 if (l1) 430 return (amask == 0xFFA00000) ? -1 : 1; 431 } 432 else 433 { 434 /* Some regions are not readable. */ 435 /* XXX: Should this be in the model data ? Core B 561 ? */ 436 if (l1) 437 return (amask != 0xFFA00000) ? -1 : 4; 438 } 439 440 return -2; 441 } 442 443 /* Exception order per the PRM (first has highest): 444 Inst Multiple CPLB Hits 445 Inst Misaligned Access 446 Inst Protection Violation 447 Inst CPLB Miss 448 Only the alignment matters in non-OS mode though. */ 449 static int 450 _mmu_check_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst, int size) 451 { 452 SIM_DESC sd = CPU_STATE (cpu); 453 struct bfin_mmu *mmu; 454 bu32 *mem_control, *cplb_addr, *cplb_data; 455 bu32 faults; 456 bool supv, do_excp, dag1; 457 int i, hits; 458 459 supv = cec_is_supervisor_mode (cpu); 460 dag1 = (BFIN_CPU_STATE.multi_pc == PCREG + 6); 461 462 if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT || bfin_mmu_skip_cplbs) 463 { 464 int ret = mmu_check_implicit_addr (cpu, addr, inst, size, supv, dag1); 465 /* Valid hits and misses are OK in non-OS envs. */ 466 if (ret < 0) 467 return 0; 468 _mmu_process_fault (cpu, NULL, addr, write, inst, (ret == 3), false, supv, dag1); 469 } 470 471 mmu = MMU_STATE (cpu); 472 mem_control = inst ? &mmu->imem_control : &mmu->dmem_control; 473 cplb_addr = inst ? &mmu->icplb_addr[0] : &mmu->dcplb_addr[0]; 474 cplb_data = inst ? &mmu->icplb_data[0] : &mmu->dcplb_data[0]; 475 476 faults = 0; 477 hits = 0; 478 do_excp = false; 479 480 /* CPLBs disabled -> little to do. */ 481 if (!(*mem_control & ENCPLB)) 482 { 483 hits = 1; 484 goto implicit_check; 485 } 486 487 /* Check all the CPLBs first. */ 488 for (i = 0; i < 16; ++i) 489 { 490 const bu32 pages[4] = { 0x400, 0x1000, 0x100000, 0x400000 }; 491 bu32 addr_lo, addr_hi; 492 493 /* Skip invalid entries. */ 494 if (!(cplb_data[i] & CPLB_VALID)) 495 continue; 496 497 /* See if this entry covers this address. */ 498 addr_lo = cplb_addr[i]; 499 addr_hi = cplb_addr[i] + pages[(cplb_data[i] & PAGE_SIZE) >> 16]; 500 if (addr < addr_lo || addr >= addr_hi) 501 continue; 502 503 ++hits; 504 faults |= (1 << i); 505 if (write) 506 { 507 if (!supv && !(cplb_data[i] & CPLB_USER_WR)) 508 do_excp = true; 509 if (supv && !(cplb_data[i] & CPLB_SUPV_WR)) 510 do_excp = true; 511 if ((cplb_data[i] & (CPLB_WT | CPLB_L1_CHBL | CPLB_DIRTY)) == CPLB_L1_CHBL) 512 do_excp = true; 513 } 514 else 515 { 516 if (!supv && !(cplb_data[i] & CPLB_USER_RD)) 517 do_excp = true; 518 } 519 } 520 521 /* Handle default/implicit CPLBs. */ 522 if (!do_excp && hits < 2) 523 { 524 int ihits; 525 implicit_check: 526 ihits = mmu_check_implicit_addr (cpu, addr, inst, size, supv, dag1); 527 switch (ihits) 528 { 529 /* No faults and one match -> good to go. */ 530 case -1: return 0; 531 case -2: 532 if (hits == 1) 533 return 0; 534 break; 535 case 4: 536 cec_hwerr (cpu, HWERR_EXTERN_ADDR); 537 return 0; 538 default: 539 hits = ihits; 540 } 541 } 542 else 543 /* Normalize hit count so hits==2 is always multiple hit exception. */ 544 hits = min (2, hits); 545 546 _mmu_log_fault (cpu, mmu, addr, write, inst, hits == 0, supv, dag1, faults); 547 548 if (inst) 549 { 550 int iexcps[] = { VEC_CPLB_I_M, VEC_CPLB_I_VL, VEC_CPLB_I_MHIT, VEC_MISALI_I }; 551 return iexcps[hits]; 552 } 553 else 554 { 555 int dexcps[] = { VEC_CPLB_M, VEC_CPLB_VL, VEC_CPLB_MHIT, VEC_MISALI_D }; 556 return dexcps[hits]; 557 } 558 } 559 560 void 561 mmu_check_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst, int size) 562 { 563 int excp = _mmu_check_addr (cpu, addr, write, inst, size); 564 if (excp) 565 cec_exception (cpu, excp); 566 } 567 568 void 569 mmu_check_cache_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst) 570 { 571 bu32 cacheaddr; 572 int excp; 573 574 cacheaddr = addr & ~(BFIN_L1_CACHE_BYTES - 1); 575 excp = _mmu_check_addr (cpu, cacheaddr, write, inst, BFIN_L1_CACHE_BYTES); 576 if (excp == 0) 577 return; 578 579 /* Most exceptions are ignored with cache funcs. */ 580 /* XXX: Not sure if we should be ignoring CPLB misses. */ 581 if (inst) 582 { 583 if (excp == VEC_CPLB_I_VL) 584 return; 585 } 586 else 587 { 588 if (excp == VEC_CPLB_VL) 589 return; 590 } 591 cec_exception (cpu, excp); 592 } 593