/* Blackfin Memory Management Unit (MMU) model.

   Copyright (C) 2010-2014 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "config.h"

#include "sim-main.h"
#include "sim-options.h"
#include "devices.h"
#include "dv-bfin_mmu.h"
#include "dv-bfin_cec.h"

/* XXX: Should this really be two blocks of registers ?  PRM describes
        these as two Content Addressable Memory (CAM) blocks.  */

struct bfin_mmu
{
  bu32 base;

  /* Order after here is important -- matches hardware MMR layout.  */
  bu32 sram_base_address;

  bu32 dmem_control, dcplb_fault_status, dcplb_fault_addr;
  char _dpad0[0x100 - 0x0 - (4 * 4)];
  bu32 dcplb_addr[16];
  char _dpad1[0x200 - 0x100 - (4 * 16)];
  bu32 dcplb_data[16];
  char _dpad2[0x300 - 0x200 - (4 * 16)];
  bu32 dtest_command;
  char _dpad3[0x400 - 0x300 - (4 * 1)];
  bu32 dtest_data[2];

  char _dpad4[0x1000 - 0x400 - (4 * 2)];

  bu32 idk;  /* Filler MMR; hardware simply ignores.  */
  bu32 imem_control, icplb_fault_status, icplb_fault_addr;
  char _ipad0[0x100 - 0x0 - (4 * 4)];
  bu32 icplb_addr[16];
  char _ipad1[0x200 - 0x100 - (4 * 16)];
  bu32 icplb_data[16];
  char _ipad2[0x300 - 0x200 - (4 * 16)];
  bu32 itest_command;
  char _ipad3[0x400 - 0x300 - (4 * 1)];
  bu32 itest_data[2];
};
#define mmr_base()      offsetof(struct bfin_mmu, sram_base_address)
#define mmr_offset(mmr) (offsetof(struct bfin_mmu, mmr) - mmr_base())
#define mmr_idx(mmr)    (mmr_offset (mmr) / 4)

static const char * const mmr_names[BFIN_COREMMR_MMU_SIZE / 4] =
{
  "SRAM_BASE_ADDRESS", "DMEM_CONTROL", "DCPLB_FAULT_STATUS", "DCPLB_FAULT_ADDR",
  [mmr_idx (dcplb_addr[0])] = "DCPLB_ADDR0",
  "DCPLB_ADDR1", "DCPLB_ADDR2", "DCPLB_ADDR3", "DCPLB_ADDR4", "DCPLB_ADDR5",
  "DCPLB_ADDR6", "DCPLB_ADDR7", "DCPLB_ADDR8", "DCPLB_ADDR9", "DCPLB_ADDR10",
  "DCPLB_ADDR11", "DCPLB_ADDR12", "DCPLB_ADDR13", "DCPLB_ADDR14", "DCPLB_ADDR15",
  [mmr_idx (dcplb_data[0])] = "DCPLB_DATA0",
  "DCPLB_DATA1", "DCPLB_DATA2", "DCPLB_DATA3", "DCPLB_DATA4", "DCPLB_DATA5",
  "DCPLB_DATA6", "DCPLB_DATA7", "DCPLB_DATA8", "DCPLB_DATA9", "DCPLB_DATA10",
  "DCPLB_DATA11", "DCPLB_DATA12", "DCPLB_DATA13", "DCPLB_DATA14", "DCPLB_DATA15",
  [mmr_idx (dtest_command)] = "DTEST_COMMAND",
  [mmr_idx (dtest_data[0])] = "DTEST_DATA0", "DTEST_DATA1",
  [mmr_idx (imem_control)] = "IMEM_CONTROL", "ICPLB_FAULT_STATUS", "ICPLB_FAULT_ADDR",
  [mmr_idx (icplb_addr[0])] = "ICPLB_ADDR0",
  "ICPLB_ADDR1", "ICPLB_ADDR2", "ICPLB_ADDR3", "ICPLB_ADDR4", "ICPLB_ADDR5",
  "ICPLB_ADDR6", "ICPLB_ADDR7", "ICPLB_ADDR8", "ICPLB_ADDR9", "ICPLB_ADDR10",
  "ICPLB_ADDR11", "ICPLB_ADDR12", "ICPLB_ADDR13", "ICPLB_ADDR14", "ICPLB_ADDR15",
  [mmr_idx (icplb_data[0])] = "ICPLB_DATA0",
  "ICPLB_DATA1", "ICPLB_DATA2", "ICPLB_DATA3", "ICPLB_DATA4", "ICPLB_DATA5",
  "ICPLB_DATA6", "ICPLB_DATA7", "ICPLB_DATA8", "ICPLB_DATA9", "ICPLB_DATA10",
  "ICPLB_DATA11", "ICPLB_DATA12", "ICPLB_DATA13", "ICPLB_DATA14", "ICPLB_DATA15",
  [mmr_idx (itest_command)] = "ITEST_COMMAND",
  [mmr_idx (itest_data[0])] = "ITEST_DATA0", "ITEST_DATA1",
};
#define mmr_name(off) (mmr_names[(off) / 4] ? : "<INV>")

static bool bfin_mmu_skip_cplbs = false;

static unsigned
bfin_mmu_io_write_buffer (struct hw *me, const void *source,
                          int space, address_word addr, unsigned nr_bytes)
{
  struct bfin_mmu *mmu = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu32 *valuep;

  value = dv_load_4 (source);

  mmr_off = addr - mmu->base;
  valuep = (void *)((unsigned long)mmu + mmr_base() + mmr_off);

  HW_TRACE_WRITE ();

  switch (mmr_off)
    {
    case mmr_offset(dmem_control):
    case mmr_offset(imem_control):
      /* XXX: IMC/DMC bit should add/remove L1 cache regions ...  */
    case mmr_offset(dtest_data[0]) ... mmr_offset(dtest_data[1]):
    case mmr_offset(itest_data[0]) ... mmr_offset(itest_data[1]):
    case mmr_offset(dcplb_addr[0]) ... mmr_offset(dcplb_addr[15]):
    case mmr_offset(dcplb_data[0]) ... mmr_offset(dcplb_data[15]):
    case mmr_offset(icplb_addr[0]) ... mmr_offset(icplb_addr[15]):
    case mmr_offset(icplb_data[0]) ... mmr_offset(icplb_data[15]):
      *valuep = value;
      break;
    case mmr_offset(sram_base_address):
    case mmr_offset(dcplb_fault_status):
    case mmr_offset(dcplb_fault_addr):
    case mmr_offset(idk):
    case mmr_offset(icplb_fault_status):
    case mmr_offset(icplb_fault_addr):
      /* Discard writes to these.  */
      break;
    case mmr_offset(itest_command):
      /* XXX: Not supported atm.  */
      if (value)
        hw_abort (me, "ITEST_COMMAND unimplemented");
      break;
    case mmr_offset(dtest_command):
      /* Access L1 memory indirectly.  */
      *valuep = value;
      if (value)
        {
          bu32 addr = mmu->sram_base_address |
            ((value >> (26 - 11)) & (1 << 11)) | /* addr bit 11 (Way0/Way1)   */
            ((value >> (24 - 21)) & (1 << 21)) | /* addr bit 21 (Data/Inst)   */
            ((value >> (23 - 15)) & (1 << 15)) | /* addr bit 15 (Data Bank)   */
            ((value >> (16 - 12)) & (3 << 12)) | /* addr bits 13:12 (Subbank) */
            (value & 0x47F8);                    /* addr bits 14 & 10:3       */

          if (!(value & TEST_DATA_ARRAY))
            hw_abort (me, "DTEST_COMMAND tag array unimplemented");
          if (value & 0xfa7cb801)
            hw_abort (me, "DTEST_COMMAND bits undefined");

          if (value & TEST_WRITE)
            sim_write (hw_system (me), addr, (void *)mmu->dtest_data, 8);
          else
            sim_read (hw_system (me), addr, (void *)mmu->dtest_data, 8);
        }
      break;
    default:
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      break;
    }

  return nr_bytes;
}

static unsigned
bfin_mmu_io_read_buffer (struct hw *me, void *dest,
                         int space, address_word addr, unsigned nr_bytes)
{
  struct bfin_mmu *mmu = hw_data (me);
  bu32 mmr_off;
  bu32 *valuep;

  mmr_off = addr - mmu->base;
  valuep = (void *)((unsigned long)mmu + mmr_base() + mmr_off);

  HW_TRACE_READ ();

  switch (mmr_off)
    {
    case mmr_offset(dmem_control):
    case mmr_offset(imem_control):
    case mmr_offset(dtest_command):
    case mmr_offset(dtest_data[0]) ... mmr_offset(dtest_data[1]):
    case mmr_offset(itest_command):
    case mmr_offset(itest_data[0]) ... mmr_offset(itest_data[1]):
      /* XXX: should do something here.  */
    case mmr_offset(dcplb_addr[0]) ... mmr_offset(dcplb_addr[15]):
    case mmr_offset(dcplb_data[0]) ... mmr_offset(dcplb_data[15]):
    case mmr_offset(icplb_addr[0]) ... mmr_offset(icplb_addr[15]):
    case mmr_offset(icplb_data[0]) ... mmr_offset(icplb_data[15]):
    case mmr_offset(sram_base_address):
    case mmr_offset(dcplb_fault_status):
    case mmr_offset(dcplb_fault_addr):
    case mmr_offset(idk):
    case mmr_offset(icplb_fault_status):
    case mmr_offset(icplb_fault_addr):
      dv_store_4 (dest, *valuep);
      break;
    default:
      while (1) /* Core MMRs -> exception -> doesn't return.  */
        dv_bfin_mmr_invalid (me, addr, nr_bytes, false);
      break;
    }

  return nr_bytes;
}

static void
attach_bfin_mmu_regs (struct hw *me, struct bfin_mmu *mmu)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
                                     &reg.address,
                                     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_COREMMR_MMU_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_COREMMR_MMU_SIZE);

  hw_attach_address (hw_parent (me),
                     0, attach_space, attach_address, attach_size, me);

  mmu->base = attach_address;
}

static void
bfin_mmu_finish (struct hw *me)
{
  struct bfin_mmu *mmu;

  mmu = HW_ZALLOC (me, struct bfin_mmu);

  set_hw_data (me, mmu);
  set_hw_io_read_buffer (me, bfin_mmu_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_mmu_io_write_buffer);

  attach_bfin_mmu_regs (me, mmu);

  /* Initialize the MMU.  */
  mmu->sram_base_address = 0xff800000 - 0;
                           /*(4 * 1024 * 1024 * CPU_INDEX (hw_system_cpu (me)));*/
  mmu->dmem_control = 0x00000001;
  mmu->imem_control = 0x00000001;
}

const struct hw_descriptor dv_bfin_mmu_descriptor[] =
{
  {"bfin_mmu", bfin_mmu_finish,},
  {NULL, NULL},
};

/* Device option parsing.  */

static DECLARE_OPTION_HANDLER (bfin_mmu_option_handler);

enum {
  OPTION_MMU_SKIP_TABLES = OPTION_START,
};

const OPTION bfin_mmu_options[] =
{
  { {"mmu-skip-cplbs", no_argument, NULL, OPTION_MMU_SKIP_TABLES },
    '\0', NULL, "Skip parsing of CPLB tables (big speed increase)",
    bfin_mmu_option_handler, NULL },

  { {NULL, no_argument, NULL, 0}, '\0', NULL, NULL, NULL, NULL }
};

static SIM_RC
bfin_mmu_option_handler (SIM_DESC sd, sim_cpu *current_cpu, int opt,
                         char *arg, int is_command)
{
  switch (opt)
    {
    case OPTION_MMU_SKIP_TABLES:
      bfin_mmu_skip_cplbs = true;
      return SIM_RC_OK;

    default:
      sim_io_eprintf (sd, "Unknown Blackfin MMU option %d\n", opt);
      return SIM_RC_FAIL;
    }
}

#define MMU_STATE(cpu) DV_STATE_CACHED (cpu, mmu)

static void
_mmu_log_ifault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 pc, bool supv)
{
  mmu->icplb_fault_addr = pc;
  mmu->icplb_fault_status = supv << 17;
}

void
mmu_log_ifault (SIM_CPU *cpu)
{
  _mmu_log_ifault (cpu, MMU_STATE (cpu), PCREG, cec_get_ivg (cpu) >= 0);
}

static void
_mmu_log_fault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 addr, bool write,
                bool inst, bool miss, bool supv, bool dag1, bu32 faults)
{
  bu32 *fault_status, *fault_addr;

  /* No logging in non-OS mode.  */
  if (!mmu)
    return;

  fault_status = inst ? &mmu->icplb_fault_status : &mmu->dcplb_fault_status;
  fault_addr = inst ? &mmu->icplb_fault_addr : &mmu->dcplb_fault_addr;
  /* ICPLB regs always get updated.  */
  if (!inst)
    _mmu_log_ifault (cpu, mmu, PCREG, supv);

  *fault_addr = addr;
  *fault_status =
    (miss << 19) |
    (dag1 << 18) |
    (supv << 17) |
    (write << 16) |
    faults;
}

static void
_mmu_process_fault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 addr, bool write,
                    bool inst, bool unaligned, bool miss, bool supv, bool dag1)
{
  int excp;

  /* See order in mmu_check_addr()  */
  if (unaligned)
    excp = inst ? VEC_MISALI_I : VEC_MISALI_D;
  else if (addr >= BFIN_SYSTEM_MMR_BASE)
    excp = VEC_ILL_RES;
  else if (!mmu)
    excp = inst ? VEC_CPLB_I_M : VEC_CPLB_M;
  else
    {
      /* Misses are hardware errors.  */
      cec_hwerr (cpu, HWERR_EXTERN_ADDR);
      return;
    }

  _mmu_log_fault (cpu, mmu, addr, write, inst, miss, supv, dag1, 0);
  cec_exception (cpu, excp);
}

void
mmu_process_fault (SIM_CPU *cpu, bu32 addr, bool write, bool inst,
                   bool unaligned, bool miss)
{
  SIM_DESC sd = CPU_STATE (cpu);
  struct bfin_mmu *mmu;

  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT)
    mmu = NULL;
  else
    mmu = MMU_STATE (cpu);

  _mmu_process_fault (cpu, mmu, addr, write, inst, unaligned, miss,
                      cec_is_supervisor_mode (cpu),
                      BFIN_CPU_STATE.multi_pc == PCREG + 6);
}

/* Return values:
    -2: no known problems
    -1: valid
     0: miss
     1: protection violation
     2: multiple hits
     3: unaligned
     4: miss; hwerr  */
static int
mmu_check_implicit_addr (SIM_CPU *cpu, bu32 addr, bool inst, int size,
                         bool supv, bool dag1)
{
  bool l1 = ((addr & 0xFF000000) == 0xFF000000);
  bu32 amask = (addr & 0xFFF00000);

  if (addr & (size - 1))
    return 3;

  /* MMRs may never be executable or accessed from usermode.  */
  if (addr >= BFIN_SYSTEM_MMR_BASE)
    {
      if (inst)
        return 0;
      else if (!supv || dag1)
        return 1;
      else
        return -1;
    }
  else if (inst)
    {
      /* Some regions are not executable.  */
      /* XXX: Should this be in the model data ?  Core B 561 ?  */
      if (l1)
        return (amask == 0xFFA00000) ? -1 : 1;
    }
  else
    {
      /* Some regions are not readable.  */
      /* XXX: Should this be in the model data ?  Core B 561 ?  */
      if (l1)
        return (amask != 0xFFA00000) ? -1 : 4;
    }

  return -2;
}

/* Exception order per the PRM (first has highest):
     Inst Multiple CPLB Hits
     Inst Misaligned Access
     Inst Protection Violation
     Inst CPLB Miss
   Only the alignment matters in non-OS mode though.  */
static int
_mmu_check_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst, int size)
{
  SIM_DESC sd = CPU_STATE (cpu);
  struct bfin_mmu *mmu;
  bu32 *fault_status, *fault_addr, *mem_control, *cplb_addr, *cplb_data;
  bu32 faults;
  bool supv, do_excp, dag1;
  int i, hits;

  supv = cec_is_supervisor_mode (cpu);
  dag1 = (BFIN_CPU_STATE.multi_pc == PCREG + 6);

  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT || bfin_mmu_skip_cplbs)
    {
      int ret = mmu_check_implicit_addr (cpu, addr, inst, size, supv, dag1);
      /* Valid hits and misses are OK in non-OS envs.  */
      if (ret < 0)
        return 0;
      _mmu_process_fault (cpu, NULL, addr, write, inst, (ret == 3), false, supv, dag1);
    }

  mmu = MMU_STATE (cpu);
  fault_status = inst ? &mmu->icplb_fault_status : &mmu->dcplb_fault_status;
  fault_addr = inst ? &mmu->icplb_fault_addr : &mmu->dcplb_fault_addr;
  mem_control = inst ? &mmu->imem_control : &mmu->dmem_control;
  cplb_addr = inst ? &mmu->icplb_addr[0] : &mmu->dcplb_addr[0];
  cplb_data = inst ? &mmu->icplb_data[0] : &mmu->dcplb_data[0];

  faults = 0;
  hits = 0;
  do_excp = false;

  /* CPLBs disabled -> little to do.  */
  if (!(*mem_control & ENCPLB))
    {
      hits = 1;
      goto implicit_check;
    }

  /* Check all the CPLBs first.  */
  for (i = 0; i < 16; ++i)
    {
      const bu32 pages[4] = { 0x400, 0x1000, 0x100000, 0x400000 };
      bu32 addr_lo, addr_hi;

      /* Skip invalid entries.  */
      if (!(cplb_data[i] & CPLB_VALID))
        continue;

      /* See if this entry covers this address.  */
      addr_lo = cplb_addr[i];
      addr_hi = cplb_addr[i] + pages[(cplb_data[i] & PAGE_SIZE) >> 16];
      if (addr < addr_lo || addr >= addr_hi)
        continue;

      ++hits;
      faults |= (1 << i);
      if (write)
        {
          if (!supv && !(cplb_data[i] & CPLB_USER_WR))
            do_excp = true;
          if (supv && !(cplb_data[i] & CPLB_SUPV_WR))
            do_excp = true;
          if ((cplb_data[i] & (CPLB_WT | CPLB_L1_CHBL | CPLB_DIRTY)) == CPLB_L1_CHBL)
            do_excp = true;
        }
      else
        {
          if (!supv && !(cplb_data[i] & CPLB_USER_RD))
            do_excp = true;
        }
    }

  /* Handle default/implicit CPLBs.  */
  if (!do_excp && hits < 2)
    {
      int ihits;
 implicit_check:
      ihits = mmu_check_implicit_addr (cpu, addr, inst, size, supv, dag1);
      switch (ihits)
        {
        /* No faults and one match -> good to go.  */
        case -1: return 0;
        case -2:
          if (hits == 1)
            return 0;
          break;
        case 4:
          cec_hwerr (cpu, HWERR_EXTERN_ADDR);
          return 0;
        default:
          hits = ihits;
        }
    }
  else
    /* Normalize hit count so hits==2 is always multiple hit exception.  */
    hits = MIN (2, hits);

  _mmu_log_fault (cpu, mmu, addr, write, inst, hits == 0, supv, dag1, faults);

  if (inst)
    {
      int iexcps[] = { VEC_CPLB_I_M, VEC_CPLB_I_VL, VEC_CPLB_I_MHIT, VEC_MISALI_I };
      return iexcps[hits];
    }
  else
    {
      int dexcps[] = { VEC_CPLB_M, VEC_CPLB_VL, VEC_CPLB_MHIT, VEC_MISALI_D };
      return dexcps[hits];
    }
}

void
mmu_check_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst, int size)
{
  int excp = _mmu_check_addr (cpu, addr, write, inst, size);
  if (excp)
    cec_exception (cpu, excp);
}

void
mmu_check_cache_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst)
{
  bu32 cacheaddr;
  int excp;

  cacheaddr = addr & ~(BFIN_L1_CACHE_BYTES - 1);
  excp = _mmu_check_addr (cpu, cacheaddr, write, inst, BFIN_L1_CACHE_BYTES);
  if (excp == 0)
    return;

  /* Most exceptions are ignored with cache funcs.  */
  /* XXX: Not sure if we should be ignoring CPLB misses.  */
  if (inst)
    {
      if (excp == VEC_CPLB_I_VL)
        return;
    }
  else
    {
      if (excp == VEC_CPLB_VL)
        return;
    }
  cec_exception (cpu, excp);
}