/*	$NetBSD: cpufunc.c,v 1.132 2013/12/20 06:48:09 matt Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * cortexa8 improvements Copyright (c) Goeran Weinholt
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.132 2013/12/20 06:48:09 matt Exp $");

#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_perfctrs.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>
#include <arm/locore.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;
#endif

#if defined(CPU_ARMV7) && (defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6))
bool cpu_armv7_p;
#endif

#if defined(CPU_ARMV6) && (defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6))
bool cpu_armv6_p;
#endif

/* PRIMARY CACHE VARIABLES */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
u_int arm_cache_prefer_mask;
#endif
struct arm_cache_info arm_pcache;
struct arm_cache_info arm_scache;

u_int arm_dcache_align;
u_int arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int cpu_do_powersave;
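/*
 * Each supported core family gets its own struct cpu_functions below;
 * set_cpufuncs() copies the matching table into the global `cpufuncs',
 * and the cpu_*() wrappers dispatch through it.  As a sketch of the
 * idea only (the real wrappers live in <arm/cpufunc.h> and may differ
 * in detail):
 *
 *	#define cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
 *	#define cpu_dcache_wb_range(a, s) \
 *		cpufuncs.cf_dcache_wb_range((a), (s))
 */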
#ifdef CPU_ARM2
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id = arm2_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID = cpufunc_nullop,
	.cf_tlb_flushID_SE = (void *)cpufunc_nullop,
	.cf_tlb_flushI = cpufunc_nullop,
	.cf_tlb_flushI_SE = (void *)cpufunc_nullop,
	.cf_tlb_flushD = cpufunc_nullop,
	.cf_tlb_flushD_SE = (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm3_cache_flush,
	.cf_dcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_dcache_inv_range = (void *)cpufunc_nullop,
	.cf_dcache_wb_range = (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = cpufunc_nullop,
	.cf_idcache_wbinv_range = (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = early_abort_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_setup = (void *)cpufunc_nullop

};
#endif /* CPU_ARM2 */
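/*
 * The ARM2 above and the ARM250 below have neither an MMU nor a
 * cache, so nearly every entry in their tables is cpufunc_nullop;
 * the ARM3 further down is the first of these cores with a cache (a
 * small unified write-through one, see the cachetab[] entries near
 * the end of this file), and so the first to need a real
 * arm3_cache_flush.
 */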
#ifdef CPU_ARM250
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id = arm250_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID = cpufunc_nullop,
	.cf_tlb_flushID_SE = (void *)cpufunc_nullop,
	.cf_tlb_flushI = cpufunc_nullop,
	.cf_tlb_flushI_SE = (void *)cpufunc_nullop,
	.cf_tlb_flushD = cpufunc_nullop,
	.cf_tlb_flushD_SE = (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm3_cache_flush,
	.cf_dcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_dcache_inv_range = (void *)cpufunc_nullop,
	.cf_dcache_wb_range = (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = cpufunc_nullop,
	.cf_idcache_wbinv_range = (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = early_abort_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_setup = (void *)cpufunc_nullop

};
#endif /* CPU_ARM250 */

#ifdef CPU_ARM3
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = arm3_control,

	/* TLB functions */

	.cf_tlb_flushID = cpufunc_nullop,
	.cf_tlb_flushID_SE = (void *)cpufunc_nullop,
	.cf_tlb_flushI = cpufunc_nullop,
	.cf_tlb_flushI_SE = (void *)cpufunc_nullop,
	.cf_tlb_flushD = cpufunc_nullop,
	.cf_tlb_flushD_SE = (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm3_cache_flush,
	.cf_dcache_wbinv_range = (void *)arm3_cache_flush,
	.cf_dcache_inv_range = (void *)arm3_cache_flush,
	.cf_dcache_wb_range = (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm3_cache_flush,
	.cf_idcache_wbinv_range = (void *)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = early_abort_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_setup = (void *)cpufunc_nullop

};
#endif /* CPU_ARM3 */

#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm67_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm67_tlb_flush,
	.cf_tlb_flushID_SE = arm67_tlb_purge,
	.cf_tlb_flushI = arm67_tlb_flush,
	.cf_tlb_flushI_SE = arm67_tlb_purge,
	.cf_tlb_flushD = arm67_tlb_flush,
	.cf_tlb_flushD_SE = arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm67_cache_flush,
	.cf_dcache_wbinv_range = (void *)arm67_cache_flush,
	.cf_dcache_inv_range = (void *)arm67_cache_flush,
	.cf_dcache_wb_range = (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm67_cache_flush,
	.cf_idcache_wbinv_range = (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,
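	/*
	 * On these early cores a data abort can leave a base register
	 * already modified by the faulting LDM/STM, so the data-abort
	 * "fixup" routine decodes the instruction and undoes the
	 * write-back before the fault is retried.  Which routine is
	 * needed depends on the core's abort model: an ARM6 can be
	 * configured for either the early or the late model, hence
	 * the ARM6_LATE_ABORT option below.
	 */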
	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup = late_abort_fixup,
#else
	.cf_dataabt_fixup = early_abort_fixup,
#endif
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm67_context_switch,

	.cf_setup = arm6_setup

};
#endif /* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm67_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm67_tlb_flush,
	.cf_tlb_flushID_SE = arm67_tlb_purge,
	.cf_tlb_flushI = arm67_tlb_flush,
	.cf_tlb_flushI_SE = arm67_tlb_purge,
	.cf_tlb_flushD = arm67_tlb_flush,
	.cf_tlb_flushD_SE = arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm67_cache_flush,
	.cf_dcache_wbinv_range = (void *)arm67_cache_flush,
	.cf_dcache_inv_range = (void *)arm67_cache_flush,
	.cf_dcache_wb_range = (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm67_cache_flush,
	.cf_idcache_wbinv_range = (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = late_abort_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm67_context_switch,

	.cf_setup = arm7_setup

};
#endif /* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm7tdmi_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE = arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI = arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE = arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD = arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE = arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range = (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range = (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range = (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range = (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = late_abort_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm7tdmi_context_switch,

	.cf_setup = arm7tdmi_setup

};
#endif /* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm8_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm8_tlb_flushID,
	.cf_tlb_flushID_SE = arm8_tlb_flushID_SE,
	.cf_tlb_flushI = arm8_tlb_flushID,
	.cf_tlb_flushI_SE = arm8_tlb_flushID_SE,
	.cf_tlb_flushD = arm8_tlb_flushID,
	.cf_tlb_flushD_SE = arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm8_cache_purgeID,
	.cf_dcache_wbinv_range = (void *)arm8_cache_purgeID,
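	/*
	 * No invalidate-only D-cache range operation is provided for
	 * this core; the XXX entry below falls back to a full purge
	 * (write back and invalidate), which is slower than a pure
	 * invalidate but always safe.  The same XXX pattern recurs in
	 * several of the tables that follow.
	 */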
/*XXX*/	.cf_dcache_inv_range = (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range = (void *)arm8_cache_cleanID,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm8_context_switch,

	.cf_setup = arm8_setup
};
#endif /* CPU_ARM8 */

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm9_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = arm9_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = arm9_icache_sync_all,
	.cf_icache_sync_range = arm9_icache_sync_range,

	.cf_dcache_wbinv_all = arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range = arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range = arm9_dcache_wbinv_range,
	.cf_dcache_wb_range = arm9_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm9_context_switch,

	.cf_setup = arm9_setup

};
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = armv5_ec_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = arm10_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = arm10_tlb_flushI_SE,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv5_ec_icache_sync_all,
	.cf_icache_sync_range = armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all = armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range = armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range = armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range = armv5_ec_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm10_context_switch,

	.cf_setup = arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = armv5_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = arm10_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = arm10_tlb_flushI_SE,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv5_icache_sync_all,
	.cf_icache_sync_range = armv5_icache_sync_range,

	.cf_dcache_wbinv_all = armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range = armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range = armv5_dcache_wbinv_range,
	.cf_dcache_wb_range = armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm10_context_switch,

	.cf_setup = arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm11_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm11_tlb_flushID,
	.cf_tlb_flushID_SE = arm11_tlb_flushID_SE,
	.cf_tlb_flushI = arm11_tlb_flushI,
	.cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
	.cf_tlb_flushD = arm11_tlb_flushD,
	.cf_tlb_flushD_SE = arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv6_icache_sync_all,
	.cf_icache_sync_range = armv6_icache_sync_range,

	.cf_dcache_wbinv_all = armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range = armv6_dcache_wbinv_range,
	.cf_dcache_inv_range = armv6_dcache_inv_range,
	.cf_dcache_wb_range = armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = arm11_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm11_context_switch,

	.cf_setup = arm11_setup

};
#endif /* CPU_ARM11 */
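/*
 * The numeric comments on the arm11x6 entries below (411920, 371025,
 * 415045, 371367) appear to be ARM core erratum numbers: the arm11x6
 * variants of these operations carry workarounds for the erratum
 * named next to them, which is why they replace the generic armv6
 * routines only where needed.
 */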
#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm11x6_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm11_tlb_flushID,
	.cf_tlb_flushID_SE = arm11_tlb_flushID_SE,
	.cf_tlb_flushI = arm11_tlb_flushI,
	.cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
	.cf_tlb_flushD = arm11_tlb_flushD,
	.cf_tlb_flushD_SE = arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = arm11x6_icache_sync_all,		/* 411920 */
	.cf_icache_sync_range = arm11x6_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all = arm11x6_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range = armv6_dcache_wbinv_range,
	.cf_dcache_inv_range = armv6_dcache_inv_range,
	.cf_dcache_wb_range = armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm11x6_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf = arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf = arm11_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = arm11_sleep,	/* arm1136_sleep_rev0 */

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm11_context_switch,

	.cf_setup = arm11x6_setup

};
#endif /* CPU_ARM1136 */

#ifdef CPU_ARM1176
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm11x6_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm11_tlb_flushID,
	.cf_tlb_flushID_SE = arm11_tlb_flushID_SE,
	.cf_tlb_flushI = arm11_tlb_flushI,
	.cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
	.cf_tlb_flushD = arm11_tlb_flushD,
	.cf_tlb_flushD_SE = arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = arm11x6_icache_sync_all,		/* 415045 */
	.cf_icache_sync_range = arm11x6_icache_sync_range,	/* 371367 */

	.cf_dcache_wbinv_all = arm11x6_dcache_wbinv_all,	/* 415045 */
	.cf_dcache_wbinv_range = armv6_dcache_wbinv_range,
	.cf_dcache_inv_range = armv6_dcache_inv_range,
	.cf_dcache_wb_range = armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm11x6_idcache_wbinv_all,	/* 415045 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371367 */

	/* Other functions */

	.cf_flush_prefetchbuf = arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf = arm11_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = arm11x6_sleep,	/* no ref. */

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm11_context_switch,

	.cf_setup = arm11x6_setup

};
#endif /* CPU_ARM1176 */
#ifdef CPU_ARM11MPCORE
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm11_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm11_tlb_flushID,
	.cf_tlb_flushID_SE = arm11_tlb_flushID_SE,
	.cf_tlb_flushI = arm11_tlb_flushI,
	.cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
	.cf_tlb_flushD = arm11_tlb_flushD,
	.cf_tlb_flushD_SE = arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv6_icache_sync_all,
	.cf_icache_sync_range = armv5_icache_sync_range,

	.cf_dcache_wbinv_all = armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range = armv5_dcache_wbinv_range,
	.cf_dcache_inv_range = armv5_dcache_inv_range,
	.cf_dcache_wb_range = armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = arm11_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm11_context_switch,

	.cf_setup = arm11mpcore_setup

};
#endif /* CPU_ARM11MPCORE */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = sa1_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = sa1_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = sa1_cache_syncI,
	.cf_icache_sync_range = sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all = sa1_cache_purgeD,
	.cf_dcache_wbinv_range = sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range = sa1_cache_purgeD_rng,
	.cf_dcache_wb_range = sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = sa1_cache_purgeID,
	.cf_idcache_wbinv_range = sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = sa110_context_switch,

	.cf_setup = sa110_setup
};
#endif /* CPU_SA110 */
#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = sa1_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = sa1_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = sa1_cache_syncI,
	.cf_icache_sync_range = sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all = sa1_cache_purgeD,
	.cf_dcache_wbinv_range = sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range = sa1_cache_purgeD_rng,
	.cf_dcache_wb_range = sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = sa1_cache_purgeID,
	.cf_idcache_wbinv_range = sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf = sa11x0_drain_readbuf,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = sa11x0_context_switch,

	.cf_setup = sa11x0_setup
};
#endif /* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = fa526_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = fa526_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = fa526_tlb_flushI_SE,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = fa526_icache_sync_all,
	.cf_icache_sync_range = fa526_icache_sync_range,

	.cf_dcache_wbinv_all = fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range = fa526_dcache_wbinv_range,
	.cf_dcache_inv_range = fa526_dcache_inv_range,
	.cf_dcache_wb_range = fa526_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range = fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = fa526_flush_prefetchbuf,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = fa526_flush_brnchtgt_E,

	.cf_sleep = fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = fa526_context_switch,

	.cf_setup = fa526_setup
};
#endif /* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = sa1_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = sa1_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = sa1_cache_syncI,
	.cf_icache_sync_range = sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all = sa1_cache_purgeD,
	.cf_dcache_wbinv_range = sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range = sa1_cache_purgeD_rng,
	.cf_dcache_wb_range = sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = sa1_cache_purgeID,
	.cf_idcache_wbinv_range = sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf = ixp12x0_drain_readbuf,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = ixp12x0_context_switch,

	.cf_setup = ixp12x0_setup
};
#endif /* CPU_IXP12X0 */
#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
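/*
 * Note the cf_cpwait entry below: on XScale a CP15 write is not
 * guaranteed to have taken effect until the pipeline has been
 * drained, so code that touches CP15 issues Intel's documented
 * CPWAIT sequence (roughly: read back a CP15 register, a mov to
 * create a dependency, then a branch) via xscale_cpwait.  Other
 * cores simply use cpufunc_nullop for this hook.
 */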
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = xscale_cpwait,

	/* MMU functions */

	.cf_control = xscale_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = xscale_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = xscale_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = xscale_cache_syncI,
	.cf_icache_sync_range = xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all = xscale_cache_purgeD,
	.cf_dcache_wbinv_range = xscale_cache_purgeD_rng,
	.cf_dcache_inv_range = xscale_cache_flushD_rng,
	.cf_dcache_wb_range = xscale_cache_cleanD_rng,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = xscale_context_switch,

	.cf_setup = xscale_setup
};
#endif
/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */

#if defined(CPU_CORTEX)
struct cpu_functions cortex_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = armv7_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm11_tlb_flushID,
	.cf_tlb_flushID_SE = armv7_tlb_flushID_SE,
	.cf_tlb_flushI = arm11_tlb_flushI,
	.cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
	.cf_tlb_flushD = arm11_tlb_flushD,
	.cf_tlb_flushD_SE = arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv7_icache_sync_all,
	.cf_icache_sync_range = armv7_icache_sync_range,

	.cf_dcache_wbinv_all = armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range = armv7_dcache_wbinv_range,
	.cf_dcache_inv_range = armv7_dcache_inv_range,
	.cf_dcache_wb_range = armv7_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv7_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = armv7_context_switch,

	.cf_setup = armv7_setup

};
#endif /* CPU_CORTEX */

#ifdef CPU_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = pj4b_drain_writebuf,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = pj4b_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = pj4b_tlb_flushID,
	.cf_tlb_flushID_SE = pj4b_tlb_flushID_SE,
	.cf_tlb_flushI = pj4b_tlb_flushID,
	.cf_tlb_flushI_SE = pj4b_tlb_flushID_SE,
	.cf_tlb_flushD = pj4b_tlb_flushID,
	.cf_tlb_flushD_SE = pj4b_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv7_idcache_wbinv_all,
	.cf_icache_sync_range = pj4b_icache_sync_range,
	.cf_dcache_wbinv_all = armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range = pj4b_dcache_wbinv_range,
	.cf_dcache_inv_range = pj4b_dcache_inv_range,
	.cf_dcache_wb_range = pj4b_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range = pj4b_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = pj4b_drain_readbuf,
	.cf_drain_writebuf = pj4b_drain_writebuf,
	.cf_flush_brnchtgt_C = pj4b_flush_brnchtgt_all,
	.cf_flush_brnchtgt_E = pj4b_flush_brnchtgt_va,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = pj4b_context_switch,

	.cf_setup = pj4bv7_setup
};
#endif /* CPU_PJ4B */

#ifdef CPU_SHEEVA
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = armv5_ec_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = arm10_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = arm10_tlb_flushI_SE,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv5_ec_icache_sync_all,
	.cf_icache_sync_range = armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all = armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range = sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range = sheeva_dcache_inv_range,
	.cf_dcache_wb_range = sheeva_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)sheeva_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm10_context_switch,

	.cf_setup = sheeva_setup
};
#endif /* CPU_SHEEVA */


/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_ARM11) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_CORTEX) || defined(CPU_PJ4B) || defined(CPU_SHEEVA)
static void get_cachetype_cp15(void);

/*
 * Additional cache information local to this file.  Log2 of some of
 * the above numbers.
 */
static int arm_dcache_log2_nsets;
static int arm_dcache_log2_assoc;
static int arm_dcache_log2_linesize;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static inline u_int
get_cachesize_cp15(int cssr)
{
	u_int csid;

#if ((CPU_CORTEX) > 0) || defined(CPU_PJ4B)
	__asm volatile(".arch armv7a");
	__asm volatile("mcr p15, 2, %0, c0, c0, 0" :: "r" (cssr));
	__asm volatile("isb");	/* sync to the new cssr */
#else
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr));
#endif
	__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid));
	return csid;
}
#endif
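/*
 * A worked example of the cache-size-selection handshake above (the
 * values are illustrative, not from any particular core): selecting
 * the level 0 data cache means cssr = 0, and a returned cache size ID
 * with NumSets = 0x7f (128 sets), Assoc = 3 (4 ways) and LineSize = 1
 * (1 << (1 + 4) = 32-byte lines) describes a 32 * 4 * 128 = 16 KiB
 * cache, which is exactly how get_cacheinfo_clidr() below combines
 * the fields.
 */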
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static void
get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
{
	u_int csid;
	u_int nsets;

	if (clidr & 6) {
		/* select dcache values for this level */
		csid = get_cachesize_cp15(level << 1);
		nsets = CPU_CSID_NUMSETS(csid) + 1;
		info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->dcache_size = info->dcache_line_size * info->dcache_ways * nsets;

		if (level == 0) {
			arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
			arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
			arm_dcache_log2_nsets = 31 - __builtin_clz(nsets);
		}
	}

	info->cache_unified = (clidr == 4);

	if (clidr & 1) {
		/* select icache values for this level */
		csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD);
		nsets = CPU_CSID_NUMSETS(csid) + 1;
		info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->icache_size = info->icache_line_size * info->icache_ways * nsets;
	} else {
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_size = info->dcache_size;
	}
}
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */
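/*
 * The clidr argument above is a three-bit cache-level type field:
 * 001 means instruction cache only, 010 data only, 011 separate I
 * and D, 100 unified.  Hence the (clidr & 6) test for "has a data or
 * unified cache", the (clidr & 1) test for "has its own instruction
 * cache", and the == 4 test for unified; a unified level simply
 * reuses the dcache numbers for the icache fields.
 * get_cachetype_cp15() below shifts CLIDR right by three bits to
 * move from one level to the next.
 */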
static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_id())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int clidr = armreg_clidr_read();

		if (CPU_CT4_L1IPOLICY(ctype) != CPU_CT4_L1_PIPT) {
			arm_cache_prefer_mask = PAGE_SIZE;
		}
		arm_pcache.cache_type = CPU_CT_CTYPE_WB14;

		get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
		arm_dcache_align = arm_pcache.dcache_line_size;
		clidr >>= 3;
		if (clidr & 7) {
			get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
			if (arm_scache.dcache_line_size < arm_dcache_align)
				arm_dcache_align = arm_scache.dcache_line_size;
		}
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache.cache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache.cache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache.cache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_pcache.icache_line_size = 0; /* not present */
			else
				arm_pcache.icache_ways = 1;
		} else {
			arm_pcache.icache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					- CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pcache.dcache_line_size = 0; /* not present */
		else
			arm_pcache.dcache_ways = 1;
	} else {
		arm_pcache.dcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
		if (CPU_CT_xSIZE_P & dsize)
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				- CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
#endif
	}
	arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pcache.dcache_line_size;

	arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM11 || FA526 || XSCALE || CORTEX || PJ4B || SHEEVA */
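/*
 * A worked example of the pre-ARMv7 cache type register decode above
 * (illustrative values): with the M bit clear the multiplier is 2, so
 * a D-size field with SIZE = 5, ASSOC = 2 and LEN = 2 describes a
 * 2 << (5 + 8) = 16 KiB cache with 2 << (2 - 1) = 4 ways and
 * 1 << (2 + 3) = 32-byte lines; the log2 set count then falls out as
 * 6 + SIZE - ASSOC - LEN = 7, i.e. 128 sets, and indeed
 * 128 sets * 4 ways * 32 bytes = 16 KiB.
 */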
#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	uint32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
	/* cpuid,           cache type,       u, dsize, ls, wy, isize, ls, wy */
	{ CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
	{ CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
	{ CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
	{ CPU_ID_ARM610,    CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
	{ CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
	{ CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
	{ CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
	{ CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
	/* XXX is this type right for SA-1? */
	{ CPU_ID_SA110,     CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
	{ CPU_ID_SA1100,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
	{ CPU_ID_SA1110,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
	{ CPU_ID_IXP1200,   CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};

static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	uint32_t cpuid = cpu_id();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache.cache_type = cachetab[i].ct_pcache_type;
			arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
			arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
			arm_pcache.dcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
			arm_pcache.icache_size = cachetab[i].ct_picache_size;
			arm_pcache.icache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
		}
	}

	arm_dcache_align = arm_pcache.dcache_line_size;
	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM2
	if (cputype == CPU_ID_ARM2) {
		cpufuncs = arm2_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM2 */
#ifdef CPU_ARM250
	if (cputype == CPU_ID_ARM250) {
		cpufuncs = arm250_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif
#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif /* CPU_ARM8 */
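	/*
	 * The arm9/armv5 D-cache loops walk the cache by set/way index
	 * rather than by address, so the constants initialised in the
	 * cases below encode the geometry reported by
	 * get_cachetype_cp15().  For example, with 32-byte lines
	 * (log2 = 5), 128 sets (log2 = 7) and a 4-way cache
	 * (log2 assoc = 2), sets_inc is 1 << 5 = 32, sets_max is
	 * (1 << 12) - 32, and the way number lives in the top two
	 * bits, so index_inc is 1 << 30.
	 */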
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		get_cachetype_cp15();
		arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		return 0;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_ARM926EJS ||
	    cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#if defined(CPU_SHEEVA)
	if (cputype == CPU_ID_MV88SV131 ||
	    cputype == CPU_ID_MV88FR571_VD) {
		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		cpu_do_powersave = 1;		/* Enable powersave */
		return 0;
	}
#endif /* CPU_SHEEVA */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through caching (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		get_cachetype_cp15();
		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		armv5_dcache_sets_max =
		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
		    armv5_dcache_sets_inc;
		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM10 */

#if defined(CPU_ARM11MPCORE)
	if (cputype == CPU_ID_ARM11MPCORE) {
		cpufuncs = arm11mpcore_cpufuncs;
#if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
		cpu_armv6_p = true;
#endif
		get_cachetype_cp15();
		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
		    arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
		cpu_do_powersave = 1;		/* Enable powersave */
		pmap_pte_init_arm11mpcore();
		if (arm_cache_prefer_mask)
			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;

		return 0;
	}
#endif /* CPU_ARM11MPCORE */

#if defined(CPU_ARM11)
	if (cputype == CPU_ID_ARM1136JS ||
	    cputype == CPU_ID_ARM1136JSR1 ||
	    cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm11_cpufuncs;
#if defined(CPU_ARM1136)
		if (cputype == CPU_ID_ARM1136JS ||
		    cputype == CPU_ID_ARM1136JSR1) {
			cpufuncs = arm1136_cpufuncs;
			if (cputype == CPU_ID_ARM1136JS)
				cpufuncs.cf_sleep = arm1136_sleep_rev0;
		}
#endif
#if defined(CPU_ARM1176)
		if (cputype == CPU_ID_ARM1176JZS) {
			cpufuncs = arm1176_cpufuncs;
		}
#endif
#if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
		cpu_armv6_p = true;
#endif
		cpu_do_powersave = 1;		/* Enable powersave */
		get_cachetype_cp15();
#ifdef ARM11_CACHE_WRITE_THROUGH
		pmap_pte_init_arm11();
#else
		pmap_pte_init_generic();
#endif
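		/*
		 * For the virtually indexed caches probed above,
		 * arm_cache_prefer_mask is intended to be
		 * (way size - PAGE_SIZE), so the colour count below is
		 * way size / PAGE_SIZE: e.g. a 16 KiB way with 4 KiB
		 * pages gives a mask of 0x3000 and
		 * (0x3000 >> PGSHIFT) + 1 = 4 page colors.
		 */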
PGSHIFT) + 1; 1896 1897 /* 1898 * Start and reset the PMC Cycle Counter. 1899 */ 1900 armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C); 1901 return 0; 1902 } 1903 #endif /* CPU_ARM11 */ 1904 #ifdef CPU_SA110 1905 if (cputype == CPU_ID_SA110) { 1906 cpufuncs = sa110_cpufuncs; 1907 get_cachetype_table(); 1908 pmap_pte_init_sa1(); 1909 return 0; 1910 } 1911 #endif /* CPU_SA110 */ 1912 #ifdef CPU_SA1100 1913 if (cputype == CPU_ID_SA1100) { 1914 cpufuncs = sa11x0_cpufuncs; 1915 get_cachetype_table(); 1916 pmap_pte_init_sa1(); 1917 1918 /* Use powersave on this CPU. */ 1919 cpu_do_powersave = 1; 1920 1921 return 0; 1922 } 1923 #endif /* CPU_SA1100 */ 1924 #ifdef CPU_SA1110 1925 if (cputype == CPU_ID_SA1110) { 1926 cpufuncs = sa11x0_cpufuncs; 1927 get_cachetype_table(); 1928 pmap_pte_init_sa1(); 1929 1930 /* Use powersave on this CPU. */ 1931 cpu_do_powersave = 1; 1932 1933 return 0; 1934 } 1935 #endif /* CPU_SA1110 */ 1936 #ifdef CPU_FA526 1937 if (cputype == CPU_ID_FA526) { 1938 cpufuncs = fa526_cpufuncs; 1939 get_cachetype_cp15(); 1940 pmap_pte_init_generic(); 1941 1942 /* Use powersave on this CPU. */ 1943 cpu_do_powersave = 1; 1944 1945 return 0; 1946 } 1947 #endif /* CPU_FA526 */ 1948 #ifdef CPU_IXP12X0 1949 if (cputype == CPU_ID_IXP1200) { 1950 cpufuncs = ixp12x0_cpufuncs; 1951 get_cachetype_table(); 1952 pmap_pte_init_sa1(); 1953 return 0; 1954 } 1955 #endif /* CPU_IXP12X0 */ 1956 #ifdef CPU_XSCALE_80200 1957 if (cputype == CPU_ID_80200) { 1958 int rev = cpufunc_id() & CPU_ID_REVISION_MASK; 1959 1960 i80200_icu_init(); 1961 1962 /* 1963 * Reset the Performance Monitoring Unit to a 1964 * pristine state: 1965 * - CCNT, PMN0, PMN1 reset to 0 1966 * - overflow indications cleared 1967 * - all counters disabled 1968 */ 1969 __asm volatile("mcr p14, 0, %0, c0, c0, 0" 1970 : 1971 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF| 1972 PMNC_CC_IF)); 1973 1974 #if defined(XSCALE_CCLKCFG) 1975 /* 1976 * Crank CCLKCFG to maximum legal value. 1977 */ 1978 __asm volatile ("mcr p14, 0, %0, c6, c0, 0" 1979 : 1980 : "r" (XSCALE_CCLKCFG)); 1981 #endif 1982 1983 /* 1984 * XXX Disable ECC in the Bus Controller Unit; we 1985 * don't really support it, yet. Clear any pending 1986 * error indications. 1987 */ 1988 __asm volatile("mcr p13, 0, %0, c0, c1, 0" 1989 : 1990 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV)); 1991 1992 cpufuncs = xscale_cpufuncs; 1993 #if defined(PERFCTRS) 1994 xscale_pmu_init(); 1995 #endif 1996 1997 /* 1998 * i80200 errata: Step-A0 and A1 have a bug where 1999 * D$ dirty bits are not cleared on "invalidate by 2000 * address". 2001 * 2002 * Workaround: Clean cache line before invalidating. 
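		 *
		 * (Hence, for those steppings, cf_dcache_inv_range is
		 * pointed at the XScale purge routine below, which cleans
		 * each line before invalidating it, so no dirty data is
		 * lost.)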
2003 */ 2004 if (rev == 0 || rev == 1) 2005 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng; 2006 2007 get_cachetype_cp15(); 2008 pmap_pte_init_xscale(); 2009 return 0; 2010 } 2011 #endif /* CPU_XSCALE_80200 */ 2012 #ifdef CPU_XSCALE_80321 2013 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 || 2014 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 || 2015 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) { 2016 i80321_icu_init(); 2017 2018 /* 2019 * Reset the Performance Monitoring Unit to a 2020 * pristine state: 2021 * - CCNT, PMN0, PMN1 reset to 0 2022 * - overflow indications cleared 2023 * - all counters disabled 2024 */ 2025 __asm volatile("mcr p14, 0, %0, c0, c0, 0" 2026 : 2027 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF| 2028 PMNC_CC_IF)); 2029 2030 cpufuncs = xscale_cpufuncs; 2031 #if defined(PERFCTRS) 2032 xscale_pmu_init(); 2033 #endif 2034 2035 get_cachetype_cp15(); 2036 pmap_pte_init_xscale(); 2037 return 0; 2038 } 2039 #endif /* CPU_XSCALE_80321 */ 2040 #ifdef __CPU_XSCALE_PXA2XX 2041 /* ignore core revision to test PXA2xx CPUs */ 2042 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X || 2043 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 || 2044 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) { 2045 2046 cpufuncs = xscale_cpufuncs; 2047 #if defined(PERFCTRS) 2048 xscale_pmu_init(); 2049 #endif 2050 2051 get_cachetype_cp15(); 2052 pmap_pte_init_xscale(); 2053 2054 /* Use powersave on this CPU. */ 2055 cpu_do_powersave = 1; 2056 2057 return 0; 2058 } 2059 #endif /* __CPU_XSCALE_PXA2XX */ 2060 #ifdef CPU_XSCALE_IXP425 2061 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 || 2062 cputype == CPU_ID_IXP425_266) { 2063 ixp425_icu_init(); 2064 2065 cpufuncs = xscale_cpufuncs; 2066 #if defined(PERFCTRS) 2067 xscale_pmu_init(); 2068 #endif 2069 2070 get_cachetype_cp15(); 2071 pmap_pte_init_xscale(); 2072 2073 return 0; 2074 } 2075 #endif /* CPU_XSCALE_IXP425 */ 2076 #if defined(CPU_CORTEX) 2077 if (CPU_ID_CORTEX_P(cputype)) { 2078 cpufuncs = cortex_cpufuncs; 2079 cpu_do_powersave = 1; /* Enable powersave */ 2080 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6) 2081 cpu_armv7_p = true; 2082 #endif 2083 get_cachetype_cp15(); 2084 pmap_pte_init_armv7(); 2085 if (arm_cache_prefer_mask) 2086 uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1; 2087 /* 2088 * Start and reset the PMC Cycle Counter. 2089 */ 2090 armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C); 2091 armreg_pmcntenset_write(CORTEX_CNTENS_C); 2092 return 0; 2093 } 2094 #endif /* CPU_CORTEX */ 2095 2096 #if defined(CPU_PJ4B) 2097 if ((cputype == CPU_ID_MV88SV581X_V6 || 2098 cputype == CPU_ID_MV88SV581X_V7 || 2099 cputype == CPU_ID_MV88SV584X_V7 || 2100 cputype == CPU_ID_ARM_88SV581X_V6 || 2101 cputype == CPU_ID_ARM_88SV581X_V7) && 2102 (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) { 2103 cpufuncs = pj4bv7_cpufuncs; 2104 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6) 2105 cpu_armv7_p = true; 2106 #endif 2107 get_cachetype_cp15(); 2108 pmap_pte_init_armv7(); 2109 return 0; 2110 } 2111 #endif /* CPU_PJ4B */ 2112 2113 /* 2114 * Bzzzz. And the answer was ... 
2115 */ 2116 panic("No support for this CPU type (%08x) in kernel", cputype); 2117 return(ARCHITECTURE_NOT_PRESENT); 2118 } 2119 2120 #ifdef CPU_ARM2 2121 u_int arm2_id(void) 2122 { 2123 2124 return CPU_ID_ARM2; 2125 } 2126 #endif /* CPU_ARM2 */ 2127 2128 #ifdef CPU_ARM250 2129 u_int arm250_id(void) 2130 { 2131 2132 return CPU_ID_ARM250; 2133 } 2134 #endif /* CPU_ARM250 */ 2135 2136 /* 2137 * Fixup routines for data and prefetch aborts. 2138 * 2139 * Several compile time symbols are used 2140 * 2141 * DEBUG_FAULT_CORRECTION - Print debugging information during the 2142 * correction of registers after a fault. 2143 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts 2144 * when defined should use late aborts 2145 */ 2146 2147 2148 /* 2149 * Null abort fixup routine. 2150 * For use when no fixup is required. 2151 */ 2152 int 2153 cpufunc_null_fixup(void *arg) 2154 { 2155 return(ABORT_FIXUP_OK); 2156 } 2157 2158 2159 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \ 2160 defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) 2161 2162 #ifdef DEBUG_FAULT_CORRECTION 2163 #define DFC_PRINTF(x) printf x 2164 #define DFC_DISASSEMBLE(x) disassemble(x) 2165 #else 2166 #define DFC_PRINTF(x) /* nothing */ 2167 #define DFC_DISASSEMBLE(x) /* nothing */ 2168 #endif 2169 2170 /* 2171 * "Early" data abort fixup. 2172 * 2173 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used 2174 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI]. 2175 * 2176 * In early aborts, we may have to fix up LDM, STM, LDC and STC. 2177 */ 2178 int 2179 early_abort_fixup(void *arg) 2180 { 2181 trapframe_t *frame = arg; 2182 u_int fault_pc; 2183 u_int fault_instruction; 2184 int saved_lr = 0; 2185 2186 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 2187 2188 /* Ok an abort in SVC mode */ 2189 2190 /* 2191 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 2192 * as the fault happened in svc mode but we need it in the 2193 * usr slot so we can treat the registers as an array of ints 2194 * during fixing. 2195 * NOTE: This PC is in the position but writeback is not 2196 * allowed on r15. 2197 * Doing it like this is more efficient than trapping this 2198 * case in all possible locations in the following fixup code. 2199 */ 2200 2201 saved_lr = frame->tf_usr_lr; 2202 frame->tf_usr_lr = frame->tf_svc_lr; 2203 2204 /* 2205 * Note the trapframe does not have the SVC r13 so a fault 2206 * from an instruction with writeback to r13 in SVC mode is 2207 * not allowed. This should not happen as the kstack is 2208 * always valid. 
2209 */ 2210 } 2211 2212 /* Get fault address and status from the CPU */ 2213 2214 fault_pc = frame->tf_pc; 2215 fault_instruction = *((volatile unsigned int *)fault_pc); 2216 2217 /* Decode the fault instruction and fix the registers as needed */ 2218 2219 if ((fault_instruction & 0x0e000000) == 0x08000000) { 2220 int base; 2221 int loop; 2222 int count; 2223 int *registers = &frame->tf_r0; 2224 2225 DFC_PRINTF(("LDM/STM\n")); 2226 DFC_DISASSEMBLE(fault_pc); 2227 if (fault_instruction & (1 << 21)) { 2228 DFC_PRINTF(("This instruction must be corrected\n")); 2229 base = (fault_instruction >> 16) & 0x0f; 2230 if (base == 15) 2231 return ABORT_FIXUP_FAILED; 2232 /* Count registers transferred */ 2233 count = 0; 2234 for (loop = 0; loop < 16; ++loop) { 2235 if (fault_instruction & (1<<loop)) 2236 ++count; 2237 } 2238 DFC_PRINTF(("%d registers used\n", count)); 2239 DFC_PRINTF(("Corrected r%d by %d bytes ", 2240 base, count * 4)); 2241 if (fault_instruction & (1 << 23)) { 2242 DFC_PRINTF(("down\n")); 2243 registers[base] -= count * 4; 2244 } else { 2245 DFC_PRINTF(("up\n")); 2246 registers[base] += count * 4; 2247 } 2248 } 2249 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) { 2250 int base; 2251 int offset; 2252 int *registers = &frame->tf_r0; 2253 2254 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */ 2255 2256 DFC_DISASSEMBLE(fault_pc); 2257 2258 /* Only need to fix registers if write back is turned on */ 2259 2260 if ((fault_instruction & (1 << 21)) != 0) { 2261 base = (fault_instruction >> 16) & 0x0f; 2262 if (base == 13 && 2263 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) 2264 return ABORT_FIXUP_FAILED; 2265 if (base == 15) 2266 return ABORT_FIXUP_FAILED; 2267 2268 offset = (fault_instruction & 0xff) << 2; 2269 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 2270 if ((fault_instruction & (1 << 23)) != 0) 2271 offset = -offset; 2272 registers[base] += offset; 2273 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 2274 } 2275 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) 2276 return ABORT_FIXUP_FAILED; 2277 2278 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 2279 2280 /* Ok an abort in SVC mode */ 2281 2282 /* 2283 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 2284 * as the fault happened in svc mode but we need it in the 2285 * usr slot so we can treat the registers as an array of ints 2286 * during fixing. 2287 * NOTE: This PC is in the position but writeback is not 2288 * allowed on r15. 2289 * Doing it like this is more efficient than trapping this 2290 * case in all possible locations in the prior fixup code. 2291 */ 2292 2293 frame->tf_svc_lr = frame->tf_usr_lr; 2294 frame->tf_usr_lr = saved_lr; 2295 2296 /* 2297 * Note the trapframe does not have the SVC r13 so a fault 2298 * from an instruction with writeback to r13 in SVC mode is 2299 * not allowed. This should not happen as the kstack is 2300 * always valid. 2301 */ 2302 } 2303 2304 return(ABORT_FIXUP_OK); 2305 } 2306 #endif /* CPU_ARM2/250/3/6/7 */ 2307 2308 2309 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \ 2310 defined(CPU_ARM7TDMI) 2311 /* 2312 * "Late" (base updated) data abort fixup 2313 * 2314 * For ARM6 (in late-abort mode) and ARM7. 2315 * 2316 * In this model, all data-transfer instructions need fixing up. We defer 2317 * LDM, STM, LDC and STC fixup to the early-abort handler. 
*/
2319 int
2320 late_abort_fixup(void *arg)
2321 {
2322 	trapframe_t *frame = arg;
2323 	u_int fault_pc;
2324 	u_int fault_instruction;
2325 	int saved_lr = 0;
2326 
2327 	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {
2328 
2329 		/* Ok an abort in SVC mode */
2330 
2331 		/*
2332 		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
2333 		 * as the fault happened in svc mode but we need it in the
2334 		 * usr slot so we can treat the registers as an array of ints
2335 		 * during fixing.
2336 		 * NOTE: This PC is in the position but writeback is not
2337 		 * allowed on r15.
2338 		 * Doing it like this is more efficient than trapping this
2339 		 * case in all possible locations in the following fixup code.
2340 		 */
2341 
2342 		saved_lr = frame->tf_usr_lr;
2343 		frame->tf_usr_lr = frame->tf_svc_lr;
2344 
2345 		/*
2346 		 * Note the trapframe does not have the SVC r13 so a fault
2347 		 * from an instruction with writeback to r13 in SVC mode is
2348 		 * not allowed. This should not happen as the kstack is
2349 		 * always valid.
2350 		 */
2351 	}
2352 
2353 	/* Get fault address and status from the CPU */
2354 
2355 	fault_pc = frame->tf_pc;
2356 	fault_instruction = *((volatile unsigned int *)fault_pc);
2357 
2358 	/* Decode the fault instruction and fix the registers as needed */
2359 
2360 	/* Was it a swap instruction? */
2361 
2362 	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
2363 		DFC_DISASSEMBLE(fault_pc);
2364 	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {
2365 
2366 		/* Was it a ldr/str instruction? */
2367 
2368 
2369 		int base;
2370 		int offset;
2371 		int *registers = &frame->tf_r0;
2372 
2373 		DFC_DISASSEMBLE(fault_pc);
2374 
2375 		/* This is for late abort only */
2376 
2377 		if ((fault_instruction & (1 << 24)) == 0
2378 		    || (fault_instruction & (1 << 21)) != 0) {
2379 			/* post-indexed ldr/str, or pre-indexed with writeback */
2380 
2381 			base = (fault_instruction >> 16) & 0x0f;
2382 			if (base == 13 &&
2383 			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
2384 				return ABORT_FIXUP_FAILED;
2385 			if (base == 15)
2386 				return ABORT_FIXUP_FAILED;
2387 			DFC_PRINTF(("late abt fix: r%d=%08x : ",
2388 			    base, registers[base]));
2389 			if ((fault_instruction & (1 << 25)) == 0) {
2390 				/* Immediate offset - easy */
2391 
2392 				offset = fault_instruction & 0xfff;
2393 				if ((fault_instruction & (1 << 23)))
2394 					offset = -offset;
2395 				registers[base] += offset;
2396 				DFC_PRINTF(("imm=%08x ", offset));
2397 			} else {
2398 				/* offset is a shifted register */
2399 				int shift;
2400 
2401 				offset = fault_instruction & 0x0f;
2402 				if (offset == base)
2403 					return ABORT_FIXUP_FAILED;
2404 
2405 				/*
2406 				 * Register offset - harder, since we have
2407 				 * to cope with shifts!
2408 */ 2409 offset = registers[offset]; 2410 2411 if ((fault_instruction & (1 << 4)) == 0) 2412 /* shift with amount */ 2413 shift = (fault_instruction >> 7) & 0x1f; 2414 else { 2415 /* shift with register */ 2416 if ((fault_instruction & (1 << 7)) != 0) 2417 /* undefined for now so bail out */ 2418 return ABORT_FIXUP_FAILED; 2419 shift = ((fault_instruction >> 8) & 0xf); 2420 if (base == shift) 2421 return ABORT_FIXUP_FAILED; 2422 DFC_PRINTF(("shift reg=%d ", shift)); 2423 shift = registers[shift]; 2424 } 2425 DFC_PRINTF(("shift=%08x ", shift)); 2426 switch (((fault_instruction >> 5) & 0x3)) { 2427 case 0 : /* Logical left */ 2428 offset = (int)(((u_int)offset) << shift); 2429 break; 2430 case 1 : /* Logical Right */ 2431 if (shift == 0) shift = 32; 2432 offset = (int)(((u_int)offset) >> shift); 2433 break; 2434 case 2 : /* Arithmetic Right */ 2435 if (shift == 0) shift = 32; 2436 offset = (int)(((int)offset) >> shift); 2437 break; 2438 case 3 : /* Rotate right (rol or rxx) */ 2439 return ABORT_FIXUP_FAILED; 2440 break; 2441 } 2442 2443 DFC_PRINTF(("abt: fixed LDR/STR with " 2444 "register offset\n")); 2445 if ((fault_instruction & (1 << 23))) 2446 offset = -offset; 2447 DFC_PRINTF(("offset=%08x ", offset)); 2448 registers[base] += offset; 2449 } 2450 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 2451 } 2452 } 2453 2454 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 2455 2456 /* Ok an abort in SVC mode */ 2457 2458 /* 2459 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 2460 * as the fault happened in svc mode but we need it in the 2461 * usr slot so we can treat the registers as an array of ints 2462 * during fixing. 2463 * NOTE: This PC is in the position but writeback is not 2464 * allowed on r15. 2465 * Doing it like this is more efficient than trapping this 2466 * case in all possible locations in the prior fixup code. 2467 */ 2468 2469 frame->tf_svc_lr = frame->tf_usr_lr; 2470 frame->tf_usr_lr = saved_lr; 2471 2472 /* 2473 * Note the trapframe does not have the SVC r13 so a fault 2474 * from an instruction with writeback to r13 in SVC mode is 2475 * not allowed. This should not happen as the kstack is 2476 * always valid. 2477 */ 2478 } 2479 2480 /* 2481 * Now let the early-abort fixup routine have a go, in case it 2482 * was an LDM, STM, LDC or STC that faulted. 
2483 */ 2484 2485 return early_abort_fixup(arg); 2486 } 2487 #endif /* CPU_ARM6(LATE)/7/7TDMI */ 2488 2489 /* 2490 * CPU Setup code 2491 */ 2492 2493 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \ 2494 defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \ 2495 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \ 2496 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 2497 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \ 2498 defined(CPU_ARM10) || defined(CPU_ARM11) || \ 2499 defined(CPU_FA526) || defined(CPU_CORTEX) || defined(CPU_SHEEVA) 2500 2501 #define IGN 0 2502 #define OR 1 2503 #define BIC 2 2504 2505 struct cpu_option { 2506 const char *co_name; 2507 int co_falseop; 2508 int co_trueop; 2509 int co_value; 2510 }; 2511 2512 static u_int parse_cpu_options(char *, struct cpu_option *, u_int); 2513 2514 static u_int 2515 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl) 2516 { 2517 int integer; 2518 2519 if (args == NULL) 2520 return(cpuctrl); 2521 2522 while (optlist->co_name) { 2523 if (get_bootconf_option(args, optlist->co_name, 2524 BOOTOPT_TYPE_BOOLEAN, &integer)) { 2525 if (integer) { 2526 if (optlist->co_trueop == OR) 2527 cpuctrl |= optlist->co_value; 2528 else if (optlist->co_trueop == BIC) 2529 cpuctrl &= ~optlist->co_value; 2530 } else { 2531 if (optlist->co_falseop == OR) 2532 cpuctrl |= optlist->co_value; 2533 else if (optlist->co_falseop == BIC) 2534 cpuctrl &= ~optlist->co_value; 2535 } 2536 } 2537 ++optlist; 2538 } 2539 return(cpuctrl); 2540 } 2541 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */ 2542 2543 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \ 2544 || defined(CPU_ARM8) 2545 struct cpu_option arm678_options[] = { 2546 #ifdef COMPAT_12 2547 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE }, 2548 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 2549 #endif /* COMPAT_12 */ 2550 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2551 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2552 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2553 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2554 { NULL, IGN, IGN, 0 } 2555 }; 2556 2557 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */ 2558 2559 #ifdef CPU_ARM6 2560 struct cpu_option arm6_options[] = { 2561 { "arm6.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2562 { "arm6.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2563 { "arm6.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2564 { "arm6.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2565 { NULL, IGN, IGN, 0 } 2566 }; 2567 2568 void 2569 arm6_setup(char *args) 2570 { 2571 int cpuctrl, cpuctrlmask; 2572 2573 /* Set up default control registers bits */ 2574 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2575 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2576 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2577 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2578 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2579 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2580 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE 2581 | CPU_CONTROL_AFLT_ENABLE; 2582 2583 #ifdef ARM6_LATE_ABORT 2584 cpuctrl |= CPU_CONTROL_LABT_ENABLE; 2585 #endif /* ARM6_LATE_ABORT */ 2586 2587 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2588 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2589 #endif 2590 2591 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2592 cpuctrl = 
parse_cpu_options(args, arm6_options, cpuctrl); 2593 2594 #ifdef __ARMEB__ 2595 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2596 #endif 2597 2598 /* Clear out the cache */ 2599 cpu_idcache_wbinv_all(); 2600 2601 /* Set the control register */ 2602 curcpu()->ci_ctrl = cpuctrl; 2603 cpu_control(0xffffffff, cpuctrl); 2604 } 2605 #endif /* CPU_ARM6 */ 2606 2607 #ifdef CPU_ARM7 2608 struct cpu_option arm7_options[] = { 2609 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2610 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2611 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2612 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2613 #ifdef COMPAT_12 2614 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK }, 2615 #endif /* COMPAT_12 */ 2616 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK }, 2617 { NULL, IGN, IGN, 0 } 2618 }; 2619 2620 void 2621 arm7_setup(char *args) 2622 { 2623 int cpuctrl, cpuctrlmask; 2624 2625 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2626 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2627 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2628 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2629 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2630 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2631 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE 2632 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE 2633 | CPU_CONTROL_AFLT_ENABLE; 2634 2635 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2636 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2637 #endif 2638 2639 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2640 cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl); 2641 2642 #ifdef __ARMEB__ 2643 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2644 #endif 2645 2646 /* Clear out the cache */ 2647 cpu_idcache_wbinv_all(); 2648 2649 /* Set the control register */ 2650 curcpu()->ci_ctrl = cpuctrl; 2651 cpu_control(0xffffffff, cpuctrl); 2652 } 2653 #endif /* CPU_ARM7 */ 2654 2655 #ifdef CPU_ARM7TDMI 2656 struct cpu_option arm7tdmi_options[] = { 2657 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2658 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2659 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2660 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2661 #ifdef COMPAT_12 2662 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK }, 2663 #endif /* COMPAT_12 */ 2664 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK }, 2665 { NULL, IGN, IGN, 0 } 2666 }; 2667 2668 void 2669 arm7tdmi_setup(char *args) 2670 { 2671 int cpuctrl; 2672 2673 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2674 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2675 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2676 2677 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2678 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl); 2679 2680 #ifdef __ARMEB__ 2681 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2682 #endif 2683 2684 /* Clear out the cache */ 2685 cpu_idcache_wbinv_all(); 2686 2687 /* Set the control register */ 2688 curcpu()->ci_ctrl = cpuctrl; 2689 cpu_control(0xffffffff, cpuctrl); 2690 } 2691 #endif /* CPU_ARM7TDMI */ 2692 2693 #ifdef CPU_ARM8 2694 struct cpu_option arm8_options[] = { 2695 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2696 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2697 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2698 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2699 #ifdef COMPAT_12 2700 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2701 #endif /* COMPAT_12 */ 2702 
{ "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2703 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2704 { NULL, IGN, IGN, 0 } 2705 }; 2706 2707 void 2708 arm8_setup(char *args) 2709 { 2710 int integer; 2711 int cpuctrl, cpuctrlmask; 2712 int clocktest; 2713 int setclock = 0; 2714 2715 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2716 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2717 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2718 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2719 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2720 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2721 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE 2722 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE; 2723 2724 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2725 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2726 #endif 2727 2728 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2729 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl); 2730 2731 #ifdef __ARMEB__ 2732 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2733 #endif 2734 2735 /* Get clock configuration */ 2736 clocktest = arm8_clock_config(0, 0) & 0x0f; 2737 2738 /* Special ARM8 clock and test configuration */ 2739 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2740 clocktest = 0; 2741 setclock = 1; 2742 } 2743 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2744 if (integer) 2745 clocktest |= 0x01; 2746 else 2747 clocktest &= ~(0x01); 2748 setclock = 1; 2749 } 2750 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2751 if (integer) 2752 clocktest |= 0x02; 2753 else 2754 clocktest &= ~(0x02); 2755 setclock = 1; 2756 } 2757 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) { 2758 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2; 2759 setclock = 1; 2760 } 2761 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) { 2762 clocktest |= (integer & 7) << 5; 2763 setclock = 1; 2764 } 2765 2766 /* Clear out the cache */ 2767 cpu_idcache_wbinv_all(); 2768 2769 /* Set the control register */ 2770 curcpu()->ci_ctrl = cpuctrl; 2771 cpu_control(0xffffffff, cpuctrl); 2772 2773 /* Set the clock/test register */ 2774 if (setclock) 2775 arm8_clock_config(0x7f, clocktest); 2776 } 2777 #endif /* CPU_ARM8 */ 2778 2779 #ifdef CPU_ARM9 2780 struct cpu_option arm9_options[] = { 2781 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2782 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2783 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2784 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2785 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2786 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2787 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2788 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2789 { NULL, IGN, IGN, 0 } 2790 }; 2791 2792 void 2793 arm9_setup(char *args) 2794 { 2795 int cpuctrl, cpuctrlmask; 2796 2797 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2798 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2799 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2800 | CPU_CONTROL_WBUF_ENABLE; 2801 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2802 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2803 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2804 | CPU_CONTROL_WBUF_ENABLE | 
CPU_CONTROL_ROM_ENABLE 2805 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2806 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC 2807 | CPU_CONTROL_ROUNDROBIN; 2808 2809 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2810 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2811 #endif 2812 2813 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl); 2814 2815 #ifdef __ARMEB__ 2816 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2817 #endif 2818 2819 #ifndef ARM_HAS_VBAR 2820 if (vector_page == ARM_VECTORS_HIGH) 2821 cpuctrl |= CPU_CONTROL_VECRELOC; 2822 #endif 2823 2824 /* Clear out the cache */ 2825 cpu_idcache_wbinv_all(); 2826 2827 /* Set the control register */ 2828 curcpu()->ci_ctrl = cpuctrl; 2829 cpu_control(cpuctrlmask, cpuctrl); 2830 2831 } 2832 #endif /* CPU_ARM9 */ 2833 2834 #if defined(CPU_ARM9E) || defined(CPU_ARM10) 2835 struct cpu_option arm10_options[] = { 2836 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2837 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2838 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2839 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2840 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2841 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2842 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2843 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2844 { NULL, IGN, IGN, 0 } 2845 }; 2846 2847 void 2848 arm10_setup(char *args) 2849 { 2850 int cpuctrl; 2851 2852 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2853 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2854 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE; 2855 #if 0 2856 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2857 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2858 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 2859 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2860 | CPU_CONTROL_BPRD_ENABLE 2861 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 2862 #endif 2863 2864 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2865 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2866 #endif 2867 2868 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl); 2869 2870 #ifdef __ARMEB__ 2871 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2872 #endif 2873 2874 #ifndef ARM_HAS_VBAR 2875 if (vector_page == ARM_VECTORS_HIGH) 2876 cpuctrl |= CPU_CONTROL_VECRELOC; 2877 #endif 2878 2879 /* Clear out the cache */ 2880 cpu_idcache_wbinv_all(); 2881 2882 /* Now really make sure they are clean. */ 2883 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2884 2885 /* Set the control register */ 2886 curcpu()->ci_ctrl = cpuctrl; 2887 cpu_control(0xffffffff, cpuctrl); 2888 2889 /* And again. 
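	 * (The first wbinv ran before the control register was rewritten;
	 * presumably this second pass flushes anything the caches picked
	 * up while the new settings were taking effect.)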
*/ 2890 cpu_idcache_wbinv_all(); 2891 } 2892 #endif /* CPU_ARM9E || CPU_ARM10 */ 2893 2894 #if defined(CPU_ARM11) 2895 struct cpu_option arm11_options[] = { 2896 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2897 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2898 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2899 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2900 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2901 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2902 { "arm11.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2903 { NULL, IGN, IGN, 0 } 2904 }; 2905 2906 void 2907 arm11_setup(char *args) 2908 { 2909 int cpuctrl, cpuctrlmask; 2910 2911 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2912 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2913 /* | CPU_CONTROL_BPRD_ENABLE */; 2914 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2915 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2916 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE 2917 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2918 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 2919 2920 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2921 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2922 #endif 2923 2924 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); 2925 2926 #ifdef __ARMEB__ 2927 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2928 #endif 2929 2930 #ifndef ARM_HAS_VBAR 2931 if (vector_page == ARM_VECTORS_HIGH) 2932 cpuctrl |= CPU_CONTROL_VECRELOC; 2933 #endif 2934 2935 /* Clear out the cache */ 2936 cpu_idcache_wbinv_all(); 2937 2938 /* Now really make sure they are clean. */ 2939 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2940 2941 /* Allow detection code to find the VFP if it's fitted. */ 2942 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 2943 2944 /* Set the control register */ 2945 curcpu()->ci_ctrl = cpuctrl; 2946 cpu_control(cpuctrlmask, cpuctrl); 2947 2948 /* And again. */ 2949 cpu_idcache_wbinv_all(); 2950 } 2951 #endif /* CPU_ARM11 */ 2952 2953 #if defined(CPU_ARM11MPCORE) 2954 2955 void 2956 arm11mpcore_setup(char *args) 2957 { 2958 int cpuctrl, cpuctrlmask; 2959 2960 cpuctrl = CPU_CONTROL_IC_ENABLE 2961 | CPU_CONTROL_DC_ENABLE 2962 | CPU_CONTROL_BPRD_ENABLE ; 2963 cpuctrlmask = CPU_CONTROL_IC_ENABLE 2964 | CPU_CONTROL_DC_ENABLE 2965 | CPU_CONTROL_BPRD_ENABLE 2966 | CPU_CONTROL_AFLT_ENABLE 2967 | CPU_CONTROL_VECRELOC; 2968 2969 #ifdef ARM11MPCORE_MMU_COMPAT 2970 /* XXX: S and R? */ 2971 #endif 2972 2973 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2974 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2975 #endif 2976 2977 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); 2978 2979 #ifndef ARM_HAS_VBAR 2980 if (vector_page == ARM_VECTORS_HIGH) 2981 cpuctrl |= CPU_CONTROL_VECRELOC; 2982 #endif 2983 2984 /* Clear out the cache */ 2985 cpu_idcache_wbinv_all(); 2986 2987 /* Now really make sure they are clean. */ 2988 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2989 2990 /* Allow detection code to find the VFP if it's fitted. */ 2991 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 2992 2993 /* Set the control register */ 2994 curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl); 2995 2996 /* And again. 
*/
2997 	cpu_idcache_wbinv_all();
2998 }
2999 #endif /* CPU_ARM11MPCORE */
3000 
3001 #ifdef CPU_PJ4B
3002 void
3003 pj4bv7_setup(char *args)
3004 {
3005 	int cpuctrl;
3006 
3007 	pj4b_config();
3008 
3009 	cpuctrl = CPU_CONTROL_MMU_ENABLE;
3010 #ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
3011 	cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
3012 #else
3013 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3014 #endif
3015 	cpuctrl |= CPU_CONTROL_DC_ENABLE;
3016 	cpuctrl |= CPU_CONTROL_IC_ENABLE;
3017 	cpuctrl |= (0xf << 3);
3018 	cpuctrl |= CPU_CONTROL_BPRD_ENABLE;
3019 	cpuctrl |= (0x5 << 16) | (1 << 22);
3020 	cpuctrl |= CPU_CONTROL_XP_ENABLE;
3021 
3022 #ifndef ARM_HAS_VBAR
3023 	if (vector_page == ARM_VECTORS_HIGH)
3024 		cpuctrl |= CPU_CONTROL_VECRELOC;
3025 #endif
3026 
3027 	/* Clear out the cache */
3028 	cpu_idcache_wbinv_all();
3029 
3030 	/* Set the control register */
3031 	cpu_control(0xffffffff, cpuctrl);
3032 
3033 	/* And again. */
3034 	cpu_idcache_wbinv_all();
3035 
3036 	curcpu()->ci_ctrl = cpuctrl;
3037 }
3038 #endif /* CPU_PJ4B */
3039 
3040 #if defined(CPU_CORTEX)
3041 struct cpu_option armv7_options[] = {
3042 	{ "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3043 	{ "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3044 	{ "armv7.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
3045 	{ "armv7.icache", BIC, OR, CPU_CONTROL_IC_ENABLE },
3046 	{ "armv7.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE },
3047 	{ NULL, IGN, IGN, 0}
3048 };
3049 
3050 void
3051 armv7_setup(char *args)
3052 {
3053 	int cpuctrl;
3054 
3055 	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
3056 	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE;
3057 #if 0
3058 	int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
3059 	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
3060 	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
3061 	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
3062 	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;
3063 #endif
3064 
3065 #ifdef ARM32_DISABLE_ALIGNMENT_FAULTS
3066 	cpuctrl |= CPU_CONTROL_UNAL_ENABLE;
3067 #else
3068 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3069 #endif
3070 
3071 	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);
3072 
3073 #ifndef ARM_HAS_VBAR
3074 	if (vector_page == ARM_VECTORS_HIGH)
3075 		cpuctrl |= CPU_CONTROL_VECRELOC;
3076 #endif
3077 
3078 	/* Clear out the cache */
3079 	cpu_idcache_wbinv_all();
3080 
3081 	/* Set the control register */
3082 	curcpu()->ci_ctrl = cpuctrl;
3083 	cpu_control(0xffffffff, cpuctrl);
3084 }
3085 #endif /* CPU_CORTEX */
3086 
3087 
3088 #if defined(CPU_ARM1136) || defined(CPU_ARM1176)
3089 void
3090 arm11x6_setup(char *args)
3091 {
3092 	int cpuctrl, cpuctrl_wax;
3093 	uint32_t auxctrl, auxctrl_wax;
3094 	uint32_t tmp, tmp2;
3095 	uint32_t sbz=0;
3096 	uint32_t cpuid;
3097 
3098 	cpuid = cpu_id();
3099 
3100 	cpuctrl =
3101 	    CPU_CONTROL_MMU_ENABLE |
3102 	    CPU_CONTROL_DC_ENABLE |
3103 	    CPU_CONTROL_WBUF_ENABLE |
3104 	    CPU_CONTROL_32BP_ENABLE |
3105 	    CPU_CONTROL_32BD_ENABLE |
3106 	    CPU_CONTROL_LABT_ENABLE |
3107 	    CPU_CONTROL_SYST_ENABLE |
3108 	    CPU_CONTROL_UNAL_ENABLE |
3109 	    CPU_CONTROL_IC_ENABLE;
3110 
3111 	/*
3112 	 * "write as existing" bits
3113 	 * (the inverse of this is used as the write mask)
3114 	 */
3115 	cpuctrl_wax =
3116 	    (3 << 30) |
3117 	    (1 << 29) |
3118 	    (1 << 28) |
3119 	    (3 << 26) |
3120 	    (3 << 19) |
3121 	    (1 << 17);
3122 
3123 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
3124 	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
3125 #endif
3126 
3127 	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);
3128 
3129 #ifdef __ARMEB__
3130 	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
3131 #endif 3132 3133 #ifndef ARM_HAS_VBAR 3134 if (vector_page == ARM_VECTORS_HIGH) 3135 cpuctrl |= CPU_CONTROL_VECRELOC; 3136 #endif 3137 3138 auxctrl = 0; 3139 auxctrl_wax = ~0; 3140 /* 3141 * This options enables the workaround for the 364296 ARM1136 3142 * r0pX errata (possible cache data corruption with 3143 * hit-under-miss enabled). It sets the undocumented bit 31 in 3144 * the auxiliary control register and the FI bit in the control 3145 * register, thus disabling hit-under-miss without putting the 3146 * processor into full low interrupt latency mode. ARM11MPCore 3147 * is not affected. 3148 */ 3149 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */ 3150 cpuctrl |= CPU_CONTROL_FI_ENABLE; 3151 auxctrl = ARM1136_AUXCTL_PFI; 3152 auxctrl_wax = ~ARM1136_AUXCTL_PFI; 3153 } 3154 3155 /* 3156 * Enable an errata workaround 3157 */ 3158 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */ 3159 auxctrl = ARM1176_AUXCTL_PHD; 3160 auxctrl_wax = ~ARM1176_AUXCTL_PHD; 3161 } 3162 3163 /* Clear out the cache */ 3164 cpu_idcache_wbinv_all(); 3165 3166 /* Now really make sure they are clean. */ 3167 __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz)); 3168 3169 /* Allow detection code to find the VFP if it's fitted. */ 3170 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 3171 3172 /* Set the control register */ 3173 curcpu()->ci_ctrl = cpuctrl; 3174 cpu_control(~cpuctrl_wax, cpuctrl); 3175 3176 __asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t" 3177 "and %1, %0, %2\n\t" 3178 "orr %1, %1, %3\n\t" 3179 "teq %0, %1\n\t" 3180 "mcrne p15, 0, %1, c1, c0, 1\n\t" 3181 : "=r"(tmp), "=r"(tmp2) : 3182 "r"(auxctrl_wax), "r"(auxctrl)); 3183 3184 /* And again. */ 3185 cpu_idcache_wbinv_all(); 3186 } 3187 #endif /* CPU_ARM1136 || CPU_ARM1176 */ 3188 3189 #ifdef CPU_SA110 3190 struct cpu_option sa110_options[] = { 3191 #ifdef COMPAT_12 3192 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3193 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 3194 #endif /* COMPAT_12 */ 3195 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3196 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3197 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3198 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3199 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3200 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3201 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3202 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3203 { NULL, IGN, IGN, 0 } 3204 }; 3205 3206 void 3207 sa110_setup(char *args) 3208 { 3209 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3210 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3211 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3212 | CPU_CONTROL_WBUF_ENABLE; 3213 #ifdef notyet 3214 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3215 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3216 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3217 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3218 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3219 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3220 | CPU_CONTROL_CPCLK; 3221 #endif 3222 3223 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3224 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3225 #endif 3226 3227 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl); 3228 3229 #ifdef __ARMEB__ 3230 cpuctrl |= 
CPU_CONTROL_BEND_ENABLE; 3231 #endif 3232 3233 #ifndef ARM_HAS_VBAR 3234 if (vector_page == ARM_VECTORS_HIGH) 3235 cpuctrl |= CPU_CONTROL_VECRELOC; 3236 #endif 3237 3238 /* Clear out the cache */ 3239 cpu_idcache_wbinv_all(); 3240 3241 /* Set the control register */ 3242 curcpu()->ci_ctrl = cpuctrl; 3243 #ifdef notyet 3244 cpu_control(cpuctrlmask, cpuctrl); 3245 #endif 3246 cpu_control(0xffffffff, cpuctrl); 3247 3248 /* 3249 * enable clockswitching, note that this doesn't read or write to r0, 3250 * r0 is just to make it valid asm 3251 */ 3252 __asm ("mcr 15, 0, r0, c15, c1, 2"); 3253 } 3254 #endif /* CPU_SA110 */ 3255 3256 #if defined(CPU_SA1100) || defined(CPU_SA1110) 3257 struct cpu_option sa11x0_options[] = { 3258 #ifdef COMPAT_12 3259 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3260 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 3261 #endif /* COMPAT_12 */ 3262 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3263 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3264 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3265 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3266 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3267 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3268 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3269 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3270 { NULL, IGN, IGN, 0 } 3271 }; 3272 3273 void 3274 sa11x0_setup(char *args) 3275 { 3276 int cpuctrl, cpuctrlmask; 3277 3278 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3279 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3280 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3281 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE; 3282 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3283 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3284 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3285 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3286 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3287 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3288 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 3289 3290 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3291 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3292 #endif 3293 3294 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl); 3295 3296 #ifdef __ARMEB__ 3297 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3298 #endif 3299 3300 #ifndef ARM_HAS_VBAR 3301 if (vector_page == ARM_VECTORS_HIGH) 3302 cpuctrl |= CPU_CONTROL_VECRELOC; 3303 #endif 3304 3305 /* Clear out the cache */ 3306 cpu_idcache_wbinv_all(); 3307 3308 /* Set the control register */ 3309 curcpu()->ci_ctrl = cpuctrl; 3310 cpu_control(0xffffffff, cpuctrl); 3311 } 3312 #endif /* CPU_SA1100 || CPU_SA1110 */ 3313 3314 #if defined(CPU_FA526) 3315 struct cpu_option fa526_options[] = { 3316 #ifdef COMPAT_12 3317 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3318 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 3319 #endif /* COMPAT_12 */ 3320 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3321 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3322 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3323 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3324 { NULL, IGN, IGN, 0 } 3325 }; 3326 3327 void 3328 fa526_setup(char *args) 3329 { 3330 int cpuctrl, cpuctrlmask; 3331 3332 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3333 | 
CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3334 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3335 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE; 3336 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3337 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3338 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3339 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3340 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3341 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3342 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 3343 3344 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3345 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3346 #endif 3347 3348 cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl); 3349 3350 #ifdef __ARMEB__ 3351 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3352 #endif 3353 3354 #ifndef ARM_HAS_VBAR 3355 if (vector_page == ARM_VECTORS_HIGH) 3356 cpuctrl |= CPU_CONTROL_VECRELOC; 3357 #endif 3358 3359 /* Clear out the cache */ 3360 cpu_idcache_wbinv_all(); 3361 3362 /* Set the control register */ 3363 curcpu()->ci_ctrl = cpuctrl; 3364 cpu_control(0xffffffff, cpuctrl); 3365 } 3366 #endif /* CPU_FA526 */ 3367 3368 #if defined(CPU_IXP12X0) 3369 struct cpu_option ixp12x0_options[] = { 3370 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3371 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3372 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3373 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3374 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3375 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3376 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3377 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3378 { NULL, IGN, IGN, 0 } 3379 }; 3380 3381 void 3382 ixp12x0_setup(char *args) 3383 { 3384 int cpuctrl, cpuctrlmask; 3385 3386 3387 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE 3388 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE 3389 | CPU_CONTROL_IC_ENABLE; 3390 3391 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE 3392 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE 3393 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE 3394 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE 3395 | CPU_CONTROL_VECRELOC; 3396 3397 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3398 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3399 #endif 3400 3401 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl); 3402 3403 #ifdef __ARMEB__ 3404 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3405 #endif 3406 3407 #ifndef ARM_HAS_VBAR 3408 if (vector_page == ARM_VECTORS_HIGH) 3409 cpuctrl |= CPU_CONTROL_VECRELOC; 3410 #endif 3411 3412 /* Clear out the cache */ 3413 cpu_idcache_wbinv_all(); 3414 3415 /* Set the control register */ 3416 curcpu()->ci_ctrl = cpuctrl; 3417 /* cpu_control(0xffffffff, cpuctrl); */ 3418 cpu_control(cpuctrlmask, cpuctrl); 3419 } 3420 #endif /* CPU_IXP12X0 */ 3421 3422 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 3423 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || defined(CPU_CORTEX) 3424 struct cpu_option xscale_options[] = { 3425 #ifdef COMPAT_12 3426 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 3427 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3428 #endif /* COMPAT_12 */ 3429 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 3430 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3431 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | 
CPU_CONTROL_DC_ENABLE) }, 3432 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 3433 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3434 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3435 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3436 { NULL, IGN, IGN, 0 } 3437 }; 3438 3439 void 3440 xscale_setup(char *args) 3441 { 3442 uint32_t auxctl; 3443 int cpuctrl; 3444 3445 /* 3446 * The XScale Write Buffer is always enabled. Our option 3447 * is to enable/disable coalescing. Note that bits 6:3 3448 * must always be enabled. 3449 */ 3450 3451 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3452 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3453 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3454 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE 3455 | CPU_CONTROL_BPRD_ENABLE; 3456 #if 0 3457 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3458 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3459 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3460 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3461 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3462 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3463 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 3464 #endif 3465 3466 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3467 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3468 #endif 3469 3470 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl); 3471 3472 #ifdef __ARMEB__ 3473 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3474 #endif 3475 3476 #ifndef ARM_HAS_VBAR 3477 if (vector_page == ARM_VECTORS_HIGH) 3478 cpuctrl |= CPU_CONTROL_VECRELOC; 3479 #endif 3480 3481 /* Clear out the cache */ 3482 cpu_idcache_wbinv_all(); 3483 3484 /* 3485 * Set the control register. Note that bits 6:3 must always 3486 * be set to 1. 
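	 * (Bits 6:3 are the legacy write buffer/cache enable bits from
	 * earlier ARM cores; on XScale they read as ones and must be
	 * written as ones.)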
3487 */ 3488 curcpu()->ci_ctrl = cpuctrl; 3489 /* cpu_control(cpuctrlmask, cpuctrl);*/ 3490 cpu_control(0xffffffff, cpuctrl); 3491 3492 /* Make sure write coalescing is turned on */ 3493 __asm volatile("mrc p15, 0, %0, c1, c0, 1" 3494 : "=r" (auxctl)); 3495 #ifdef XSCALE_NO_COALESCE_WRITES 3496 auxctl |= XSCALE_AUXCTL_K; 3497 #else 3498 auxctl &= ~XSCALE_AUXCTL_K; 3499 #endif 3500 __asm volatile("mcr p15, 0, %0, c1, c0, 1" 3501 : : "r" (auxctl)); 3502 } 3503 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */ 3504 3505 #if defined(CPU_SHEEVA) 3506 struct cpu_option sheeva_options[] = { 3507 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3508 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3509 { "sheeva.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3510 { "sheeva.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3511 { "sheeva.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3512 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3513 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3514 { "sheeva.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3515 { NULL, IGN, IGN, 0 } 3516 }; 3517 3518 void 3519 sheeva_setup(char *args) 3520 { 3521 int cpuctrl, cpuctrlmask; 3522 uint32_t sheeva_ext; 3523 3524 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 3525 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3526 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE; 3527 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 3528 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3529 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3530 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3531 | CPU_CONTROL_BPRD_ENABLE 3532 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 3533 3534 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3535 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3536 #endif 3537 3538 cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl); 3539 3540 /* Enable DCache Streaming Switch and Write Allocate */ 3541 __asm volatile("mrc p15, 1, %0, c15, c1, 0" 3542 : "=r" (sheeva_ext)); 3543 3544 sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN; 3545 3546 __asm volatile("mcr p15, 1, %0, c15, c1, 0" 3547 :: "r" (sheeva_ext)); 3548 3549 /* 3550 * Sheeva has L2 Cache. Enable/Disable it here. 3551 * Really not support yet... 3552 */ 3553 3554 #ifdef __ARMEB__ 3555 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3556 #endif 3557 3558 #ifndef ARM_HAS_VBAR 3559 if (vector_page == ARM_VECTORS_HIGH) 3560 cpuctrl |= CPU_CONTROL_VECRELOC; 3561 #endif 3562 3563 /* Clear out the cache */ 3564 cpu_idcache_wbinv_all(); 3565 3566 /* Now really make sure they are clean. */ 3567 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 3568 3569 /* Set the control register */ 3570 curcpu()->ci_ctrl = cpuctrl; 3571 cpu_control(0xffffffff, cpuctrl); 3572 3573 /* And again. */ 3574 cpu_idcache_wbinv_all(); 3575 } 3576 #endif /* CPU_SHEEVA */ 3577