/*	$NetBSD: cpufunc.c,v 1.140 2014/02/21 06:28:25 matt Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * cortexa8 improvements Copyright (c) Goeran Weinholt
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created	: 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.140 2014/02/21 06:28:25 matt Exp $");

#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_perfctrs.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>
#include <arm/locore.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;
#endif

#if defined(CPU_ARMV7) && (defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6))
bool cpu_armv7_p;
#endif

#if defined(CPU_ARMV6) && (defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6))
bool cpu_armv6_p;
#endif


/* PRIMARY CACHE VARIABLES */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
u_int	arm_cache_prefer_mask;
#endif
struct	arm_cache_info arm_pcache;
struct	arm_cache_info arm_scache;

u_int	arm_dcache_align;
u_int	arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int	cpu_do_powersave;

#ifdef CPU_ARM2
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id = arm2_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID = cpufunc_nullop,
	.cf_tlb_flushID_SE = (void *)cpufunc_nullop,
	.cf_tlb_flushI = cpufunc_nullop,
	.cf_tlb_flushI_SE = (void *)cpufunc_nullop,
	.cf_tlb_flushD = cpufunc_nullop,
	.cf_tlb_flushD_SE = (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm3_cache_flush,
	.cf_dcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_dcache_inv_range = (void *)cpufunc_nullop,
	.cf_dcache_wb_range = (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = cpufunc_nullop,
	.cf_idcache_wbinv_range = (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = early_abort_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_setup = (void *)cpufunc_nullop

};
#endif /* CPU_ARM2 */

#ifdef CPU_ARM250
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id = arm250_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID = cpufunc_nullop,
	.cf_tlb_flushID_SE = (void *)cpufunc_nullop,
	.cf_tlb_flushI = cpufunc_nullop,
	.cf_tlb_flushI_SE = (void *)cpufunc_nullop,
	.cf_tlb_flushD = cpufunc_nullop,
	.cf_tlb_flushD_SE = (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm3_cache_flush,
	.cf_dcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_dcache_inv_range = (void *)cpufunc_nullop,
	.cf_dcache_wb_range = (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = cpufunc_nullop,
	.cf_idcache_wbinv_range = (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = early_abort_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_setup = (void *)cpufunc_nullop

};
#endif /* CPU_ARM250 */
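
/*
 * Editorial note (not in the original source): each struct cpu_functions
 * above and below is a per-CPU-family dispatch table.  Callers never use
 * the members directly; cpufunc.h wraps them in macros along these
 * (assumed, illustrative) lines:
 *
 *	#define	cpu_tlb_flushID()	cpufuncs.cf_tlb_flushID()
 *	#define	cpu_dcache_wb_range(a, s) \
 *		cpufuncs.cf_dcache_wb_range((a), (s))
 *
 * Members whose prototypes differ from cpufunc_nullop()'s are pointed at
 * the shared no-op via a (void *) cast, which relies on the no-op never
 * looking at its arguments.
 */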

#ifdef CPU_ARM3
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = arm3_control,

	/* TLB functions */

	.cf_tlb_flushID = cpufunc_nullop,
	.cf_tlb_flushID_SE = (void *)cpufunc_nullop,
	.cf_tlb_flushI = cpufunc_nullop,
	.cf_tlb_flushI_SE = (void *)cpufunc_nullop,
	.cf_tlb_flushD = cpufunc_nullop,
	.cf_tlb_flushD_SE = (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm3_cache_flush,
	.cf_dcache_wbinv_range = (void *)arm3_cache_flush,
	.cf_dcache_inv_range = (void *)arm3_cache_flush,
	.cf_dcache_wb_range = (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm3_cache_flush,
	.cf_idcache_wbinv_range = (void *)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = early_abort_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_setup = (void *)cpufunc_nullop

};
#endif /* CPU_ARM3 */

#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm67_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm67_tlb_flush,
	.cf_tlb_flushID_SE = arm67_tlb_purge,
	.cf_tlb_flushI = arm67_tlb_flush,
	.cf_tlb_flushI_SE = arm67_tlb_purge,
	.cf_tlb_flushD = arm67_tlb_flush,
	.cf_tlb_flushD_SE = arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm67_cache_flush,
	.cf_dcache_wbinv_range = (void *)arm67_cache_flush,
	.cf_dcache_inv_range = (void *)arm67_cache_flush,
	.cf_dcache_wb_range = (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm67_cache_flush,
	.cf_idcache_wbinv_range = (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup = late_abort_fixup,
#else
	.cf_dataabt_fixup = early_abort_fixup,
#endif
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm67_context_switch,

	.cf_setup = arm6_setup

};
#endif /* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm67_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm67_tlb_flush,
	.cf_tlb_flushID_SE = arm67_tlb_purge,
	.cf_tlb_flushI = arm67_tlb_flush,
	.cf_tlb_flushI_SE = arm67_tlb_purge,
	.cf_tlb_flushD = arm67_tlb_flush,
	.cf_tlb_flushD_SE = arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm67_cache_flush,
	.cf_dcache_wbinv_range = (void *)arm67_cache_flush,
	.cf_dcache_inv_range = (void *)arm67_cache_flush,
	.cf_dcache_wb_range = (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm67_cache_flush,
	.cf_idcache_wbinv_range = (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = late_abort_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm67_context_switch,

	.cf_setup = arm7_setup

};
#endif /* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm7tdmi_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE = arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI = arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE = arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD = arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE = arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range = (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range = (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range = (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range = (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = late_abort_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm7tdmi_context_switch,

	.cf_setup = arm7tdmi_setup

};
#endif /* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm8_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm8_tlb_flushID,
	.cf_tlb_flushID_SE = arm8_tlb_flushID_SE,
	.cf_tlb_flushI = arm8_tlb_flushID,
	.cf_tlb_flushI_SE = arm8_tlb_flushID_SE,
	.cf_tlb_flushD = arm8_tlb_flushID,
	.cf_tlb_flushD_SE = arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all = cpufunc_nullop,
	.cf_icache_sync_range = (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all = arm8_cache_purgeID,
	.cf_dcache_wbinv_range = (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range = (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range = (void *)arm8_cache_cleanID,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm8_cache_purgeID,
	.cf_idcache_wbinv_range = (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = cpufunc_nullop,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm8_context_switch,

	.cf_setup = arm8_setup
};
#endif /* CPU_ARM8 */

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm9_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = arm9_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = arm9_icache_sync_all,
	.cf_icache_sync_range = arm9_icache_sync_range,

	.cf_dcache_wbinv_all = arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range = arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range = arm9_dcache_wbinv_range,
	.cf_dcache_wb_range = arm9_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range = arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm9_context_switch,

	.cf_setup = arm9_setup

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = armv5_ec_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = arm10_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = arm10_tlb_flushI_SE,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv5_ec_icache_sync_all,
	.cf_icache_sync_range = armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all = armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range = armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range = armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range = armv5_ec_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm10_context_switch,

	.cf_setup = arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = armv5_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = arm10_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = arm10_tlb_flushI_SE,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv5_icache_sync_all,
	.cf_icache_sync_range = armv5_icache_sync_range,

	.cf_dcache_wbinv_all = armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range = armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range = armv5_dcache_wbinv_range,
	.cf_dcache_wb_range = armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm10_context_switch,

	.cf_setup = arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm11_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm11_tlb_flushID,
	.cf_tlb_flushID_SE = arm11_tlb_flushID_SE,
	.cf_tlb_flushI = arm11_tlb_flushI,
	.cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
	.cf_tlb_flushD = arm11_tlb_flushD,
	.cf_tlb_flushD_SE = arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv6_icache_sync_all,
	.cf_icache_sync_range = armv6_icache_sync_range,

	.cf_dcache_wbinv_all = armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range = armv6_dcache_wbinv_range,
	.cf_dcache_inv_range = armv6_dcache_inv_range,
	.cf_dcache_wb_range = armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = arm11_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm11_context_switch,

	.cf_setup = arm11_setup

};
#endif /* CPU_ARM11 */

#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm11x6_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm11_tlb_flushID,
	.cf_tlb_flushID_SE = arm11_tlb_flushID_SE,
	.cf_tlb_flushI = arm11_tlb_flushI,
	.cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
	.cf_tlb_flushD = arm11_tlb_flushD,
	.cf_tlb_flushD_SE = arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = arm11x6_icache_sync_all,		/* 411920 */
	.cf_icache_sync_range = arm11x6_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all = arm11x6_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range = armv6_dcache_wbinv_range,
	.cf_dcache_inv_range = armv6_dcache_inv_range,
	.cf_dcache_wb_range = armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm11x6_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf = arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf = arm11_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = arm11_sleep,	/* arm1136_sleep_rev0 */

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm11_context_switch,

	.cf_setup = arm11x6_setup

};
#endif /* CPU_ARM1136 */

#ifdef CPU_ARM1176
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm11x6_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm11_tlb_flushID,
	.cf_tlb_flushID_SE = arm11_tlb_flushID_SE,
	.cf_tlb_flushI = arm11_tlb_flushI,
	.cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
	.cf_tlb_flushD = arm11_tlb_flushD,
	.cf_tlb_flushD_SE = arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = arm11x6_icache_sync_all,		/* 415045 */
	.cf_icache_sync_range = arm11x6_icache_sync_range,	/* 371367 */

	.cf_dcache_wbinv_all = arm11x6_dcache_wbinv_all,	/* 415045 */
	.cf_dcache_wbinv_range = armv6_dcache_wbinv_range,
	.cf_dcache_inv_range = armv6_dcache_inv_range,
	.cf_dcache_wb_range = armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = arm11x6_idcache_wbinv_all,	/* 415045 */
	.cf_idcache_wbinv_range = arm11x6_idcache_wbinv_range,	/* 371367 */

	/* Other functions */

	.cf_flush_prefetchbuf = arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf = arm11_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = arm11x6_sleep,	/* no ref. */

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm11_context_switch,

	.cf_setup = arm11x6_setup

};
#endif /* CPU_ARM1176 */


#ifdef CPU_ARM11MPCORE
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = arm11_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = arm11_tlb_flushID,
	.cf_tlb_flushID_SE = arm11_tlb_flushID_SE,
	.cf_tlb_flushI = arm11_tlb_flushI,
	.cf_tlb_flushI_SE = arm11_tlb_flushI_SE,
	.cf_tlb_flushD = arm11_tlb_flushD,
	.cf_tlb_flushD_SE = arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv6_icache_sync_all,
	.cf_icache_sync_range = armv5_icache_sync_range,

	.cf_dcache_wbinv_all = armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range = armv5_dcache_wbinv_range,
	.cf_dcache_inv_range = armv5_dcache_inv_range,
	.cf_dcache_wb_range = armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = arm11_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm11_context_switch,

	.cf_setup = arm11mpcore_setup

};
#endif /* CPU_ARM11MPCORE */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = sa1_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = sa1_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = sa1_cache_syncI,
	.cf_icache_sync_range = sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all = sa1_cache_purgeD,
	.cf_dcache_wbinv_range = sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range = sa1_cache_purgeD_rng,
	.cf_dcache_wb_range = sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = sa1_cache_purgeID,
	.cf_idcache_wbinv_range = sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = sa110_context_switch,

	.cf_setup = sa110_setup
};
#endif /* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = sa1_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = sa1_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = sa1_cache_syncI,
	.cf_icache_sync_range = sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all = sa1_cache_purgeD,
	.cf_dcache_wbinv_range = sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range = sa1_cache_purgeD_rng,
	.cf_dcache_wb_range = sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = sa1_cache_purgeID,
	.cf_idcache_wbinv_range = sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf = sa11x0_drain_readbuf,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = sa11x0_context_switch,

	.cf_setup = sa11x0_setup
};
#endif /* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = fa526_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = fa526_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = fa526_tlb_flushI_SE,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = fa526_icache_sync_all,
	.cf_icache_sync_range = fa526_icache_sync_range,

	.cf_dcache_wbinv_all = fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range = fa526_dcache_wbinv_range,
	.cf_dcache_inv_range = fa526_dcache_inv_range,
	.cf_dcache_wb_range = fa526_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range = fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = fa526_flush_prefetchbuf,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = fa526_flush_brnchtgt_E,

	.cf_sleep = fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = fa526_context_switch,

	.cf_setup = fa526_setup
};
#endif /* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = sa1_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = sa1_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = sa1_cache_syncI,
	.cf_icache_sync_range = sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all = sa1_cache_purgeD,
	.cf_dcache_wbinv_range = sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range = sa1_cache_purgeD_rng,
	.cf_dcache_wb_range = sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = sa1_cache_purgeID,
	.cf_idcache_wbinv_range = sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf = ixp12x0_drain_readbuf,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = ixp12x0_context_switch,

	.cf_setup = ixp12x0_setup
};
#endif /* CPU_IXP12X0 */

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = xscale_cpwait,

	/* MMU functions */

	.cf_control = xscale_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = xscale_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = xscale_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = (void *)armv4_tlb_flushI,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = xscale_cache_syncI,
	.cf_icache_sync_range = xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all = xscale_cache_purgeD,
	.cf_dcache_wbinv_range = xscale_cache_purgeD_rng,
	.cf_dcache_inv_range = xscale_cache_flushD_rng,
	.cf_dcache_wb_range = xscale_cache_cleanD_rng,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = xscale_cache_purgeID,
	.cf_idcache_wbinv_range = xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = xscale_context_switch,

	.cf_setup = xscale_setup
};
#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
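
/*
 * Editorial note: unlike the tables above, the XScale table installs a
 * real cf_cpwait.  On XScale a CP15 write is not guaranteed to have
 * taken effect until the pipeline is drained, so a "CPWAIT" sequence
 * (read back a CP15 register and consume the result) must follow MMU
 * and cache control writes; cores without that requirement get away
 * with cpufunc_nullop here.
 */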

#if defined(CPU_CORTEX)
struct cpu_functions cortex_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = armv7_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv7_tlb_flushID,
	.cf_tlb_flushID_SE = armv7_tlb_flushID_SE,
	.cf_tlb_flushI = armv7_tlb_flushI,
	.cf_tlb_flushI_SE = armv7_tlb_flushI_SE,
	.cf_tlb_flushD = armv7_tlb_flushD,
	.cf_tlb_flushD_SE = armv7_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv7_icache_sync_all,
	.cf_icache_sync_range = armv7_icache_sync_range,

	.cf_dcache_wbinv_all = armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range = armv7_dcache_wbinv_range,
	.cf_dcache_inv_range = armv7_dcache_inv_range,
	.cf_dcache_wb_range = armv7_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range = armv7_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv7_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = armv7_context_switch,

	.cf_setup = armv7_setup

};
#endif /* CPU_CORTEX */

#ifdef CPU_PJ4B
struct cpu_functions pj4bv7_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = pj4b_drain_writebuf,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = pj4b_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = pj4b_tlb_flushID,
	.cf_tlb_flushID_SE = pj4b_tlb_flushID_SE,
	.cf_tlb_flushI = pj4b_tlb_flushID,
	.cf_tlb_flushI_SE = pj4b_tlb_flushID_SE,
	.cf_tlb_flushD = pj4b_tlb_flushID,
	.cf_tlb_flushD_SE = pj4b_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv7_idcache_wbinv_all,
	.cf_icache_sync_range = pj4b_icache_sync_range,

	.cf_dcache_wbinv_all = armv7_dcache_wbinv_all,
	.cf_dcache_wbinv_range = pj4b_dcache_wbinv_range,
	.cf_dcache_inv_range = pj4b_dcache_inv_range,
	.cf_dcache_wb_range = pj4b_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv7_idcache_wbinv_all,
	.cf_idcache_wbinv_range = pj4b_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = pj4b_drain_readbuf,
	.cf_drain_writebuf = pj4b_drain_writebuf,
	.cf_flush_brnchtgt_C = pj4b_flush_brnchtgt_all,
	.cf_flush_brnchtgt_E = pj4b_flush_brnchtgt_va,

	.cf_sleep = (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = pj4b_context_switch,

	.cf_setup = pj4bv7_setup
};
#endif /* CPU_PJ4B */

#ifdef CPU_SHEEVA
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id = cpufunc_id,
	.cf_cpwait = cpufunc_nullop,

	/* MMU functions */

	.cf_control = cpufunc_control,
	.cf_domains = cpufunc_domains,
	.cf_setttb = armv5_ec_setttb,
	.cf_faultstatus = cpufunc_faultstatus,
	.cf_faultaddress = cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID = armv4_tlb_flushID,
	.cf_tlb_flushID_SE = arm10_tlb_flushID_SE,
	.cf_tlb_flushI = armv4_tlb_flushI,
	.cf_tlb_flushI_SE = arm10_tlb_flushI_SE,
	.cf_tlb_flushD = armv4_tlb_flushD,
	.cf_tlb_flushD_SE = armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all = armv5_ec_icache_sync_all,
	.cf_icache_sync_range = armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all = armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range = sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range = sheeva_dcache_inv_range,
	.cf_dcache_wb_range = sheeva_dcache_wb_range,

	.cf_sdcache_wbinv_range = (void *)cpufunc_nullop,
	.cf_sdcache_inv_range = (void *)cpufunc_nullop,
	.cf_sdcache_wb_range = (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all = armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range = sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf = cpufunc_nullop,
	.cf_drain_writebuf = armv4_drain_writebuf,
	.cf_flush_brnchtgt_C = cpufunc_nullop,
	.cf_flush_brnchtgt_E = (void *)cpufunc_nullop,

	.cf_sleep = (void *)sheeva_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup = cpufunc_null_fixup,
	.cf_prefetchabt_fixup = cpufunc_null_fixup,

	.cf_context_switch = arm10_context_switch,

	.cf_setup = sheeva_setup
};
#endif /* CPU_SHEEVA */


/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_ARM11) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_CORTEX) || defined(CPU_PJ4B) || defined(CPU_SHEEVA)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers. */
static int	arm_dcache_log2_nsets;
static int	arm_dcache_log2_assoc;
static int	arm_dcache_log2_linesize;
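
/*
 * Editorial note: these log2 values parameterize the set/way cache
 * loops.  set_cpufuncs() below derives, e.g., arm9_dcache_sets_inc and
 * armv5_dcache_index_inc from them, so the assembly clean/invalidate
 * routines can walk every (set, way) pair without re-deriving the
 * cache geometry on each call.
 */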

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static inline u_int
get_cachesize_cp15(int cssr)
{
	u_int csid;

#if ((CPU_CORTEX) > 0) || defined(CPU_PJ4B)
	__asm volatile(".arch\tarmv7a");
	__asm volatile("mcr p15, 2, %0, c0, c0, 0" :: "r" (cssr));
	__asm volatile("isb");	/* sync to the new cssr */
#else
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr));
#endif
	__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid));
	return csid;
}
#endif

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static void
get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
{
	u_int csid;
	u_int nsets;

	if (clidr & 6) {
		csid = get_cachesize_cp15(level << 1); /* select dcache values */
		nsets = CPU_CSID_NUMSETS(csid) + 1;
		info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->dcache_size = info->dcache_line_size * info->dcache_ways * nsets;

		if (level == 0) {
			arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
			arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
			arm_dcache_log2_nsets = 31 - __builtin_clz(nsets*2-1);
		}
	}

	info->cache_unified = (clidr == 4);

	if (level > 0) {
		info->dcache_type = CACHE_TYPE_PIPT;
		info->icache_type = CACHE_TYPE_PIPT;
	}

	if (info->cache_unified) {
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_size = info->dcache_size;
	} else {
		csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD); /* select icache values */
		nsets = CPU_CSID_NUMSETS(csid) + 1;
		info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->icache_size = info->icache_line_size * info->icache_ways * nsets;
	}
	if (level == 0
	    && info->dcache_size / info->dcache_ways <= PAGE_SIZE
	    && info->icache_size / info->icache_ways <= PAGE_SIZE) {
		arm_cache_prefer_mask = 0;
	}
}
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */
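
/*
 * Worked example (hypothetical CCSIDR fields, for illustration only):
 * LineSize = 1, Associativity = 3, NumSets = 127 decode above as
 *
 *	line size = 1U << (1 + 4) = 32 bytes
 *	ways      = 3 + 1         = 4
 *	nsets     = 127 + 1       = 128
 *	size      = 32 * 4 * 128  = 16 KiB
 *
 * and a way of 16384 / 4 = 4 KiB (== PAGE_SIZE on most ports) would
 * let level 0 clear arm_cache_prefer_mask, i.e. no page coloring.
 */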

static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * If an <opcode2> value corresponding to an unimplemented or
	 * reserved ID register is encountered, the System Control
	 * processor returns the value of the main ID register.
	 */
	if (ctype == cpu_id())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int clidr = armreg_clidr_read();

		if (CPU_CT4_L1IPOLICY(ctype) == CPU_CT4_L1_PIPT) {
			arm_pcache.icache_type = CACHE_TYPE_PIPT;
		} else {
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			arm_cache_prefer_mask = PAGE_SIZE;
		}
#ifdef CPU_CORTEX
		if (CPU_ID_CORTEX_P(cpu_id())) {
			arm_pcache.dcache_type = CACHE_TYPE_PIPT;
		} else
#endif
		{
			arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		}
		arm_pcache.cache_type = CPU_CT_CTYPE_WB14;

		get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
		arm_dcache_align = arm_pcache.dcache_line_size;
		clidr >>= 3;
		if (clidr & 7) {
			get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
			if (arm_scache.dcache_line_size < arm_dcache_align)
				arm_dcache_align = arm_scache.dcache_line_size;
		}
		if (arm_pcache.dcache_type == CACHE_TYPE_PIPT
		    && arm_pcache.icache_type == CACHE_TYPE_PIPT) {
			arm_cache_prefer_mask = 0;
		}
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache.cache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache.cache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache.cache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_pcache.icache_line_size = 0; /* not present */
			else
				arm_pcache.icache_ways = 1;
		} else {
			arm_pcache.icache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			arm_pcache.icache_type = CACHE_TYPE_VIPT;
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pcache.dcache_line_size = 0; /* not present */
		else
			arm_pcache.dcache_ways = 1;
	} else {
		arm_pcache.dcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6) > 0
		arm_pcache.dcache_type = CACHE_TYPE_VIPT;
		if ((CPU_CT_xSIZE_P & dsize)
		    && CPU_ID_ARM11_P(curcpu()->ci_arm_cpuid)) {
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
		}
#endif
	}
	arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pcache.dcache_line_size;

	arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	KASSERTMSG(arm_dcache_align <= CACHE_LINE_SIZE,
	    "arm_dcache_align=%u CACHE_LINE_SIZE=%u",
	    arm_dcache_align, CACHE_LINE_SIZE);
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM11 || FA526 ||
	  XSCALE || CORTEX || PJ4B || SHEEVA */
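
/*
 * Worked example for the pre-v6 path above (hypothetical cache type
 * register fields, for illustration only): a D-size field with M = 0,
 * SIZE = 5, ASSOC = 6, LEN = 2 gives multiplier = 2 and
 *
 *	line size  = 1U << (2 + 3) = 32 bytes
 *	ways       = 2 << (6 - 1)  = 64
 *	size       = 2 << (5 + 8)  = 16 KiB
 *	log2 nsets = 6 + 5 - 6 - 2 = 3, i.e. 8 sets
 *
 * which is self-consistent: 16384 / (64 * 32) = 8.
 */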

#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	uint32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
	/* cpuid,	    cache type,	      u,  dsiz, ls, wy, isiz,  ls, wy */
	{ CPU_ID_ARM2,	    0,		      1,     0,  0,  0,	    0,	0,  0 },
	{ CPU_ID_ARM250,    0,		      1,     0,  0,  0,	    0,	0,  0 },
	{ CPU_ID_ARM3,	    CPU_CT_CTYPE_WT,  1,  4096, 16, 64,	    0,	0,  0 },
	{ CPU_ID_ARM610,    CPU_CT_CTYPE_WT,  1,  4096, 16, 64,	    0,	0,  0 },
	{ CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,	    0,	0,  0 },
	{ CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,	    0,	0,  0 },
	{ CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,	    0,	0,  0 },
	{ CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,	    0,	0,  0 },
	/* XXX is this type right for SA-1? */
	{ CPU_ID_SA110,	    CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
	{ CPU_ID_SA1100,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
	{ CPU_ID_SA1110,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
	{ CPU_ID_IXP1200,   CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0 }
};
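
/*
 * Editorial note: if no cachetab[] entry matches, get_cachetype_table()
 * below leaves arm_pcache zeroed, so arm_dcache_align ends up 0 and
 * arm_dcache_align_mask wraps around to ~0.  The table therefore has to
 * cover every CPU model a kernel built with these options can boot on.
 */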

static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	uint32_t cpuid = cpu_id();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache.cache_type = cachetab[i].ct_pcache_type;
			arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
			arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
			arm_pcache.dcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
			arm_pcache.icache_size = cachetab[i].ct_picache_size;
			arm_pcache.icache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
		}
	}

	arm_dcache_align = arm_pcache.dcache_line_size;
	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM2
	if (cputype == CPU_ID_ARM2) {
		cpufuncs = arm2_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM2 */
#ifdef CPU_ARM250
	if (cputype == CPU_ID_ARM250) {
		cpufuncs = arm250_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif
#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif /* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		get_cachetype_cp15();
		arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		return 0;
	}
#endif /* CPU_ARM9 */
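
/*
 * Worked example for the arm9 set/way parameters above (assuming an
 * ARM920T-style 16 KiB, 64-way D-cache with 32-byte lines, i.e.
 * log2 linesize = 5, log2 nsets = 3, log2 assoc = 6):
 *
 *	arm9_dcache_sets_inc  = 1U << 5        = 32
 *	arm9_dcache_sets_max  = (1U << 8) - 32 = 224
 *	arm9_dcache_index_inc = 1U << (32 - 6) = 1 << 26
 *	arm9_dcache_index_max = 0U - (1U << 26)
 *
 * so the assembly loops stride the set number in the low bits and the
 * way number in the top bits of the CP15 set/way operand.
 */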

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_ARM926EJS ||
	    cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#if defined(CPU_SHEEVA)
	if (cputype == CPU_ID_MV88SV131 ||
	    cputype == CPU_ID_MV88FR571_VD) {
		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		cpu_do_powersave = 1;	/* Enable powersave */
		return 0;
	}
#endif /* CPU_SHEEVA */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		get_cachetype_cp15();
		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		armv5_dcache_sets_max =
		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
		    armv5_dcache_sets_inc;
		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM10 */


#if defined(CPU_ARM11MPCORE)
	if (cputype == CPU_ID_ARM11MPCORE) {
		cpufuncs = arm11mpcore_cpufuncs;
#if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
		cpu_armv6_p = true;
#endif
		get_cachetype_cp15();
		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
		    arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
		cpu_do_powersave = 1;	/* Enable powersave */
		pmap_pte_init_arm11mpcore();
		if (arm_cache_prefer_mask)
			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;

		return 0;

	}
#endif /* CPU_ARM11MPCORE */

#if defined(CPU_ARM11)
	if (cputype == CPU_ID_ARM1136JS ||
	    cputype == CPU_ID_ARM1136JSR1 ||
	    cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm11_cpufuncs;
#if defined(CPU_ARM1136)
		if (cputype == CPU_ID_ARM1136JS ||
		    cputype == CPU_ID_ARM1136JSR1) {
			cpufuncs = arm1136_cpufuncs;
			if (cputype == CPU_ID_ARM1136JS)
				cpufuncs.cf_sleep = arm1136_sleep_rev0;
		}
#endif
#if defined(CPU_ARM1176)
		if (cputype == CPU_ID_ARM1176JZS) {
			cpufuncs = arm1176_cpufuncs;
		}
#endif
#if defined(CPU_ARMV7) || defined(CPU_PRE_ARMV6)
		cpu_armv6_p = true;
#endif
		cpu_do_powersave = 1;	/* Enable powersave */
		get_cachetype_cp15();
#ifdef ARM11_CACHE_WRITE_THROUGH
		pmap_pte_init_arm11();
#else
		pmap_pte_init_generic();
#endif
		if (arm_cache_prefer_mask)
			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;

		/*
		 * Start and reset the PMC Cycle Counter.
		 */
		armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C);
		return 0;
	}
#endif /* CPU_ARM11 */
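
/*
 * Editorial note: the uvmexp.ncolors assignments above derive the
 * number of VM page colors from the VIPT alias mask built by
 * get_cachetype_cp15().  When each cache way is no larger than a page
 * (size / ways <= PAGE_SIZE) the mask is zero and no coloring is
 * needed; larger ways can alias, and (mask >> PGSHIFT) + 1 colors
 * keep potential aliases apart.
 */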
PGSHIFT) + 1; 1928 1929 /* 1930 * Start and reset the PMC Cycle Counter. 1931 */ 1932 armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C); 1933 return 0; 1934 } 1935 #endif /* CPU_ARM11 */ 1936 #ifdef CPU_SA110 1937 if (cputype == CPU_ID_SA110) { 1938 cpufuncs = sa110_cpufuncs; 1939 get_cachetype_table(); 1940 pmap_pte_init_sa1(); 1941 return 0; 1942 } 1943 #endif /* CPU_SA110 */ 1944 #ifdef CPU_SA1100 1945 if (cputype == CPU_ID_SA1100) { 1946 cpufuncs = sa11x0_cpufuncs; 1947 get_cachetype_table(); 1948 pmap_pte_init_sa1(); 1949 1950 /* Use powersave on this CPU. */ 1951 cpu_do_powersave = 1; 1952 1953 return 0; 1954 } 1955 #endif /* CPU_SA1100 */ 1956 #ifdef CPU_SA1110 1957 if (cputype == CPU_ID_SA1110) { 1958 cpufuncs = sa11x0_cpufuncs; 1959 get_cachetype_table(); 1960 pmap_pte_init_sa1(); 1961 1962 /* Use powersave on this CPU. */ 1963 cpu_do_powersave = 1; 1964 1965 return 0; 1966 } 1967 #endif /* CPU_SA1110 */ 1968 #ifdef CPU_FA526 1969 if (cputype == CPU_ID_FA526) { 1970 cpufuncs = fa526_cpufuncs; 1971 get_cachetype_cp15(); 1972 pmap_pte_init_generic(); 1973 1974 /* Use powersave on this CPU. */ 1975 cpu_do_powersave = 1; 1976 1977 return 0; 1978 } 1979 #endif /* CPU_FA526 */ 1980 #ifdef CPU_IXP12X0 1981 if (cputype == CPU_ID_IXP1200) { 1982 cpufuncs = ixp12x0_cpufuncs; 1983 get_cachetype_table(); 1984 pmap_pte_init_sa1(); 1985 return 0; 1986 } 1987 #endif /* CPU_IXP12X0 */ 1988 #ifdef CPU_XSCALE_80200 1989 if (cputype == CPU_ID_80200) { 1990 int rev = cpufunc_id() & CPU_ID_REVISION_MASK; 1991 1992 i80200_icu_init(); 1993 1994 /* 1995 * Reset the Performance Monitoring Unit to a 1996 * pristine state: 1997 * - CCNT, PMN0, PMN1 reset to 0 1998 * - overflow indications cleared 1999 * - all counters disabled 2000 */ 2001 __asm volatile("mcr p14, 0, %0, c0, c0, 0" 2002 : 2003 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF| 2004 PMNC_CC_IF)); 2005 2006 #if defined(XSCALE_CCLKCFG) 2007 /* 2008 * Crank CCLKCFG to maximum legal value. 2009 */ 2010 __asm volatile ("mcr p14, 0, %0, c6, c0, 0" 2011 : 2012 : "r" (XSCALE_CCLKCFG)); 2013 #endif 2014 2015 /* 2016 * XXX Disable ECC in the Bus Controller Unit; we 2017 * don't really support it, yet. Clear any pending 2018 * error indications. 2019 */ 2020 __asm volatile("mcr p13, 0, %0, c0, c1, 0" 2021 : 2022 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV)); 2023 2024 cpufuncs = xscale_cpufuncs; 2025 #if defined(PERFCTRS) 2026 xscale_pmu_init(); 2027 #endif 2028 2029 /* 2030 * i80200 errata: Step-A0 and A1 have a bug where 2031 * D$ dirty bits are not cleared on "invalidate by 2032 * address". 2033 * 2034 * Workaround: Clean cache line before invalidating. 
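 *
 * (xscale_cache_purgeD_rng below cleans each D$ line before
 * invalidating it, which is exactly the clean-before-invalidate
 * sequence this workaround calls for.)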
2035 */ 2036 if (rev == 0 || rev == 1) 2037 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng; 2038 2039 get_cachetype_cp15(); 2040 pmap_pte_init_xscale(); 2041 return 0; 2042 } 2043 #endif /* CPU_XSCALE_80200 */ 2044 #ifdef CPU_XSCALE_80321 2045 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 || 2046 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 || 2047 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) { 2048 i80321_icu_init(); 2049 2050 /* 2051 * Reset the Performance Monitoring Unit to a 2052 * pristine state: 2053 * - CCNT, PMN0, PMN1 reset to 0 2054 * - overflow indications cleared 2055 * - all counters disabled 2056 */ 2057 __asm volatile("mcr p14, 0, %0, c0, c0, 0" 2058 : 2059 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF| 2060 PMNC_CC_IF)); 2061 2062 cpufuncs = xscale_cpufuncs; 2063 #if defined(PERFCTRS) 2064 xscale_pmu_init(); 2065 #endif 2066 2067 get_cachetype_cp15(); 2068 pmap_pte_init_xscale(); 2069 return 0; 2070 } 2071 #endif /* CPU_XSCALE_80321 */ 2072 #ifdef __CPU_XSCALE_PXA2XX 2073 /* ignore core revision to test PXA2xx CPUs */ 2074 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X || 2075 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 || 2076 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) { 2077 2078 cpufuncs = xscale_cpufuncs; 2079 #if defined(PERFCTRS) 2080 xscale_pmu_init(); 2081 #endif 2082 2083 get_cachetype_cp15(); 2084 pmap_pte_init_xscale(); 2085 2086 /* Use powersave on this CPU. */ 2087 cpu_do_powersave = 1; 2088 2089 return 0; 2090 } 2091 #endif /* __CPU_XSCALE_PXA2XX */ 2092 #ifdef CPU_XSCALE_IXP425 2093 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 || 2094 cputype == CPU_ID_IXP425_266) { 2095 ixp425_icu_init(); 2096 2097 cpufuncs = xscale_cpufuncs; 2098 #if defined(PERFCTRS) 2099 xscale_pmu_init(); 2100 #endif 2101 2102 get_cachetype_cp15(); 2103 pmap_pte_init_xscale(); 2104 2105 return 0; 2106 } 2107 #endif /* CPU_XSCALE_IXP425 */ 2108 #if defined(CPU_CORTEX) 2109 if (CPU_ID_CORTEX_P(cputype)) { 2110 cpufuncs = cortex_cpufuncs; 2111 cpu_do_powersave = 1; /* Enable powersave */ 2112 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6) 2113 cpu_armv7_p = true; 2114 #endif 2115 get_cachetype_cp15(); 2116 pmap_pte_init_armv7(); 2117 if (arm_cache_prefer_mask) 2118 uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1; 2119 /* 2120 * Start and reset the PMC Cycle Counter. 2121 */ 2122 armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C); 2123 armreg_pmcntenset_write(CORTEX_CNTENS_C); 2124 return 0; 2125 } 2126 #endif /* CPU_CORTEX */ 2127 2128 #if defined(CPU_PJ4B) 2129 if ((cputype == CPU_ID_MV88SV581X_V6 || 2130 cputype == CPU_ID_MV88SV581X_V7 || 2131 cputype == CPU_ID_MV88SV584X_V7 || 2132 cputype == CPU_ID_ARM_88SV581X_V6 || 2133 cputype == CPU_ID_ARM_88SV581X_V7) && 2134 (armreg_pfr0_read() & ARM_PFR0_THUMBEE_MASK)) { 2135 cpufuncs = pj4bv7_cpufuncs; 2136 #if defined(CPU_ARMV6) || defined(CPU_PRE_ARMV6) 2137 cpu_armv7_p = true; 2138 #endif 2139 get_cachetype_cp15(); 2140 pmap_pte_init_armv7(); 2141 return 0; 2142 } 2143 #endif /* CPU_PJ4B */ 2144 2145 /* 2146 * Bzzzz. And the answer was ... 
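 * ... nothing: the CPU ID read at entry matched none of the CPU
 * types compiled into this kernel, so all we can do is panic.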
*/ 2148 panic("No support for this CPU type (%08x) in kernel", cputype); 2149 return(ARCHITECTURE_NOT_PRESENT); 2150 } 2151 2152 #ifdef CPU_ARM2 2153 u_int arm2_id(void) 2154 { 2155 2156 return CPU_ID_ARM2; 2157 } 2158 #endif /* CPU_ARM2 */ 2159 2160 #ifdef CPU_ARM250 2161 u_int arm250_id(void) 2162 { 2163 2164 return CPU_ID_ARM250; 2165 } 2166 #endif /* CPU_ARM250 */ 2167 2168 /* 2169 * Fixup routines for data and prefetch aborts. 2170 * 2171 * Several compile-time symbols are used: 2172 * 2173 * DEBUG_FAULT_CORRECTION - Print debugging information during the 2174 * correction of registers after a fault. 2175 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts; 2176 * when defined, late aborts are used. 2177 */ 2178 2179 2180 /* 2181 * Null abort fixup routine. 2182 * For use when no fixup is required. 2183 */ 2184 int 2185 cpufunc_null_fixup(void *arg) 2186 { 2187 return(ABORT_FIXUP_OK); 2188 } 2189 2190 2191 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \ 2192 defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) 2193 2194 #ifdef DEBUG_FAULT_CORRECTION 2195 #define DFC_PRINTF(x) printf x 2196 #define DFC_DISASSEMBLE(x) disassemble(x) 2197 #else 2198 #define DFC_PRINTF(x) /* nothing */ 2199 #define DFC_DISASSEMBLE(x) /* nothing */ 2200 #endif 2201 2202 /* 2203 * "Early" data abort fixup. 2204 * 2205 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used 2206 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI]. 2207 * 2208 * In early aborts, we may have to fix up LDM, STM, LDC and STC. 2209 */ 2210 int 2211 early_abort_fixup(void *arg) 2212 { 2213 trapframe_t *frame = arg; 2214 u_int fault_pc; 2215 u_int fault_instruction; 2216 int saved_lr = 0; 2217 2218 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 2219 2220 /* Ok, an abort in SVC mode */ 2221 2222 /* 2223 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 2224 * as the fault happened in svc mode but we need it in the 2225 * usr slot so we can treat the registers as an array of ints 2226 * during fixing. 2227 * NOTE: r15 (the PC) is not fixed up this way; writeback to 2228 * r15 is not allowed and such instructions are rejected. 2229 * Doing it like this is more efficient than trapping this 2230 * case in all possible locations in the following fixup code. 2231 */ 2232 2233 saved_lr = frame->tf_usr_lr; 2234 frame->tf_usr_lr = frame->tf_svc_lr; 2235 2236 /* 2237 * Note the trapframe does not have the SVC r13 so a fault 2238 * from an instruction with writeback to r13 in SVC mode is 2239 * not allowed. This should not happen as the kstack is 2240 * always valid.
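 *
 * With the usr slot patched, &frame->tf_r0 below can be indexed as
 * an array of registers, e.g. registers[base] for whatever base
 * register the faulting instruction names.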
2241 */ 2242 } 2243 2244 /* Get fault address and status from the CPU */ 2245 2246 fault_pc = frame->tf_pc; 2247 fault_instruction = *((volatile unsigned int *)fault_pc); 2248 2249 /* Decode the fault instruction and fix the registers as needed */ 2250 2251 if ((fault_instruction & 0x0e000000) == 0x08000000) { 2252 int base; 2253 int loop; 2254 int count; 2255 int *registers = &frame->tf_r0; 2256 2257 DFC_PRINTF(("LDM/STM\n")); 2258 DFC_DISASSEMBLE(fault_pc); 2259 if (fault_instruction & (1 << 21)) { 2260 DFC_PRINTF(("This instruction must be corrected\n")); 2261 base = (fault_instruction >> 16) & 0x0f; 2262 if (base == 15) 2263 return ABORT_FIXUP_FAILED; 2264 /* Count registers transferred */ 2265 count = 0; 2266 for (loop = 0; loop < 16; ++loop) { 2267 if (fault_instruction & (1<<loop)) 2268 ++count; 2269 } 2270 DFC_PRINTF(("%d registers used\n", count)); 2271 DFC_PRINTF(("Corrected r%d by %d bytes ", 2272 base, count * 4)); 2273 if (fault_instruction & (1 << 23)) { 2274 DFC_PRINTF(("down\n")); 2275 registers[base] -= count * 4; 2276 } else { 2277 DFC_PRINTF(("up\n")); 2278 registers[base] += count * 4; 2279 } 2280 } 2281 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) { 2282 int base; 2283 int offset; 2284 int *registers = &frame->tf_r0; 2285 2286 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */ 2287 2288 DFC_DISASSEMBLE(fault_pc); 2289 2290 /* Only need to fix registers if write back is turned on */ 2291 2292 if ((fault_instruction & (1 << 21)) != 0) { 2293 base = (fault_instruction >> 16) & 0x0f; 2294 if (base == 13 && 2295 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) 2296 return ABORT_FIXUP_FAILED; 2297 if (base == 15) 2298 return ABORT_FIXUP_FAILED; 2299 2300 offset = (fault_instruction & 0xff) << 2; 2301 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 2302 if ((fault_instruction & (1 << 23)) != 0) 2303 offset = -offset; 2304 registers[base] += offset; 2305 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 2306 } 2307 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) 2308 return ABORT_FIXUP_FAILED; 2309 2310 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 2311 2312 /* Ok an abort in SVC mode */ 2313 2314 /* 2315 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 2316 * as the fault happened in svc mode but we need it in the 2317 * usr slot so we can treat the registers as an array of ints 2318 * during fixing. 2319 * NOTE: This PC is in the position but writeback is not 2320 * allowed on r15. 2321 * Doing it like this is more efficient than trapping this 2322 * case in all possible locations in the prior fixup code. 2323 */ 2324 2325 frame->tf_svc_lr = frame->tf_usr_lr; 2326 frame->tf_usr_lr = saved_lr; 2327 2328 /* 2329 * Note the trapframe does not have the SVC r13 so a fault 2330 * from an instruction with writeback to r13 in SVC mode is 2331 * not allowed. This should not happen as the kstack is 2332 * always valid. 2333 */ 2334 } 2335 2336 return(ABORT_FIXUP_OK); 2337 } 2338 #endif /* CPU_ARM2/250/3/6/7 */ 2339 2340 2341 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \ 2342 defined(CPU_ARM7TDMI) 2343 /* 2344 * "Late" (base updated) data abort fixup 2345 * 2346 * For ARM6 (in late-abort mode) and ARM7. 2347 * 2348 * In this model, all data-transfer instructions need fixing up. We defer 2349 * LDM, STM, LDC and STC fixup to the early-abort handler. 
*/ 2351 int 2352 late_abort_fixup(void *arg) 2353 { 2354 trapframe_t *frame = arg; 2355 u_int fault_pc; 2356 u_int fault_instruction; 2357 int saved_lr = 0; 2358 2359 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 2360 2361 /* Ok, an abort in SVC mode */ 2362 2363 /* 2364 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 2365 * as the fault happened in svc mode but we need it in the 2366 * usr slot so we can treat the registers as an array of ints 2367 * during fixing. 2368 * NOTE: r15 (the PC) is not fixed up this way; writeback to 2369 * r15 is not allowed and such instructions are rejected. 2370 * Doing it like this is more efficient than trapping this 2371 * case in all possible locations in the following fixup code. 2372 */ 2373 2374 saved_lr = frame->tf_usr_lr; 2375 frame->tf_usr_lr = frame->tf_svc_lr; 2376 2377 /* 2378 * Note the trapframe does not have the SVC r13 so a fault 2379 * from an instruction with writeback to r13 in SVC mode is 2380 * not allowed. This should not happen as the kstack is 2381 * always valid. 2382 */ 2383 } 2384 2385 /* Get fault address and status from the CPU */ 2386 2387 fault_pc = frame->tf_pc; 2388 fault_instruction = *((volatile unsigned int *)fault_pc); 2389 2390 /* Decode the fault instruction and fix the registers as needed */ 2391 2392 /* Was it a swap instruction? */ 2393 2394 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) { 2395 DFC_DISASSEMBLE(fault_pc); 2396 } else if ((fault_instruction & 0x0c000000) == 0x04000000) { 2397 2398 /* Was it a ldr/str instruction? */ 2399 /* This is for late abort only */ 2400 2401 int base; 2402 int offset; 2403 int *registers = &frame->tf_r0; 2404 2405 DFC_DISASSEMBLE(fault_pc); 2406 2407 /* This is for late abort only */ 2408 2409 if ((fault_instruction & (1 << 24)) == 0 2410 || (fault_instruction & (1 << 21)) != 0) { 2411 /* post-indexed ldr/str, or pre-indexed with writeback */ 2412 2413 base = (fault_instruction >> 16) & 0x0f; 2414 if (base == 13 && 2415 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) 2416 return ABORT_FIXUP_FAILED; 2417 if (base == 15) 2418 return ABORT_FIXUP_FAILED; 2419 DFC_PRINTF(("late abt fix: r%d=%08x : ", 2420 base, registers[base])); 2421 if ((fault_instruction & (1 << 25)) == 0) { 2422 /* Immediate offset - easy */ 2423 2424 offset = fault_instruction & 0xfff; 2425 if ((fault_instruction & (1 << 23))) 2426 offset = -offset; 2427 registers[base] += offset; 2428 DFC_PRINTF(("imm=%08x ", offset)); 2429 } else { 2430 /* offset is a shifted register */ 2431 int shift; 2432 2433 offset = fault_instruction & 0x0f; 2434 if (offset == base) 2435 return ABORT_FIXUP_FAILED; 2436 2437 /* 2438 * Register offset - harder, we have to 2439 * cope with shifts!
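 *
 * (For illustration: "ldr r0, [r1, r2, lsl #2]!" encodes Rn=1 in
 * bits 19:16, Rm=2 in bits 3:0, the shift type in bits 6:5 and the
 * shift amount in bits 11:7, so the update undone below amounts to
 * r1 +/- (r2 << 2).)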
*/ 2441 offset = registers[offset]; 2442 2443 if ((fault_instruction & (1 << 4)) == 0) 2444 /* shift with amount */ 2445 shift = (fault_instruction >> 7) & 0x1f; 2446 else { 2447 /* shift with register */ 2448 if ((fault_instruction & (1 << 7)) != 0) 2449 /* undefined for now so bail out */ 2450 return ABORT_FIXUP_FAILED; 2451 shift = ((fault_instruction >> 8) & 0xf); 2452 if (base == shift) 2453 return ABORT_FIXUP_FAILED; 2454 DFC_PRINTF(("shift reg=%d ", shift)); 2455 shift = registers[shift]; 2456 } 2457 DFC_PRINTF(("shift=%08x ", shift)); 2458 switch (((fault_instruction >> 5) & 0x3)) { 2459 case 0 : /* Logical left */ 2460 offset = (int)(((u_int)offset) << shift); 2461 break; 2462 case 1 : /* Logical Right */ 2463 if (shift == 0) shift = 32; 2464 offset = (int)(((u_int)offset) >> shift); 2465 break; 2466 case 2 : /* Arithmetic Right */ 2467 if (shift == 0) shift = 32; 2468 offset = (int)(((int)offset) >> shift); 2469 break; 2470 case 3 : /* Rotate right (ror or rrx) */ 2471 return ABORT_FIXUP_FAILED; 2472 break; 2473 } 2474 2475 DFC_PRINTF(("abt: fixed LDR/STR with " 2476 "register offset\n")); 2477 if ((fault_instruction & (1 << 23))) 2478 offset = -offset; 2479 DFC_PRINTF(("offset=%08x ", offset)); 2480 registers[base] += offset; 2481 } 2482 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 2483 } 2484 } 2485 2486 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 2487 2488 /* Ok, an abort in SVC mode */ 2489 2490 /* 2491 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 2492 * as the fault happened in svc mode but we need it in the 2493 * usr slot so we can treat the registers as an array of ints 2494 * during fixing. 2495 * NOTE: r15 (the PC) is not fixed up this way; writeback to 2496 * r15 is not allowed and such instructions are rejected. 2497 * Doing it like this is more efficient than trapping this 2498 * case in all possible locations in the prior fixup code. 2499 */ 2500 2501 frame->tf_svc_lr = frame->tf_usr_lr; 2502 frame->tf_usr_lr = saved_lr; 2503 2504 /* 2505 * Note the trapframe does not have the SVC r13 so a fault 2506 * from an instruction with writeback to r13 in SVC mode is 2507 * not allowed. This should not happen as the kstack is 2508 * always valid. 2509 */ 2510 } 2511 2512 /* 2513 * Now let the early-abort fixup routine have a go, in case it 2514 * was an LDM, STM, LDC or STC that faulted.
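 *
 * The SVC/usr lr swap above has already been undone at this point,
 * so it is safe for early_abort_fixup() to perform and undo its
 * own swap.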
2515 */ 2516 2517 return early_abort_fixup(arg); 2518 } 2519 #endif /* CPU_ARM6(LATE)/7/7TDMI */ 2520 2521 /* 2522 * CPU Setup code 2523 */ 2524 2525 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \ 2526 defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \ 2527 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \ 2528 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 2529 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \ 2530 defined(CPU_ARM10) || defined(CPU_ARM11) || \ 2531 defined(CPU_FA526) || defined(CPU_CORTEX) || defined(CPU_SHEEVA) 2532 2533 #define IGN 0 2534 #define OR 1 2535 #define BIC 2 2536 2537 struct cpu_option { 2538 const char *co_name; 2539 int co_falseop; 2540 int co_trueop; 2541 int co_value; 2542 }; 2543 2544 static u_int parse_cpu_options(char *, struct cpu_option *, u_int); 2545 2546 static u_int 2547 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl) 2548 { 2549 int integer; 2550 2551 if (args == NULL) 2552 return(cpuctrl); 2553 2554 while (optlist->co_name) { 2555 if (get_bootconf_option(args, optlist->co_name, 2556 BOOTOPT_TYPE_BOOLEAN, &integer)) { 2557 if (integer) { 2558 if (optlist->co_trueop == OR) 2559 cpuctrl |= optlist->co_value; 2560 else if (optlist->co_trueop == BIC) 2561 cpuctrl &= ~optlist->co_value; 2562 } else { 2563 if (optlist->co_falseop == OR) 2564 cpuctrl |= optlist->co_value; 2565 else if (optlist->co_falseop == BIC) 2566 cpuctrl &= ~optlist->co_value; 2567 } 2568 } 2569 ++optlist; 2570 } 2571 return(cpuctrl); 2572 } 2573 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */ 2574 2575 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \ 2576 || defined(CPU_ARM8) 2577 struct cpu_option arm678_options[] = { 2578 #ifdef COMPAT_12 2579 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE }, 2580 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 2581 #endif /* COMPAT_12 */ 2582 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2583 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2584 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2585 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2586 { NULL, IGN, IGN, 0 } 2587 }; 2588 2589 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */ 2590 2591 #ifdef CPU_ARM6 2592 struct cpu_option arm6_options[] = { 2593 { "arm6.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2594 { "arm6.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2595 { "arm6.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2596 { "arm6.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2597 { NULL, IGN, IGN, 0 } 2598 }; 2599 2600 void 2601 arm6_setup(char *args) 2602 { 2603 2604 /* Set up default control registers bits */ 2605 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2606 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2607 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2608 #if 0 2609 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2610 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2611 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2612 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE 2613 | CPU_CONTROL_AFLT_ENABLE; 2614 #endif 2615 2616 #ifdef ARM6_LATE_ABORT 2617 cpuctrl |= CPU_CONTROL_LABT_ENABLE; 2618 #endif /* ARM6_LATE_ABORT */ 2619 2620 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2621 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2622 #endif 2623 2624 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2625 cpuctrl = 
parse_cpu_options(args, arm6_options, cpuctrl); 2626 2627 #ifdef __ARMEB__ 2628 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2629 #endif 2630 2631 /* Clear out the cache */ 2632 cpu_idcache_wbinv_all(); 2633 2634 /* Set the control register */ 2635 curcpu()->ci_ctrl = cpuctrl; 2636 cpu_control(0xffffffff, cpuctrl); 2637 } 2638 #endif /* CPU_ARM6 */ 2639 2640 #ifdef CPU_ARM7 2641 struct cpu_option arm7_options[] = { 2642 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2643 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2644 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2645 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2646 #ifdef COMPAT_12 2647 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK }, 2648 #endif /* COMPAT_12 */ 2649 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK }, 2650 { NULL, IGN, IGN, 0 } 2651 }; 2652 2653 void 2654 arm7_setup(char *args) 2655 { 2656 2657 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2658 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2659 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2660 #if 0 2661 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2662 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2663 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2664 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE 2665 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE 2666 | CPU_CONTROL_AFLT_ENABLE; 2667 #endif 2668 2669 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2670 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2671 #endif 2672 2673 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2674 cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl); 2675 2676 #ifdef __ARMEB__ 2677 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2678 #endif 2679 2680 /* Clear out the cache */ 2681 cpu_idcache_wbinv_all(); 2682 2683 /* Set the control register */ 2684 curcpu()->ci_ctrl = cpuctrl; 2685 cpu_control(0xffffffff, cpuctrl); 2686 } 2687 #endif /* CPU_ARM7 */ 2688 2689 #ifdef CPU_ARM7TDMI 2690 struct cpu_option arm7tdmi_options[] = { 2691 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2692 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2693 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2694 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2695 #ifdef COMPAT_12 2696 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK }, 2697 #endif /* COMPAT_12 */ 2698 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK }, 2699 { NULL, IGN, IGN, 0 } 2700 }; 2701 2702 void 2703 arm7tdmi_setup(char *args) 2704 { 2705 int cpuctrl; 2706 2707 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2708 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2709 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2710 2711 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2712 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl); 2713 2714 #ifdef __ARMEB__ 2715 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2716 #endif 2717 2718 /* Clear out the cache */ 2719 cpu_idcache_wbinv_all(); 2720 2721 /* Set the control register */ 2722 curcpu()->ci_ctrl = cpuctrl; 2723 cpu_control(0xffffffff, cpuctrl); 2724 } 2725 #endif /* CPU_ARM7TDMI */ 2726 2727 #ifdef CPU_ARM8 2728 struct cpu_option arm8_options[] = { 2729 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2730 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2731 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2732 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2733 #ifdef COMPAT_12 2734 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2735 #endif /* COMPAT_12 */ 2736 
{ "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2737 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2738 { NULL, IGN, IGN, 0 } 2739 }; 2740 2741 void 2742 arm8_setup(char *args) 2743 { 2744 int integer; 2745 int clocktest; 2746 int setclock = 0; 2747 2748 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2749 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2750 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2751 #if 0 2752 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2753 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2754 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2755 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE 2756 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE; 2757 #endif 2758 2759 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2760 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2761 #endif 2762 2763 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2764 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl); 2765 2766 #ifdef __ARMEB__ 2767 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2768 #endif 2769 2770 /* Get clock configuration */ 2771 clocktest = arm8_clock_config(0, 0) & 0x0f; 2772 2773 /* Special ARM8 clock and test configuration */ 2774 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2775 clocktest = 0; 2776 setclock = 1; 2777 } 2778 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2779 if (integer) 2780 clocktest |= 0x01; 2781 else 2782 clocktest &= ~(0x01); 2783 setclock = 1; 2784 } 2785 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2786 if (integer) 2787 clocktest |= 0x02; 2788 else 2789 clocktest &= ~(0x02); 2790 setclock = 1; 2791 } 2792 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) { 2793 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2; 2794 setclock = 1; 2795 } 2796 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) { 2797 clocktest |= (integer & 7) << 5; 2798 setclock = 1; 2799 } 2800 2801 /* Clear out the cache */ 2802 cpu_idcache_wbinv_all(); 2803 2804 /* Set the control register */ 2805 curcpu()->ci_ctrl = cpuctrl; 2806 cpu_control(0xffffffff, cpuctrl); 2807 2808 /* Set the clock/test register */ 2809 if (setclock) 2810 arm8_clock_config(0x7f, clocktest); 2811 } 2812 #endif /* CPU_ARM8 */ 2813 2814 #ifdef CPU_ARM9 2815 struct cpu_option arm9_options[] = { 2816 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2817 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2818 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2819 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2820 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2821 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2822 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2823 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2824 { NULL, IGN, IGN, 0 } 2825 }; 2826 2827 void 2828 arm9_setup(char *args) 2829 { 2830 2831 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2832 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2833 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2834 | CPU_CONTROL_WBUF_ENABLE; 2835 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2836 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2837 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2838 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 2839 | 
CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2840 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC 2841 | CPU_CONTROL_ROUNDROBIN; 2842 2843 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2844 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2845 #endif 2846 2847 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl); 2848 2849 #ifdef __ARMEB__ 2850 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2851 #endif 2852 2853 #ifndef ARM_HAS_VBAR 2854 if (vector_page == ARM_VECTORS_HIGH) 2855 cpuctrl |= CPU_CONTROL_VECRELOC; 2856 #endif 2857 2858 /* Clear out the cache */ 2859 cpu_idcache_wbinv_all(); 2860 2861 /* Set the control register */ 2862 curcpu()->ci_ctrl = cpuctrl; 2863 cpu_control(cpuctrlmask, cpuctrl); 2864 2865 } 2866 #endif /* CPU_ARM9 */ 2867 2868 #if defined(CPU_ARM9E) || defined(CPU_ARM10) 2869 struct cpu_option arm10_options[] = { 2870 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2871 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2872 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2873 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2874 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2875 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2876 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2877 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2878 { NULL, IGN, IGN, 0 } 2879 }; 2880 2881 void 2882 arm10_setup(char *args) 2883 { 2884 2885 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2886 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2887 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE; 2888 #if 0 2889 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2890 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2891 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 2892 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2893 | CPU_CONTROL_BPRD_ENABLE 2894 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 2895 #endif 2896 2897 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2898 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2899 #endif 2900 2901 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl); 2902 2903 #ifdef __ARMEB__ 2904 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2905 #endif 2906 2907 #ifndef ARM_HAS_VBAR 2908 if (vector_page == ARM_VECTORS_HIGH) 2909 cpuctrl |= CPU_CONTROL_VECRELOC; 2910 #endif 2911 2912 /* Clear out the cache */ 2913 cpu_idcache_wbinv_all(); 2914 2915 /* Now really make sure they are clean. */ 2916 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2917 2918 /* Set the control register */ 2919 curcpu()->ci_ctrl = cpuctrl; 2920 cpu_control(0xffffffff, cpuctrl); 2921 2922 /* And again. 
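 * Flushing once more after the control register write discards
 * anything the caches may have picked up while the old settings
 * were still in effect.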
*/ 2923 cpu_idcache_wbinv_all(); 2924 } 2925 #endif /* CPU_ARM9E || CPU_ARM10 */ 2926 2927 #if defined(CPU_ARM11) 2928 struct cpu_option arm11_options[] = { 2929 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2930 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2931 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2932 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2933 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2934 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2935 { "arm11.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2936 { NULL, IGN, IGN, 0 } 2937 }; 2938 2939 void 2940 arm11_setup(char *args) 2941 { 2942 2943 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2944 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2945 /* | CPU_CONTROL_BPRD_ENABLE */; 2946 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2947 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2948 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE 2949 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2950 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 2951 2952 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2953 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2954 #endif 2955 2956 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); 2957 2958 #ifdef __ARMEB__ 2959 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2960 #endif 2961 2962 #ifndef ARM_HAS_VBAR 2963 if (vector_page == ARM_VECTORS_HIGH) 2964 cpuctrl |= CPU_CONTROL_VECRELOC; 2965 #endif 2966 2967 /* Clear out the cache */ 2968 cpu_idcache_wbinv_all(); 2969 2970 /* Now really make sure they are clean. */ 2971 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2972 2973 /* Allow detection code to find the VFP if it's fitted. */ 2974 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 2975 2976 /* Set the control register */ 2977 curcpu()->ci_ctrl = cpuctrl; 2978 cpu_control(cpuctrlmask, cpuctrl); 2979 2980 /* And again. */ 2981 cpu_idcache_wbinv_all(); 2982 } 2983 #endif /* CPU_ARM11 */ 2984 2985 #if defined(CPU_ARM11MPCORE) 2986 2987 void 2988 arm11mpcore_setup(char *args) 2989 { 2990 2991 int cpuctrl = CPU_CONTROL_IC_ENABLE 2992 | CPU_CONTROL_DC_ENABLE 2993 | CPU_CONTROL_BPRD_ENABLE ; 2994 int cpuctrlmask = CPU_CONTROL_IC_ENABLE 2995 | CPU_CONTROL_DC_ENABLE 2996 | CPU_CONTROL_BPRD_ENABLE 2997 | CPU_CONTROL_AFLT_ENABLE 2998 | CPU_CONTROL_VECRELOC; 2999 3000 #ifdef ARM11MPCORE_MMU_COMPAT 3001 /* XXX: S and R? */ 3002 #endif 3003 3004 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3005 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3006 #endif 3007 3008 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); 3009 3010 #ifndef ARM_HAS_VBAR 3011 if (vector_page == ARM_VECTORS_HIGH) 3012 cpuctrl |= CPU_CONTROL_VECRELOC; 3013 #endif 3014 3015 /* Clear out the cache */ 3016 cpu_idcache_wbinv_all(); 3017 3018 /* Now really make sure they are clean. */ 3019 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 3020 3021 /* Allow detection code to find the VFP if it's fitted. */ 3022 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 3023 3024 /* Set the control register */ 3025 curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl); 3026 3027 /* And again. 
*/ 3028 cpu_idcache_wbinv_all(); 3029 } 3030 #endif /* CPU_ARM11MPCORE */ 3031 3032 #ifdef CPU_PJ4B 3033 void 3034 pj4bv7_setup(char *args) 3035 { 3036 int cpuctrl; 3037 3038 pj4b_config(); 3039 3040 cpuctrl = CPU_CONTROL_MMU_ENABLE; 3041 #ifdef ARM32_DISABLE_ALIGNMENT_FAULTS 3042 cpuctrl |= CPU_CONTROL_UNAL_ENABLE; 3043 #else 3044 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3045 #endif 3046 cpuctrl |= CPU_CONTROL_DC_ENABLE; 3047 cpuctrl |= CPU_CONTROL_IC_ENABLE; 3048 cpuctrl |= (0xf << 3); 3049 cpuctrl |= CPU_CONTROL_BPRD_ENABLE; 3050 cpuctrl |= (0x5 << 16) | (1 << 22); 3051 cpuctrl |= CPU_CONTROL_XP_ENABLE; 3052 3053 #ifndef ARM_HAS_VBAR 3054 if (vector_page == ARM_VECTORS_HIGH) 3055 cpuctrl |= CPU_CONTROL_VECRELOC; 3056 #endif 3057 3058 /* Clear out the cache */ 3059 cpu_idcache_wbinv_all(); 3060 3061 /* Set the control register */ 3062 cpu_control(0xffffffff, cpuctrl); 3063 3064 /* And again. */ 3065 cpu_idcache_wbinv_all(); 3066 3067 curcpu()->ci_ctrl = cpuctrl; 3068 } 3069 #endif /* CPU_PJ4B */ 3070 3071 #if defined(CPU_CORTEX) 3072 struct cpu_option armv7_options[] = { 3073 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3074 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3075 { "armv7.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3076 { "armv7.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3077 { "armv7.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3078 { NULL, IGN, IGN, 0} 3079 }; 3080 3081 void 3082 armv7_setup(char *args) 3083 { 3084 3085 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE 3086 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE 3087 #ifdef __ARMEB__ 3088 | CPU_CONTROL_EX_BEND 3089 #endif 3090 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3091 | CPU_CONTROL_AFLT_ENABLE 3092 #endif 3093 | CPU_CONTROL_UNAL_ENABLE; 3094 3095 int cpuctrlmask = cpuctrl | CPU_CONTROL_AFLT_ENABLE; 3096 3097 3098 cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl); 3099 3100 #ifndef ARM_HAS_VBAR 3101 if (vector_page == ARM_VECTORS_HIGH) 3102 cpuctrl |= CPU_CONTROL_VECRELOC; 3103 #endif 3104 3105 /* Clear out the cache */ 3106 cpu_idcache_wbinv_all(); 3107 3108 /* Set the control register */ 3109 curcpu()->ci_ctrl = cpuctrl; 3110 cpu_control(cpuctrlmask, cpuctrl); 3111 } 3112 #endif /* CPU_CORTEX */ 3113 3114 3115 #if defined(CPU_ARM1136) || defined(CPU_ARM1176) 3116 void 3117 arm11x6_setup(char *args) 3118 { 3119 int cpuctrl, cpuctrl_wax; 3120 uint32_t auxctrl, auxctrl_wax; 3121 uint32_t tmp, tmp2; 3122 uint32_t sbz=0; 3123 uint32_t cpuid; 3124 3125 cpuid = cpu_id(); 3126 3127 cpuctrl = 3128 CPU_CONTROL_MMU_ENABLE | 3129 CPU_CONTROL_DC_ENABLE | 3130 CPU_CONTROL_WBUF_ENABLE | 3131 CPU_CONTROL_32BP_ENABLE | 3132 CPU_CONTROL_32BD_ENABLE | 3133 CPU_CONTROL_LABT_ENABLE | 3134 CPU_CONTROL_SYST_ENABLE | 3135 CPU_CONTROL_UNAL_ENABLE | 3136 CPU_CONTROL_IC_ENABLE; 3137 3138 /* 3139 * "write as existing" bits 3140 * inverse of this is mask 3141 */ 3142 cpuctrl_wax = 3143 (3 << 30) | 3144 (1 << 29) | 3145 (1 << 28) | 3146 (3 << 26) | 3147 (3 << 19) | 3148 (1 << 17); 3149 3150 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3151 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3152 #endif 3153 3154 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); 3155 3156 #ifdef __ARMEB__ 3157 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3158 #endif 3159 3160 #ifndef ARM_HAS_VBAR 3161 if (vector_page == ARM_VECTORS_HIGH) 3162 cpuctrl |= CPU_CONTROL_VECRELOC; 3163 #endif 3164 3165 auxctrl = 0; 3166 auxctrl_wax = ~0; 3167 /* 3168 * This option enables the workaround
for the 364296 ARM1136 3169 * r0pX errata (possible cache data corruption with 3170 * hit-under-miss enabled). It sets the undocumented bit 31 in 3171 * the auxiliary control register and the FI bit in the control 3172 * register, thus disabling hit-under-miss without putting the 3173 * processor into full low interrupt latency mode. ARM11MPCore 3174 * is not affected. 3175 */ 3176 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */ 3177 cpuctrl |= CPU_CONTROL_FI_ENABLE; 3178 auxctrl = ARM1136_AUXCTL_PFI; 3179 auxctrl_wax = ~ARM1136_AUXCTL_PFI; 3180 } 3181 3182 /* 3183 * Enable an errata workaround 3184 */ 3185 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */ 3186 auxctrl = ARM1176_AUXCTL_PHD; 3187 auxctrl_wax = ~ARM1176_AUXCTL_PHD; 3188 } 3189 3190 /* Clear out the cache */ 3191 cpu_idcache_wbinv_all(); 3192 3193 /* Now really make sure they are clean. */ 3194 __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz)); 3195 3196 /* Allow detection code to find the VFP if it's fitted. */ 3197 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 3198 3199 /* Set the control register */ 3200 curcpu()->ci_ctrl = cpuctrl; 3201 cpu_control(~cpuctrl_wax, cpuctrl); 3202 3203 __asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t" 3204 "and %1, %0, %2\n\t" 3205 "orr %1, %1, %3\n\t" 3206 "teq %0, %1\n\t" 3207 "mcrne p15, 0, %1, c1, c0, 1\n\t" 3208 : "=r"(tmp), "=r"(tmp2) : 3209 "r"(auxctrl_wax), "r"(auxctrl)); 3210 3211 /* And again. */ 3212 cpu_idcache_wbinv_all(); 3213 } 3214 #endif /* CPU_ARM1136 || CPU_ARM1176 */ 3215 3216 #ifdef CPU_SA110 3217 struct cpu_option sa110_options[] = { 3218 #ifdef COMPAT_12 3219 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3220 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 3221 #endif /* COMPAT_12 */ 3222 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3223 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3224 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3225 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3226 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3227 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3228 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3229 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3230 { NULL, IGN, IGN, 0 } 3231 }; 3232 3233 void 3234 sa110_setup(char *args) 3235 { 3236 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3237 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3238 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3239 | CPU_CONTROL_WBUF_ENABLE; 3240 #if 0 3241 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3242 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3243 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3244 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3245 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3246 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3247 | CPU_CONTROL_CPCLK; 3248 #endif 3249 3250 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3251 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3252 #endif 3253 3254 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl); 3255 3256 #ifdef __ARMEB__ 3257 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3258 #endif 3259 3260 #ifndef ARM_HAS_VBAR 3261 if (vector_page == ARM_VECTORS_HIGH) 3262 cpuctrl |= CPU_CONTROL_VECRELOC; 3263 #endif 3264 3265 /* Clear out the cache */ 3266 cpu_idcache_wbinv_all(); 3267 3268 /* Set the control 
register */ 3269 curcpu()->ci_ctrl = cpuctrl; 3270 #if 0 3271 cpu_control(cpuctrlmask, cpuctrl); 3272 #endif 3273 cpu_control(0xffffffff, cpuctrl); 3274 3275 /* 3276 * Enable clock switching. Note that this doesn't read or write r0; 3277 * r0 is just there to make the asm valid. 3278 */ 3279 __asm volatile ("mcr p15, 0, r0, c15, c1, 2"); 3280 } 3281 #endif /* CPU_SA110 */ 3282 3283 #if defined(CPU_SA1100) || defined(CPU_SA1110) 3284 struct cpu_option sa11x0_options[] = { 3285 #ifdef COMPAT_12 3286 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3287 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 3288 #endif /* COMPAT_12 */ 3289 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3290 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3291 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3292 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3293 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3294 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3295 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3296 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3297 { NULL, IGN, IGN, 0 } 3298 }; 3299 3300 void 3301 sa11x0_setup(char *args) 3302 { 3303 3304 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3305 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3306 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3307 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE; 3308 #if 0 3309 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3310 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3311 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3312 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3313 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3314 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3315 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 3316 #endif 3317 3318 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3319 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3320 #endif 3321 3322 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl); 3323 3324 #ifdef __ARMEB__ 3325 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3326 #endif 3327 3328 #ifndef ARM_HAS_VBAR 3329 if (vector_page == ARM_VECTORS_HIGH) 3330 cpuctrl |= CPU_CONTROL_VECRELOC; 3331 #endif 3332 3333 /* Clear out the cache */ 3334 cpu_idcache_wbinv_all(); 3335 3336 /* Set the control register */ 3337 curcpu()->ci_ctrl = cpuctrl; 3338 cpu_control(0xffffffff, cpuctrl); 3339 } 3340 #endif /* CPU_SA1100 || CPU_SA1110 */ 3341 3342 #if defined(CPU_FA526) 3343 struct cpu_option fa526_options[] = { 3344 #ifdef COMPAT_12 3345 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3346 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 3347 #endif /* COMPAT_12 */ 3348 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3349 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3350 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3351 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3352 { NULL, IGN, IGN, 0 } 3353 }; 3354 3355 void 3356 fa526_setup(char *args) 3357 { 3358 3359 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3360 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3361 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3362 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE; 3363 #if 0 3364 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3365 | CPU_CONTROL_32BD_ENABLE |
CPU_CONTROL_SYST_ENABLE 3366 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3367 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3368 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3369 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3370 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 3371 #endif 3372 3373 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3374 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3375 #endif 3376 3377 cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl); 3378 3379 #ifdef __ARMEB__ 3380 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3381 #endif 3382 3383 #ifndef ARM_HAS_VBAR 3384 if (vector_page == ARM_VECTORS_HIGH) 3385 cpuctrl |= CPU_CONTROL_VECRELOC; 3386 #endif 3387 3388 /* Clear out the cache */ 3389 cpu_idcache_wbinv_all(); 3390 3391 /* Set the control register */ 3392 curcpu()->ci_ctrl = cpuctrl; 3393 cpu_control(0xffffffff, cpuctrl); 3394 } 3395 #endif /* CPU_FA526 */ 3396 3397 #if defined(CPU_IXP12X0) 3398 struct cpu_option ixp12x0_options[] = { 3399 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3400 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3401 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3402 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3403 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3404 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3405 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3406 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3407 { NULL, IGN, IGN, 0 } 3408 }; 3409 3410 void 3411 ixp12x0_setup(char *args) 3412 { 3413 3414 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE 3415 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE 3416 | CPU_CONTROL_IC_ENABLE; 3417 3418 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE 3419 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE 3420 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE 3421 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE 3422 | CPU_CONTROL_VECRELOC; 3423 3424 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3425 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3426 #endif 3427 3428 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl); 3429 3430 #ifdef __ARMEB__ 3431 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3432 #endif 3433 3434 #ifndef ARM_HAS_VBAR 3435 if (vector_page == ARM_VECTORS_HIGH) 3436 cpuctrl |= CPU_CONTROL_VECRELOC; 3437 #endif 3438 3439 /* Clear out the cache */ 3440 cpu_idcache_wbinv_all(); 3441 3442 /* Set the control register */ 3443 curcpu()->ci_ctrl = cpuctrl; 3444 /* cpu_control(0xffffffff, cpuctrl); */ 3445 cpu_control(cpuctrlmask, cpuctrl); 3446 } 3447 #endif /* CPU_IXP12X0 */ 3448 3449 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 3450 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || defined(CPU_CORTEX) 3451 struct cpu_option xscale_options[] = { 3452 #ifdef COMPAT_12 3453 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 3454 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3455 #endif /* COMPAT_12 */ 3456 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 3457 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3458 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3459 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 3460 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3461 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3462 { "xscale.dcache", BIC, OR, 
CPU_CONTROL_DC_ENABLE }, 3463 { NULL, IGN, IGN, 0 } 3464 }; 3465 3466 void 3467 xscale_setup(char *args) 3468 { 3469 uint32_t auxctl; 3470 3471 /* 3472 * The XScale Write Buffer is always enabled. Our option 3473 * is to enable/disable coalescing. Note that bits 6:3 3474 * must always be enabled. 3475 */ 3476 3477 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3478 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3479 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3480 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE 3481 | CPU_CONTROL_BPRD_ENABLE; 3482 #if 0 3483 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3484 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3485 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3486 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3487 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3488 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3489 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 3490 #endif 3491 3492 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3493 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3494 #endif 3495 3496 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl); 3497 3498 #ifdef __ARMEB__ 3499 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3500 #endif 3501 3502 #ifndef ARM_HAS_VBAR 3503 if (vector_page == ARM_VECTORS_HIGH) 3504 cpuctrl |= CPU_CONTROL_VECRELOC; 3505 #endif 3506 3507 /* Clear out the cache */ 3508 cpu_idcache_wbinv_all(); 3509 3510 /* 3511 * Set the control register. Note that bits 6:3 must always 3512 * be set to 1. 3513 */ 3514 curcpu()->ci_ctrl = cpuctrl; 3515 #if 0 3516 cpu_control(cpuctrlmask, cpuctrl); 3517 #endif 3518 cpu_control(0xffffffff, cpuctrl); 3519 3520 /* Make sure write coalescing is turned on */ 3521 __asm volatile("mrc p15, 0, %0, c1, c0, 1" 3522 : "=r" (auxctl)); 3523 #ifdef XSCALE_NO_COALESCE_WRITES 3524 auxctl |= XSCALE_AUXCTL_K; 3525 #else 3526 auxctl &= ~XSCALE_AUXCTL_K; 3527 #endif 3528 __asm volatile("mcr p15, 0, %0, c1, c0, 1" 3529 : : "r" (auxctl)); 3530 } 3531 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */ 3532 3533 #if defined(CPU_SHEEVA) 3534 struct cpu_option sheeva_options[] = { 3535 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3536 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3537 { "sheeva.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3538 { "sheeva.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3539 { "sheeva.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3540 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3541 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3542 { "sheeva.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3543 { NULL, IGN, IGN, 0 } 3544 }; 3545 3546 void 3547 sheeva_setup(char *args) 3548 { 3549 uint32_t sheeva_ext; 3550 3551 int cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 3552 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3553 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE; 3554 #if 0 3555 int cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 3556 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3557 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3558 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3559 | CPU_CONTROL_BPRD_ENABLE 3560 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 3561 #endif 3562 3563 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3564 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3565 #endif 3566 3567 cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl); 3568 
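	/*
	 * (For example, booting with "sheeva.dcache=0" in the boot
	 * arguments, assuming the boot loader passes options in the
	 * form get_bootconf_option() expects, is matched by the table
	 * above and BICs CPU_CONTROL_DC_ENABLE back out of cpuctrl.)
	 */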
3569 /* Enable DCache Streaming Switch and Write Allocate */ 3570 __asm volatile("mrc p15, 1, %0, c15, c1, 0" 3571 : "=r" (sheeva_ext)); 3572 3573 sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN; 3574 3575 __asm volatile("mcr p15, 1, %0, c15, c1, 0" 3576 :: "r" (sheeva_ext)); 3577 3578 /* 3579 * Sheeva has L2 Cache. Enable/Disable it here. 3580 * Really not supported yet... 3581 */ 3582 3583 #ifdef __ARMEB__ 3584 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3585 #endif 3586 3587 #ifndef ARM_HAS_VBAR 3588 if (vector_page == ARM_VECTORS_HIGH) 3589 cpuctrl |= CPU_CONTROL_VECRELOC; 3590 #endif 3591 3592 /* Clear out the cache */ 3593 cpu_idcache_wbinv_all(); 3594 3595 /* Now really make sure they are clean. */ 3596 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 3597 3598 /* Set the control register */ 3599 curcpu()->ci_ctrl = cpuctrl; 3600 cpu_control(0xffffffff, cpuctrl); 3601 3602 /* And again. */ 3603 cpu_idcache_wbinv_all(); 3604 } 3605 #endif /* CPU_SHEEVA */ 3606
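#if 0
/*
 * Illustrative sketch only (not compiled in): roughly how the
 * routines above are tied together on the usual arm32 boot path.
 * The exact call sites are board-dependent; cpu_setup() is assumed
 * to be the cpufunc.h wrapper that dispatches to cpufuncs.cf_setup.
 */
static void
cpufunc_boot_example(char *bootargs)
{

	if (set_cpufuncs() != 0)	/* pick the cpufuncs vector */
		panic("cpu not recognized!");
	cpu_setup(bootargs);		/* run the matching *_setup() */
}
#endif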