/*	$NetBSD: cpufunc.c,v 1.119 2012/12/28 03:48:00 msaitoh Exp $	*/

/*
 * arm7tdmi support code Copyright (c) 2001 John Fremlin
 * arm8 support code Copyright (c) 1997 ARM Limited
 * arm8 support code Copyright (c) 1997 Causality Limited
 * arm9 support code Copyright (C) 2001 ARM Ltd
 * arm11 support code Copyright (c) 2007 Microsoft
 * cortexa8 support code Copyright (c) 2008 3am Software Foundry
 * cortexa8 improvements Copyright (c) Goeran Weinholt
 * Copyright (c) 1997 Mark Brinicombe.
 * Copyright (c) 1997 Causality Limited
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Causality Limited.
 * 4. The name of Causality Limited may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpufuncs.c
 *
 * C functions for supporting CPU / MMU / TLB specific operations.
 *
 * Created      : 30/01/97
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.119 2012/12/28 03:48:00 msaitoh Exp $");

#include "opt_compat_netbsd.h"
#include "opt_cpuoptions.h"
#include "opt_perfctrs.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/pmc.h>
#include <sys/systm.h>
#include <machine/cpu.h>
#include <machine/bootconfig.h>
#include <arch/arm/arm/disassem.h>

#include <uvm/uvm.h>

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>

#ifdef CPU_XSCALE_80200
#include <arm/xscale/i80200reg.h>
#include <arm/xscale/i80200var.h>
#endif

#ifdef CPU_XSCALE_80321
#include <arm/xscale/i80321reg.h>
#include <arm/xscale/i80321var.h>
#endif

#ifdef CPU_XSCALE_IXP425
#include <arm/xscale/ixp425reg.h>
#include <arm/xscale/ixp425var.h>
#endif

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321)
#include <arm/xscale/xscalereg.h>
#endif

#if defined(PERFCTRS)
struct arm_pmc_funcs *arm_pmc;
#endif

/* PRIMARY CACHE VARIABLES */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
u_int	arm_cache_prefer_mask;
#endif
struct	arm_cache_info arm_pcache;
struct	arm_cache_info arm_scache;

u_int	arm_dcache_align;
u_int	arm_dcache_align_mask;

/* 1 == use cpu_sleep(), 0 == don't */
int	cpu_do_powersave;

#ifdef CPU_ARM2
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm2_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM2 */

#ifdef CPU_ARM250
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm250_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM250 */

#ifdef CPU_ARM3
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= arm3_control,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm3_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM3 */

#ifdef CPU_ARM6
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range	= (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
#endif	/* CPU_ARM8 */

#ifdef CPU_ARM9
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif /* CPU_ARM10 */

#ifdef CPU_ARM11
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
#endif /* CPU_ARM11 */

#ifdef CPU_ARM1136
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11x6_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range	= arm11x6_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,		/* arm1136_sleep_rev0 */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1136 */

#ifdef CPU_ARM1176
struct cpu_functions arm1176_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11x6_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm11x6_icache_sync_all,	/* 415045 */
	.cf_icache_sync_range	= arm11x6_icache_sync_range,	/* 371367 */

	.cf_dcache_wbinv_all	= arm11x6_dcache_wbinv_all,	/* 415045 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm11x6_idcache_wbinv_all,	/* 415045 */
	.cf_idcache_wbinv_range	= arm11x6_idcache_wbinv_range,	/* 371367 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm11x6_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11x6_sleep,	/* no ref. */

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11x6_setup

};
#endif /* CPU_ARM1176 */


#ifdef CPU_ARM11MPCORE
struct cpu_functions arm11mpcore_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv5_dcache_inv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11mpcore_setup

};
#endif /* CPU_ARM11MPCORE */

#ifdef CPU_SA110
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */

#ifdef CPU_IXP12X0
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif	/* CPU_IXP12X0 */

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range	= xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */

#if defined(CPU_CORTEX)
struct cpu_functions cortex_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,

	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_icache_sync_range	= armv7_icache_sync_range,
	.cf_idcache_wbinv_range	= armv7_idcache_wbinv_range,

	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv7_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
#endif /* CPU_CORTEX */

#ifdef CPU_SHEEVA
struct cpu_functions sheeva_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= sheeva_dcache_wbinv_range,
	.cf_dcache_inv_range	= sheeva_dcache_inv_range,
	.cf_dcache_wb_range	= sheeva_dcache_wb_range,

	.cf_sdcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_inv_range	= (void *)cpufunc_nullop,
	.cf_sdcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= sheeva_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)sheeva_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= sheeva_setup
};
#endif /* CPU_SHEEVA */


/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;
u_int cputype;

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_ARM11) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_CORTEX) || defined(CPU_SHEEVA)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers. */
static int	arm_dcache_log2_nsets;
static int	arm_dcache_log2_assoc;
static int	arm_dcache_log2_linesize;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static inline u_int
get_cachesize_cp15(int cssr)
{
	u_int csid;

#if (CPU_CORTEX) > 0
	__asm volatile(".arch\tarmv7a");
	__asm volatile("mcr p15, 2, %0, c0, c0, 0" :: "r" (cssr));
	__asm volatile("isb");	/* sync to the new cssr */
#else
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr));
#endif
	__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid));
	return csid;
}
#endif
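/*
 * Editor's note (illustration, not part of the original source): the
 * cssr argument to get_cachesize_cp15() is the ARMv6/v7 cache size
 * selection value, encoded as (level << 1) | InD.  For example:
 *
 *	get_cachesize_cp15(0)			L1 data/unified cache
 *	get_cachesize_cp15(CPU_CSSR_InD)	L1 instruction cache
 *	get_cachesize_cp15(1 << 1)		L2 data/unified cache
 *
 * get_cacheinfo_clidr() below builds exactly these selectors from the
 * per-level CLIDR bits.
 */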
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
static void
get_cacheinfo_clidr(struct arm_cache_info *info, u_int level, u_int clidr)
{
	u_int csid;
	u_int nsets;

	if (clidr & 6) {
		csid = get_cachesize_cp15(level << 1); /* select L1 dcache values */
		nsets = CPU_CSID_NUMSETS(csid) + 1;
		info->dcache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->dcache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->dcache_size = info->dcache_line_size * info->dcache_ways * nsets;

		if (level == 0) {
			arm_dcache_log2_assoc = CPU_CSID_ASSOC(csid) + 1;
			arm_dcache_log2_linesize = CPU_CSID_LEN(csid) + 4;
			arm_dcache_log2_nsets = 31 - __builtin_clz(nsets);
		}
	}

	info->cache_unified = (clidr == 4);

	if (clidr & 1) {
		csid = get_cachesize_cp15((level << 1)|CPU_CSSR_InD); /* select L1 icache values */
		nsets = CPU_CSID_NUMSETS(csid) + 1;
		info->icache_ways = CPU_CSID_ASSOC(csid) + 1;
		info->icache_line_size = 1U << (CPU_CSID_LEN(csid) + 4);
		info->icache_size = info->icache_line_size * info->icache_ways * nsets;
	} else {
		info->icache_ways = info->dcache_ways;
		info->icache_line_size = info->dcache_line_size;
		info->icache_size = info->dcache_size;
	}
}
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) > 0 */

static void
get_cachetype_cp15(void)
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 *	If an <opcode2> value corresponding to an unimplemented or
	 *	reserved ID register is encountered, the System Control
	 *	processor returns the value of the main ID register.
	 */
	if (ctype == cpu_id())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int clidr = armreg_clidr_read();

		arm_cache_prefer_mask = PAGE_SIZE;
		arm_pcache.cache_type = CPU_CT_CTYPE_WB14;

		get_cacheinfo_clidr(&arm_pcache, 0, clidr & 7);
		arm_dcache_align = arm_pcache.dcache_line_size;
		clidr >>= 3;
		if (clidr & 7) {
			get_cacheinfo_clidr(&arm_scache, 1, clidr & 7);
			if (arm_scache.dcache_line_size < arm_dcache_align)
				arm_dcache_align = arm_scache.dcache_line_size;
		}
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	if ((ctype & CPU_CT_S) == 0)
		arm_pcache.cache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */
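	/*
	 * Editor's worked example for the decode below (hypothetical
	 * values): a dsize field with LEN=2, M=0, ASSOC=2, SIZE=5 gives
	 * multiplier = 2, line size = 1 << (2 + 3) = 32 bytes,
	 * 2 << (2 - 1) = 4 ways and 2 << (5 + 8) = 16384 bytes, i.e. a
	 * 16KB 4-way cache with 32-byte lines.
	 */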
	arm_pcache.cache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache.cache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_pcache.icache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_pcache.icache_line_size = 0; /* not present */
			else
				arm_pcache.icache_ways = 1;
		} else {
			arm_pcache.icache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_pcache.icache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pcache.dcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pcache.dcache_line_size = 0; /* not present */
		else
			arm_pcache.dcache_ways = 1;
	} else {
		arm_pcache.dcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
		if (CPU_CT_xSIZE_P & dsize)
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
#endif
	}
	arm_pcache.dcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pcache.dcache_line_size;

	arm_dcache_log2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_log2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_log2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */

#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without cache type registers. */
struct cachetab {
	uint32_t ct_cpuid;
	int	ct_pcache_type;
	int	ct_pcache_unified;
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
	/* cpuid,	cache type,	  u,  dsiz, ls, wy,  isiz, ls, wy */
	{ CPU_ID_ARM2,	    0,		  1,     0,  0,  0,     0,  0,  0 },
	{ CPU_ID_ARM250,    0,		  1,     0,  0,  0,     0,  0,  0 },
	{ CPU_ID_ARM3,	    CPU_CT_CTYPE_WT, 1,  4096, 16, 64,     0,  0,  0 },
	{ CPU_ID_ARM610,    CPU_CT_CTYPE_WT, 1,  4096, 16, 64,     0,  0,  0 },
	{ CPU_ID_ARM710,    CPU_CT_CTYPE_WT, 1,  8192, 32,  4,     0,  0,  0 },
	{ CPU_ID_ARM7500,   CPU_CT_CTYPE_WT, 1,  4096, 16,  4,     0,  0,  0 },
	{ CPU_ID_ARM710A,   CPU_CT_CTYPE_WT, 1,  8192, 16,  4,     0,  0,  0 },
	{ CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT, 1,  4096, 16,  4,     0,  0,  0 },
	/* XXX is this type right for SA-1? */
	{ CPU_ID_SA110,	    CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
	{ CPU_ID_SA1100,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
	{ CPU_ID_SA1110,    CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
	{ CPU_ID_IXP1200,   CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
	{ 0, 0, 0, 0, 0, 0, 0, 0}
};

static void get_cachetype_table(void);

static void
get_cachetype_table(void)
{
	int i;
	uint32_t cpuid = cpu_id();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache.cache_type = cachetab[i].ct_pcache_type;
			arm_pcache.cache_unified = cachetab[i].ct_pcache_unified;
			arm_pcache.dcache_size = cachetab[i].ct_pdcache_size;
			arm_pcache.dcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pcache.dcache_ways = cachetab[i].ct_pdcache_ways;
			arm_pcache.icache_size = cachetab[i].ct_picache_size;
			arm_pcache.icache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_pcache.icache_ways = cachetab[i].ct_picache_ways;
		}
	}

	arm_dcache_align = arm_pcache.dcache_line_size;
	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM2
	if (cputype == CPU_ID_ARM2) {
		cpufuncs = arm2_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM2 */
#ifdef CPU_ARM250
	if (cputype == CPU_ID_ARM250) {
		cpufuncs = arm250_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif
#ifdef CPU_ARM3
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif /* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		get_cachetype_cp15();
		arm9_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
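		/*
		 * Editor's note: for example (hypothetical geometry), a
		 * 16KB 4-way D$ with 32-byte lines (log2 linesize 5,
		 * log2 nsets 7, log2 assoc 2) yields sets_inc = 32,
		 * sets_max = 4096 - 32 = 4064, index_inc = 1 << 30 and
		 * index_max = 0xc0000000; these are the step and limit
		 * values the arm9 set/way cache loops iterate with.
		 */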
#ifdef ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		return 0;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_ARM926EJS ||
	    cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#if defined(CPU_SHEEVA)
	if (cputype == CPU_ID_MV88SV131 ||
	    cputype == CPU_ID_MV88FR571_VD) {
		cpufuncs = sheeva_cpufuncs;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		cpu_do_powersave = 1;			/* Enable powersave */
		return 0;
	}
#endif /* CPU_SHEEVA */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through caching (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		get_cachetype_cp15();
		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		armv5_dcache_sets_max =
		    (1U << (arm_dcache_log2_linesize + arm_dcache_log2_nsets)) -
		    armv5_dcache_sets_inc;
		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM10 */


#if defined(CPU_ARM11MPCORE)
	if (cputype == CPU_ID_ARM11MPCORE) {
		cpufuncs = arm11mpcore_cpufuncs;
		get_cachetype_cp15();
		armv5_dcache_sets_inc = 1U << arm_dcache_log2_linesize;
		armv5_dcache_sets_max = (1U << (arm_dcache_log2_linesize +
		    arm_dcache_log2_nsets)) - armv5_dcache_sets_inc;
		armv5_dcache_index_inc = 1U << (32 - arm_dcache_log2_assoc);
		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
		cpu_do_powersave = 1;			/* Enable powersave */
		pmap_pte_init_arm11mpcore();
		if (arm_cache_prefer_mask)
			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;

		return 0;

	}
#endif /* CPU_ARM11MPCORE */

#if defined(CPU_ARM11)
	if (cputype == CPU_ID_ARM1136JS ||
	    cputype == CPU_ID_ARM1136JSR1 ||
	    cputype == CPU_ID_ARM1176JZS) {
		cpufuncs = arm11_cpufuncs;
#if defined(CPU_ARM1136)
		if (cputype == CPU_ID_ARM1136JS ||
		    cputype == CPU_ID_ARM1136JSR1) {
			cpufuncs = arm1136_cpufuncs;
			if (cputype == CPU_ID_ARM1136JS)
				cpufuncs.cf_sleep = arm1136_sleep_rev0;
		}
#endif
#if defined(CPU_ARM1176)
		if (cputype == CPU_ID_ARM1176JZS) {
			cpufuncs = arm1176_cpufuncs;
		}
#endif
		cpu_do_powersave = 1;			/* Enable powersave */
		get_cachetype_cp15();
#ifdef ARM11_CACHE_WRITE_THROUGH
		pmap_pte_init_arm11();
#else
		pmap_pte_init_generic();
#endif
		if (arm_cache_prefer_mask)
			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;
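		/*
		 * Editor's note: arm_cache_prefer_mask holds the bits in
		 * which a VIPT cache index overlaps the physical page
		 * number.  E.g. a hypothetical mask of 0x3000 with 4KB
		 * pages (PGSHIFT == 12) yields (0x3000 >> 12) + 1 = 4
		 * page colours for UVM to keep distinct.
		 */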
1820 */ 1821 armreg_pmcrv6_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C); 1822 return 0; 1823 } 1824 #endif /* CPU_ARM11 */ 1825 #ifdef CPU_SA110 1826 if (cputype == CPU_ID_SA110) { 1827 cpufuncs = sa110_cpufuncs; 1828 get_cachetype_table(); 1829 pmap_pte_init_sa1(); 1830 return 0; 1831 } 1832 #endif /* CPU_SA110 */ 1833 #ifdef CPU_SA1100 1834 if (cputype == CPU_ID_SA1100) { 1835 cpufuncs = sa11x0_cpufuncs; 1836 get_cachetype_table(); 1837 pmap_pte_init_sa1(); 1838 1839 /* Use powersave on this CPU. */ 1840 cpu_do_powersave = 1; 1841 1842 return 0; 1843 } 1844 #endif /* CPU_SA1100 */ 1845 #ifdef CPU_SA1110 1846 if (cputype == CPU_ID_SA1110) { 1847 cpufuncs = sa11x0_cpufuncs; 1848 get_cachetype_table(); 1849 pmap_pte_init_sa1(); 1850 1851 /* Use powersave on this CPU. */ 1852 cpu_do_powersave = 1; 1853 1854 return 0; 1855 } 1856 #endif /* CPU_SA1110 */ 1857 #ifdef CPU_FA526 1858 if (cputype == CPU_ID_FA526) { 1859 cpufuncs = fa526_cpufuncs; 1860 get_cachetype_cp15(); 1861 pmap_pte_init_generic(); 1862 1863 /* Use powersave on this CPU. */ 1864 cpu_do_powersave = 1; 1865 1866 return 0; 1867 } 1868 #endif /* CPU_FA526 */ 1869 #ifdef CPU_IXP12X0 1870 if (cputype == CPU_ID_IXP1200) { 1871 cpufuncs = ixp12x0_cpufuncs; 1872 get_cachetype_table(); 1873 pmap_pte_init_sa1(); 1874 return 0; 1875 } 1876 #endif /* CPU_IXP12X0 */ 1877 #ifdef CPU_XSCALE_80200 1878 if (cputype == CPU_ID_80200) { 1879 int rev = cpufunc_id() & CPU_ID_REVISION_MASK; 1880 1881 i80200_icu_init(); 1882 1883 /* 1884 * Reset the Performance Monitoring Unit to a 1885 * pristine state: 1886 * - CCNT, PMN0, PMN1 reset to 0 1887 * - overflow indications cleared 1888 * - all counters disabled 1889 */ 1890 __asm volatile("mcr p14, 0, %0, c0, c0, 0" 1891 : 1892 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF| 1893 PMNC_CC_IF)); 1894 1895 #if defined(XSCALE_CCLKCFG) 1896 /* 1897 * Crank CCLKCFG to maximum legal value. 1898 */ 1899 __asm volatile ("mcr p14, 0, %0, c6, c0, 0" 1900 : 1901 : "r" (XSCALE_CCLKCFG)); 1902 #endif 1903 1904 /* 1905 * XXX Disable ECC in the Bus Controller Unit; we 1906 * don't really support it, yet. Clear any pending 1907 * error indications. 1908 */ 1909 __asm volatile("mcr p13, 0, %0, c0, c1, 0" 1910 : 1911 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV)); 1912 1913 cpufuncs = xscale_cpufuncs; 1914 #if defined(PERFCTRS) 1915 xscale_pmu_init(); 1916 #endif 1917 1918 /* 1919 * i80200 errata: Step-A0 and A1 have a bug where 1920 * D$ dirty bits are not cleared on "invalidate by 1921 * address". 1922 * 1923 * Workaround: Clean cache line before invalidating. 
1924 */ 1925 if (rev == 0 || rev == 1) 1926 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng; 1927 1928 get_cachetype_cp15(); 1929 pmap_pte_init_xscale(); 1930 return 0; 1931 } 1932 #endif /* CPU_XSCALE_80200 */ 1933 #ifdef CPU_XSCALE_80321 1934 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 || 1935 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 || 1936 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) { 1937 i80321_icu_init(); 1938 1939 /* 1940 * Reset the Performance Monitoring Unit to a 1941 * pristine state: 1942 * - CCNT, PMN0, PMN1 reset to 0 1943 * - overflow indications cleared 1944 * - all counters disabled 1945 */ 1946 __asm volatile("mcr p14, 0, %0, c0, c0, 0" 1947 : 1948 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF| 1949 PMNC_CC_IF)); 1950 1951 cpufuncs = xscale_cpufuncs; 1952 #if defined(PERFCTRS) 1953 xscale_pmu_init(); 1954 #endif 1955 1956 get_cachetype_cp15(); 1957 pmap_pte_init_xscale(); 1958 return 0; 1959 } 1960 #endif /* CPU_XSCALE_80321 */ 1961 #ifdef __CPU_XSCALE_PXA2XX 1962 /* ignore core revision to test PXA2xx CPUs */ 1963 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X || 1964 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 || 1965 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) { 1966 1967 cpufuncs = xscale_cpufuncs; 1968 #if defined(PERFCTRS) 1969 xscale_pmu_init(); 1970 #endif 1971 1972 get_cachetype_cp15(); 1973 pmap_pte_init_xscale(); 1974 1975 /* Use powersave on this CPU. */ 1976 cpu_do_powersave = 1; 1977 1978 return 0; 1979 } 1980 #endif /* __CPU_XSCALE_PXA2XX */ 1981 #ifdef CPU_XSCALE_IXP425 1982 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 || 1983 cputype == CPU_ID_IXP425_266) { 1984 ixp425_icu_init(); 1985 1986 cpufuncs = xscale_cpufuncs; 1987 #if defined(PERFCTRS) 1988 xscale_pmu_init(); 1989 #endif 1990 1991 get_cachetype_cp15(); 1992 pmap_pte_init_xscale(); 1993 1994 return 0; 1995 } 1996 #endif /* CPU_XSCALE_IXP425 */ 1997 #if defined(CPU_CORTEX) 1998 if (CPU_ID_CORTEX_P(cputype)) { 1999 cpufuncs = cortex_cpufuncs; 2000 cpu_do_powersave = 1; /* Enable powersave */ 2001 get_cachetype_cp15(); 2002 pmap_pte_init_armv7(); 2003 if (arm_cache_prefer_mask) 2004 uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1; 2005 /* 2006 * Start and reset the PMC Cycle Counter. 2007 */ 2008 armreg_pmcr_write(ARM11_PMCCTL_E | ARM11_PMCCTL_P | ARM11_PMCCTL_C); 2009 armreg_pmcntenset_write(CORTEX_CNTENS_C); 2010 return 0; 2011 } 2012 #endif /* CPU_CORTEX */ 2013 /* 2014 * Bzzzz. And the answer was ... 2015 */ 2016 panic("No support for this CPU type (%08x) in kernel", cputype); 2017 return(ARCHITECTURE_NOT_PRESENT); 2018 } 2019 2020 #ifdef CPU_ARM2 2021 u_int arm2_id(void) 2022 { 2023 2024 return CPU_ID_ARM2; 2025 } 2026 #endif /* CPU_ARM2 */ 2027 2028 #ifdef CPU_ARM250 2029 u_int arm250_id(void) 2030 { 2031 2032 return CPU_ID_ARM250; 2033 } 2034 #endif /* CPU_ARM250 */ 2035 2036 /* 2037 * Fixup routines for data and prefetch aborts. 2038 * 2039 * Several compile time symbols are used 2040 * 2041 * DEBUG_FAULT_CORRECTION - Print debugging information during the 2042 * correction of registers after a fault. 2043 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts 2044 * when defined should use late aborts 2045 */ 2046 2047 2048 /* 2049 * Null abort fixup routine. 2050 * For use when no fixup is required. 
2051 */ 2052 int 2053 cpufunc_null_fixup(void *arg) 2054 { 2055 return(ABORT_FIXUP_OK); 2056 } 2057 2058 2059 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \ 2060 defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) 2061 2062 #ifdef DEBUG_FAULT_CORRECTION 2063 #define DFC_PRINTF(x) printf x 2064 #define DFC_DISASSEMBLE(x) disassemble(x) 2065 #else 2066 #define DFC_PRINTF(x) /* nothing */ 2067 #define DFC_DISASSEMBLE(x) /* nothing */ 2068 #endif 2069 2070 /* 2071 * "Early" data abort fixup. 2072 * 2073 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used 2074 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI]. 2075 * 2076 * In early aborts, we may have to fix up LDM, STM, LDC and STC. 2077 */ 2078 int 2079 early_abort_fixup(void *arg) 2080 { 2081 trapframe_t *frame = arg; 2082 u_int fault_pc; 2083 u_int fault_instruction; 2084 int saved_lr = 0; 2085 2086 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 2087 2088 /* Ok an abort in SVC mode */ 2089 2090 /* 2091 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 2092 * as the fault happened in svc mode but we need it in the 2093 * usr slot so we can treat the registers as an array of ints 2094 * during fixing. 2095 * NOTE: This PC is in the position but writeback is not 2096 * allowed on r15. 2097 * Doing it like this is more efficient than trapping this 2098 * case in all possible locations in the following fixup code. 2099 */ 2100 2101 saved_lr = frame->tf_usr_lr; 2102 frame->tf_usr_lr = frame->tf_svc_lr; 2103 2104 /* 2105 * Note the trapframe does not have the SVC r13 so a fault 2106 * from an instruction with writeback to r13 in SVC mode is 2107 * not allowed. This should not happen as the kstack is 2108 * always valid. 2109 */ 2110 } 2111 2112 /* Get fault address and status from the CPU */ 2113 2114 fault_pc = frame->tf_pc; 2115 fault_instruction = *((volatile unsigned int *)fault_pc); 2116 2117 /* Decode the fault instruction and fix the registers as needed */ 2118 2119 if ((fault_instruction & 0x0e000000) == 0x08000000) { 2120 int base; 2121 int loop; 2122 int count; 2123 int *registers = &frame->tf_r0; 2124 2125 DFC_PRINTF(("LDM/STM\n")); 2126 DFC_DISASSEMBLE(fault_pc); 2127 if (fault_instruction & (1 << 21)) { 2128 DFC_PRINTF(("This instruction must be corrected\n")); 2129 base = (fault_instruction >> 16) & 0x0f; 2130 if (base == 15) 2131 return ABORT_FIXUP_FAILED; 2132 /* Count registers transferred */ 2133 count = 0; 2134 for (loop = 0; loop < 16; ++loop) { 2135 if (fault_instruction & (1<<loop)) 2136 ++count; 2137 } 2138 DFC_PRINTF(("%d registers used\n", count)); 2139 DFC_PRINTF(("Corrected r%d by %d bytes ", 2140 base, count * 4)); 2141 if (fault_instruction & (1 << 23)) { 2142 DFC_PRINTF(("down\n")); 2143 registers[base] -= count * 4; 2144 } else { 2145 DFC_PRINTF(("up\n")); 2146 registers[base] += count * 4; 2147 } 2148 } 2149 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) { 2150 int base; 2151 int offset; 2152 int *registers = &frame->tf_r0; 2153 2154 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */ 2155 2156 DFC_DISASSEMBLE(fault_pc); 2157 2158 /* Only need to fix registers if write back is turned on */ 2159 2160 if ((fault_instruction & (1 << 21)) != 0) { 2161 base = (fault_instruction >> 16) & 0x0f; 2162 if (base == 13 && 2163 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) 2164 return ABORT_FIXUP_FAILED; 2165 if (base == 15) 2166 return ABORT_FIXUP_FAILED; 2167 2168 offset = (fault_instruction & 0xff) << 2; 2169 
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/* XXX: unreachable - the branch above tests the same bits */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
#endif /* CPU_ARM2/250/3/6/7 */


#if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
    defined(CPU_ARM7TDMI)
/*
 * "Late" (base updated) data abort fixup
 *
 * For ARM6 (in late-abort mode) and ARM7.
 *
 * In this model, all data-transfer instructions need fixing up. We defer
 * LDM, STM, LDC and STC fixup to the early-abort handler.
 */
int
late_abort_fixup(void *arg)
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was it a swap instruction? */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was it an ldr/str instruction? */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/*
			 * Post-indexed, or pre-indexed with writeback:
			 * the base register was modified.
			 */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
			    base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard: we have to
				 * cope with shifts!
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (ror or rrx) */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
				    "register offset\n"));
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed. This should not happen as the kstack is
		 * always valid.
2377 */ 2378 } 2379 2380 /* 2381 * Now let the early-abort fixup routine have a go, in case it 2382 * was an LDM, STM, LDC or STC that faulted. 2383 */ 2384 2385 return early_abort_fixup(arg); 2386 } 2387 #endif /* CPU_ARM6(LATE)/7/7TDMI */ 2388 2389 /* 2390 * CPU Setup code 2391 */ 2392 2393 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \ 2394 defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \ 2395 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \ 2396 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 2397 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \ 2398 defined(CPU_ARM10) || defined(CPU_ARM11) || \ 2399 defined(CPU_FA526) || defined(CPU_CORTEX) || defined(CPU_SHEEVA) 2400 2401 #define IGN 0 2402 #define OR 1 2403 #define BIC 2 2404 2405 struct cpu_option { 2406 const char *co_name; 2407 int co_falseop; 2408 int co_trueop; 2409 int co_value; 2410 }; 2411 2412 static u_int parse_cpu_options(char *, struct cpu_option *, u_int); 2413 2414 static u_int 2415 parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl) 2416 { 2417 int integer; 2418 2419 if (args == NULL) 2420 return(cpuctrl); 2421 2422 while (optlist->co_name) { 2423 if (get_bootconf_option(args, optlist->co_name, 2424 BOOTOPT_TYPE_BOOLEAN, &integer)) { 2425 if (integer) { 2426 if (optlist->co_trueop == OR) 2427 cpuctrl |= optlist->co_value; 2428 else if (optlist->co_trueop == BIC) 2429 cpuctrl &= ~optlist->co_value; 2430 } else { 2431 if (optlist->co_falseop == OR) 2432 cpuctrl |= optlist->co_value; 2433 else if (optlist->co_falseop == BIC) 2434 cpuctrl &= ~optlist->co_value; 2435 } 2436 } 2437 ++optlist; 2438 } 2439 return(cpuctrl); 2440 } 2441 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */ 2442 2443 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \ 2444 || defined(CPU_ARM8) 2445 struct cpu_option arm678_options[] = { 2446 #ifdef COMPAT_12 2447 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE }, 2448 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 2449 #endif /* COMPAT_12 */ 2450 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2451 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2452 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2453 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2454 { NULL, IGN, IGN, 0 } 2455 }; 2456 2457 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */ 2458 2459 #ifdef CPU_ARM6 2460 struct cpu_option arm6_options[] = { 2461 { "arm6.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2462 { "arm6.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2463 { "arm6.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2464 { "arm6.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2465 { NULL, IGN, IGN, 0 } 2466 }; 2467 2468 void 2469 arm6_setup(char *args) 2470 { 2471 int cpuctrl, cpuctrlmask; 2472 2473 /* Set up default control registers bits */ 2474 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2475 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2476 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2477 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2478 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2479 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2480 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE 2481 | CPU_CONTROL_AFLT_ENABLE; 2482 2483 #ifdef ARM6_LATE_ABORT 2484 cpuctrl |= CPU_CONTROL_LABT_ENABLE; 2485 #endif /* ARM6_LATE_ABORT */ 2486 2487 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2488 
cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2489 #endif 2490 2491 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2492 cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl); 2493 2494 #ifdef __ARMEB__ 2495 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2496 #endif 2497 2498 /* Clear out the cache */ 2499 cpu_idcache_wbinv_all(); 2500 2501 /* Set the control register */ 2502 curcpu()->ci_ctrl = cpuctrl; 2503 cpu_control(0xffffffff, cpuctrl); 2504 } 2505 #endif /* CPU_ARM6 */ 2506 2507 #ifdef CPU_ARM7 2508 struct cpu_option arm7_options[] = { 2509 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2510 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2511 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2512 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2513 #ifdef COMPAT_12 2514 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK }, 2515 #endif /* COMPAT_12 */ 2516 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK }, 2517 { NULL, IGN, IGN, 0 } 2518 }; 2519 2520 void 2521 arm7_setup(char *args) 2522 { 2523 int cpuctrl, cpuctrlmask; 2524 2525 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2526 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2527 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2528 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2529 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2530 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2531 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE 2532 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE 2533 | CPU_CONTROL_AFLT_ENABLE; 2534 2535 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2536 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2537 #endif 2538 2539 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2540 cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl); 2541 2542 #ifdef __ARMEB__ 2543 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2544 #endif 2545 2546 /* Clear out the cache */ 2547 cpu_idcache_wbinv_all(); 2548 2549 /* Set the control register */ 2550 curcpu()->ci_ctrl = cpuctrl; 2551 cpu_control(0xffffffff, cpuctrl); 2552 } 2553 #endif /* CPU_ARM7 */ 2554 2555 #ifdef CPU_ARM7TDMI 2556 struct cpu_option arm7tdmi_options[] = { 2557 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2558 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2559 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2560 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2561 #ifdef COMPAT_12 2562 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK }, 2563 #endif /* COMPAT_12 */ 2564 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK }, 2565 { NULL, IGN, IGN, 0 } 2566 }; 2567 2568 void 2569 arm7tdmi_setup(char *args) 2570 { 2571 int cpuctrl; 2572 2573 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2574 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2575 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2576 2577 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2578 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl); 2579 2580 #ifdef __ARMEB__ 2581 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2582 #endif 2583 2584 /* Clear out the cache */ 2585 cpu_idcache_wbinv_all(); 2586 2587 /* Set the control register */ 2588 curcpu()->ci_ctrl = cpuctrl; 2589 cpu_control(0xffffffff, cpuctrl); 2590 } 2591 #endif /* CPU_ARM7TDMI */ 2592 2593 #ifdef CPU_ARM8 2594 struct cpu_option arm8_options[] = { 2595 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2596 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2597 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2598 { "arm8.nowritebuf", OR, BIC, 
CPU_CONTROL_WBUF_ENABLE }, 2599 #ifdef COMPAT_12 2600 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2601 #endif /* COMPAT_12 */ 2602 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2603 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2604 { NULL, IGN, IGN, 0 } 2605 }; 2606 2607 void 2608 arm8_setup(char *args) 2609 { 2610 int integer; 2611 int cpuctrl, cpuctrlmask; 2612 int clocktest; 2613 int setclock = 0; 2614 2615 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2616 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2617 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2618 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2619 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2620 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2621 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE 2622 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE; 2623 2624 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2625 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2626 #endif 2627 2628 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2629 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl); 2630 2631 #ifdef __ARMEB__ 2632 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2633 #endif 2634 2635 /* Get clock configuration */ 2636 clocktest = arm8_clock_config(0, 0) & 0x0f; 2637 2638 /* Special ARM8 clock and test configuration */ 2639 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2640 clocktest = 0; 2641 setclock = 1; 2642 } 2643 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2644 if (integer) 2645 clocktest |= 0x01; 2646 else 2647 clocktest &= ~(0x01); 2648 setclock = 1; 2649 } 2650 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2651 if (integer) 2652 clocktest |= 0x02; 2653 else 2654 clocktest &= ~(0x02); 2655 setclock = 1; 2656 } 2657 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) { 2658 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2; 2659 setclock = 1; 2660 } 2661 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) { 2662 clocktest |= (integer & 7) << 5; 2663 setclock = 1; 2664 } 2665 2666 /* Clear out the cache */ 2667 cpu_idcache_wbinv_all(); 2668 2669 /* Set the control register */ 2670 curcpu()->ci_ctrl = cpuctrl; 2671 cpu_control(0xffffffff, cpuctrl); 2672 2673 /* Set the clock/test register */ 2674 if (setclock) 2675 arm8_clock_config(0x7f, clocktest); 2676 } 2677 #endif /* CPU_ARM8 */ 2678 2679 #ifdef CPU_ARM9 2680 struct cpu_option arm9_options[] = { 2681 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2682 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2683 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2684 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2685 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2686 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2687 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2688 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2689 { NULL, IGN, IGN, 0 } 2690 }; 2691 2692 void 2693 arm9_setup(char *args) 2694 { 2695 int cpuctrl, cpuctrlmask; 2696 2697 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2698 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2699 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2700 | CPU_CONTROL_WBUF_ENABLE; 2701 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2702 | 
CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2703 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2704 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 2705 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2706 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC 2707 | CPU_CONTROL_ROUNDROBIN; 2708 2709 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2710 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2711 #endif 2712 2713 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl); 2714 2715 #ifdef __ARMEB__ 2716 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2717 #endif 2718 2719 if (vector_page == ARM_VECTORS_HIGH) 2720 cpuctrl |= CPU_CONTROL_VECRELOC; 2721 2722 /* Clear out the cache */ 2723 cpu_idcache_wbinv_all(); 2724 2725 /* Set the control register */ 2726 curcpu()->ci_ctrl = cpuctrl; 2727 cpu_control(cpuctrlmask, cpuctrl); 2728 2729 } 2730 #endif /* CPU_ARM9 */ 2731 2732 #if defined(CPU_ARM9E) || defined(CPU_ARM10) 2733 struct cpu_option arm10_options[] = { 2734 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2735 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2736 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2737 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2738 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2739 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2740 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2741 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2742 { NULL, IGN, IGN, 0 } 2743 }; 2744 2745 void 2746 arm10_setup(char *args) 2747 { 2748 int cpuctrl, cpuctrlmask; 2749 2750 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2751 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2752 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE; 2753 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2754 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2755 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 2756 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2757 | CPU_CONTROL_BPRD_ENABLE 2758 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 2759 2760 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2761 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2762 #endif 2763 2764 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl); 2765 2766 #ifdef __ARMEB__ 2767 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2768 #endif 2769 2770 if (vector_page == ARM_VECTORS_HIGH) 2771 cpuctrl |= CPU_CONTROL_VECRELOC; 2772 2773 /* Clear out the cache */ 2774 cpu_idcache_wbinv_all(); 2775 2776 /* Now really make sure they are clean. */ 2777 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2778 2779 /* Set the control register */ 2780 curcpu()->ci_ctrl = cpuctrl; 2781 cpu_control(0xffffffff, cpuctrl); 2782 2783 /* And again. 
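	 * The control register write may just have changed how the
	 * caches behave, so flush once more with the new settings in
	 * effect.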
*/ 2784 cpu_idcache_wbinv_all(); 2785 } 2786 #endif /* CPU_ARM9E || CPU_ARM10 */ 2787 2788 #if defined(CPU_ARM11) 2789 struct cpu_option arm11_options[] = { 2790 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2791 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2792 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2793 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2794 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2795 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2796 { "arm11.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2797 { NULL, IGN, IGN, 0 } 2798 }; 2799 2800 void 2801 arm11_setup(char *args) 2802 { 2803 int cpuctrl, cpuctrlmask; 2804 2805 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2806 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2807 /* | CPU_CONTROL_BPRD_ENABLE */; 2808 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2809 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2810 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE 2811 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2812 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 2813 2814 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2815 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2816 #endif 2817 2818 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); 2819 2820 #ifdef __ARMEB__ 2821 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2822 #endif 2823 2824 if (vector_page == ARM_VECTORS_HIGH) 2825 cpuctrl |= CPU_CONTROL_VECRELOC; 2826 2827 /* Clear out the cache */ 2828 cpu_idcache_wbinv_all(); 2829 2830 /* Now really make sure they are clean. */ 2831 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2832 2833 /* Allow detection code to find the VFP if it's fitted. */ 2834 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 2835 2836 /* Set the control register */ 2837 curcpu()->ci_ctrl = cpuctrl; 2838 cpu_control(0xffffffff, cpuctrl); 2839 2840 /* And again. */ 2841 cpu_idcache_wbinv_all(); 2842 } 2843 #endif /* CPU_ARM11 */ 2844 2845 #if defined(CPU_ARM11MPCORE) 2846 2847 void 2848 arm11mpcore_setup(char *args) 2849 { 2850 int cpuctrl, cpuctrlmask; 2851 2852 cpuctrl = CPU_CONTROL_IC_ENABLE 2853 | CPU_CONTROL_DC_ENABLE 2854 | CPU_CONTROL_BPRD_ENABLE ; 2855 cpuctrlmask = CPU_CONTROL_IC_ENABLE 2856 | CPU_CONTROL_DC_ENABLE 2857 | CPU_CONTROL_BPRD_ENABLE 2858 | CPU_CONTROL_AFLT_ENABLE 2859 | CPU_CONTROL_VECRELOC; 2860 2861 #ifdef ARM11MPCORE_MMU_COMPAT 2862 /* XXX: S and R? */ 2863 #endif 2864 2865 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2866 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2867 #endif 2868 2869 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); 2870 2871 if (vector_page == ARM_VECTORS_HIGH) 2872 cpuctrl |= CPU_CONTROL_VECRELOC; 2873 2874 /* Clear out the cache */ 2875 cpu_idcache_wbinv_all(); 2876 2877 /* Now really make sure they are clean. */ 2878 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2879 2880 /* Allow detection code to find the VFP if it's fitted. */ 2881 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 2882 2883 /* Set the control register */ 2884 curcpu()->ci_ctrl = cpu_control(cpuctrlmask, cpuctrl); 2885 2886 /* And again. 
	 */
	cpu_idcache_wbinv_all();
}
#endif /* CPU_ARM11MPCORE */


#if defined(CPU_CORTEX)
struct cpu_option armv7_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "armv7.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "armv7.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "armv7.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

void
armv7_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
#endif /* CPU_CORTEX */


#if defined(CPU_ARM1136) || defined(CPU_ARM1176)
void
arm11x6_setup(char *args)
{
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t sbz = 0;
	uint32_t cpuid;

	cpuid = cpu_id();

	cpuctrl =
	    CPU_CONTROL_MMU_ENABLE |
	    CPU_CONTROL_DC_ENABLE |
	    CPU_CONTROL_WBUF_ENABLE |
	    CPU_CONTROL_32BP_ENABLE |
	    CPU_CONTROL_32BD_ENABLE |
	    CPU_CONTROL_LABT_ENABLE |
	    CPU_CONTROL_SYST_ENABLE |
	    CPU_CONTROL_IC_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	cpuctrl_wax =
	    (3 << 30) |
	    (1 << 29) |
	    (1 << 28) |
	    (3 << 26) |
	    (3 << 19) |
	    (1 << 17);

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	auxctrl = 0;
	auxctrl_wax = ~0;
	/*
	 * This enables the workaround for the 364296 ARM1136
	 * r0pX errata (possible cache data corruption with
	 * hit-under-miss enabled). It sets the undocumented bit 31 in
	 * the auxiliary control register and the FI bit in the control
	 * register, thus disabling hit-under-miss without putting the
	 * processor into full low interrupt latency mode. ARM11MPCore
	 * is not affected.
2995 */ 2996 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */ 2997 cpuctrl |= CPU_CONTROL_FI_ENABLE; 2998 auxctrl = ARM1136_AUXCTL_PFI; 2999 auxctrl_wax = ~ARM1136_AUXCTL_PFI; 3000 } 3001 3002 /* 3003 * Enable an errata workaround 3004 */ 3005 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1176JZS) { /* ARM1176JZSr0 */ 3006 auxctrl = ARM1176_AUXCTL_PHD; 3007 auxctrl_wax = ~ARM1176_AUXCTL_PHD; 3008 } 3009 3010 /* Clear out the cache */ 3011 cpu_idcache_wbinv_all(); 3012 3013 /* Now really make sure they are clean. */ 3014 __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz)); 3015 3016 /* Allow detection code to find the VFP if it's fitted. */ 3017 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 3018 3019 /* Set the control register */ 3020 curcpu()->ci_ctrl = cpuctrl; 3021 cpu_control(~cpuctrl_wax, cpuctrl); 3022 3023 __asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t" 3024 "and %1, %0, %2\n\t" 3025 "orr %1, %1, %3\n\t" 3026 "teq %0, %1\n\t" 3027 "mcrne p15, 0, %1, c1, c0, 1\n\t" 3028 : "=r"(tmp), "=r"(tmp2) : 3029 "r"(auxctrl_wax), "r"(auxctrl)); 3030 3031 /* And again. */ 3032 cpu_idcache_wbinv_all(); 3033 } 3034 #endif /* CPU_ARM1136 || CPU_ARM1176 */ 3035 3036 #ifdef CPU_SA110 3037 struct cpu_option sa110_options[] = { 3038 #ifdef COMPAT_12 3039 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3040 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 3041 #endif /* COMPAT_12 */ 3042 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3043 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3044 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3045 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3046 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3047 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3048 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3049 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3050 { NULL, IGN, IGN, 0 } 3051 }; 3052 3053 void 3054 sa110_setup(char *args) 3055 { 3056 int cpuctrl, cpuctrlmask; 3057 3058 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3059 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3060 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3061 | CPU_CONTROL_WBUF_ENABLE; 3062 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3063 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3064 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3065 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3066 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3067 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3068 | CPU_CONTROL_CPCLK; 3069 3070 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3071 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3072 #endif 3073 3074 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl); 3075 3076 #ifdef __ARMEB__ 3077 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3078 #endif 3079 3080 if (vector_page == ARM_VECTORS_HIGH) 3081 cpuctrl |= CPU_CONTROL_VECRELOC; 3082 3083 /* Clear out the cache */ 3084 cpu_idcache_wbinv_all(); 3085 3086 /* Set the control register */ 3087 curcpu()->ci_ctrl = cpuctrl; 3088 /* cpu_control(cpuctrlmask, cpuctrl);*/ 3089 cpu_control(0xffffffff, cpuctrl); 3090 3091 /* 3092 * enable clockswitching, note that this doesn't read or write to r0, 3093 * r0 is just to make it valid asm 3094 */ 3095 __asm ("mcr 15, 0, r0, c15, c1, 2"); 3096 } 3097 #endif /* CPU_SA110 */ 3098 3099 #if defined(CPU_SA1100) || defined(CPU_SA1110) 3100 struct 
cpu_option sa11x0_options[] = { 3101 #ifdef COMPAT_12 3102 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3103 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 3104 #endif /* COMPAT_12 */ 3105 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3106 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3107 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3108 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3109 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3110 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3111 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3112 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3113 { NULL, IGN, IGN, 0 } 3114 }; 3115 3116 void 3117 sa11x0_setup(char *args) 3118 { 3119 int cpuctrl, cpuctrlmask; 3120 3121 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3122 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3123 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3124 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE; 3125 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3126 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3127 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3128 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3129 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3130 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3131 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 3132 3133 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3134 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3135 #endif 3136 3137 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl); 3138 3139 #ifdef __ARMEB__ 3140 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3141 #endif 3142 3143 if (vector_page == ARM_VECTORS_HIGH) 3144 cpuctrl |= CPU_CONTROL_VECRELOC; 3145 3146 /* Clear out the cache */ 3147 cpu_idcache_wbinv_all(); 3148 3149 /* Set the control register */ 3150 curcpu()->ci_ctrl = cpuctrl; 3151 cpu_control(0xffffffff, cpuctrl); 3152 } 3153 #endif /* CPU_SA1100 || CPU_SA1110 */ 3154 3155 #if defined(CPU_FA526) 3156 struct cpu_option fa526_options[] = { 3157 #ifdef COMPAT_12 3158 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3159 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 3160 #endif /* COMPAT_12 */ 3161 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3162 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3163 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3164 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3165 { NULL, IGN, IGN, 0 } 3166 }; 3167 3168 void 3169 fa526_setup(char *args) 3170 { 3171 int cpuctrl, cpuctrlmask; 3172 3173 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3174 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3175 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3176 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE; 3177 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3178 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3179 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3180 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3181 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3182 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3183 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 3184 3185 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3186 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3187 #endif 3188 3189 cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl); 3190 
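	/*
	 * For example, booting with the (hypothetical) argument
	 * "cpu.nocache=1" would take the BIC action from the matching
	 * table entry above and clear CPU_CONTROL_IC_ENABLE and
	 * CPU_CONTROL_DC_ENABLE from cpuctrl at this point.
	 */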
3191 #ifdef __ARMEB__ 3192 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3193 #endif 3194 3195 if (vector_page == ARM_VECTORS_HIGH) 3196 cpuctrl |= CPU_CONTROL_VECRELOC; 3197 3198 /* Clear out the cache */ 3199 cpu_idcache_wbinv_all(); 3200 3201 /* Set the control register */ 3202 curcpu()->ci_ctrl = cpuctrl; 3203 cpu_control(0xffffffff, cpuctrl); 3204 } 3205 #endif /* CPU_FA526 */ 3206 3207 #if defined(CPU_IXP12X0) 3208 struct cpu_option ixp12x0_options[] = { 3209 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3210 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3211 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3212 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3213 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3214 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3215 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3216 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3217 { NULL, IGN, IGN, 0 } 3218 }; 3219 3220 void 3221 ixp12x0_setup(char *args) 3222 { 3223 int cpuctrl, cpuctrlmask; 3224 3225 3226 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE 3227 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE 3228 | CPU_CONTROL_IC_ENABLE; 3229 3230 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE 3231 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE 3232 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE 3233 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE 3234 | CPU_CONTROL_VECRELOC; 3235 3236 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3237 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3238 #endif 3239 3240 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl); 3241 3242 #ifdef __ARMEB__ 3243 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3244 #endif 3245 3246 if (vector_page == ARM_VECTORS_HIGH) 3247 cpuctrl |= CPU_CONTROL_VECRELOC; 3248 3249 /* Clear out the cache */ 3250 cpu_idcache_wbinv_all(); 3251 3252 /* Set the control register */ 3253 curcpu()->ci_ctrl = cpuctrl; 3254 /* cpu_control(0xffffffff, cpuctrl); */ 3255 cpu_control(cpuctrlmask, cpuctrl); 3256 } 3257 #endif /* CPU_IXP12X0 */ 3258 3259 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 3260 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || defined(CPU_CORTEX) 3261 struct cpu_option xscale_options[] = { 3262 #ifdef COMPAT_12 3263 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 3264 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3265 #endif /* COMPAT_12 */ 3266 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 3267 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3268 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3269 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 3270 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3271 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3272 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3273 { NULL, IGN, IGN, 0 } 3274 }; 3275 3276 void 3277 xscale_setup(char *args) 3278 { 3279 uint32_t auxctl; 3280 int cpuctrl, cpuctrlmask; 3281 3282 /* 3283 * The XScale Write Buffer is always enabled. Our option 3284 * is to enable/disable coalescing. Note that bits 6:3 3285 * must always be enabled. 
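	 *
	 * Coalescing itself is controlled by the K bit in the auxiliary
	 * control register; the mrc/mcr pair at the end of this routine
	 * sets XSCALE_AUXCTL_K to disable coalescing or clears it to
	 * allow it.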
3286 */ 3287 3288 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3289 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3290 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3291 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE 3292 | CPU_CONTROL_BPRD_ENABLE; 3293 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 3294 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 3295 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3296 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3297 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3298 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 3299 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 3300 3301 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3302 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3303 #endif 3304 3305 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl); 3306 3307 #ifdef __ARMEB__ 3308 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3309 #endif 3310 3311 if (vector_page == ARM_VECTORS_HIGH) 3312 cpuctrl |= CPU_CONTROL_VECRELOC; 3313 3314 /* Clear out the cache */ 3315 cpu_idcache_wbinv_all(); 3316 3317 /* 3318 * Set the control register. Note that bits 6:3 must always 3319 * be set to 1. 3320 */ 3321 curcpu()->ci_ctrl = cpuctrl; 3322 /* cpu_control(cpuctrlmask, cpuctrl);*/ 3323 cpu_control(0xffffffff, cpuctrl); 3324 3325 /* Make sure write coalescing is turned on */ 3326 __asm volatile("mrc p15, 0, %0, c1, c0, 1" 3327 : "=r" (auxctl)); 3328 #ifdef XSCALE_NO_COALESCE_WRITES 3329 auxctl |= XSCALE_AUXCTL_K; 3330 #else 3331 auxctl &= ~XSCALE_AUXCTL_K; 3332 #endif 3333 __asm volatile("mcr p15, 0, %0, c1, c0, 1" 3334 : : "r" (auxctl)); 3335 } 3336 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */ 3337 3338 #if defined(CPU_SHEEVA) 3339 struct cpu_option sheeva_options[] = { 3340 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3341 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3342 { "sheeva.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 3343 { "sheeva.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 3344 { "sheeva.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 3345 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3346 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 3347 { "sheeva.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 3348 { NULL, IGN, IGN, 0 } 3349 }; 3350 3351 void 3352 sheeva_setup(char *args) 3353 { 3354 int cpuctrl, cpuctrlmask; 3355 uint32_t sheeva_ext; 3356 3357 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 3358 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3359 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE; 3360 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 3361 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 3362 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 3363 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 3364 | CPU_CONTROL_BPRD_ENABLE 3365 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 3366 3367 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 3368 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 3369 #endif 3370 3371 cpuctrl = parse_cpu_options(args, sheeva_options, cpuctrl); 3372 3373 /* Enable DCache Streaming Switch and Write Allocate */ 3374 __asm volatile("mrc p15, 1, %0, c15, c1, 0" 3375 : "=r" (sheeva_ext)); 3376 3377 sheeva_ext |= FC_DCACHE_STREAM_EN | FC_WR_ALLOC_EN; 3378 3379 __asm volatile("mcr p15, 1, %0, c15, c1, 0" 3380 :: "r" (sheeva_ext)); 3381 3382 /* 3383 * Sheeva has L2 Cache. Enable/Disable it here. 3384 * Really not support yet... 
3385 */ 3386 3387 #ifdef __ARMEB__ 3388 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 3389 #endif 3390 3391 if (vector_page == ARM_VECTORS_HIGH) 3392 cpuctrl |= CPU_CONTROL_VECRELOC; 3393 3394 /* Clear out the cache */ 3395 cpu_idcache_wbinv_all(); 3396 3397 /* Now really make sure they are clean. */ 3398 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 3399 3400 /* Set the control register */ 3401 curcpu()->ci_ctrl = cpuctrl; 3402 cpu_control(0xffffffff, cpuctrl); 3403 3404 /* And again. */ 3405 cpu_idcache_wbinv_all(); 3406 } 3407 #endif /* CPU_SHEEVA */ 3408