1 /* $NetBSD: cpufunc.c,v 1.99 2010/07/05 06:54:48 kiyohara Exp $ */ 2 3 /* 4 * arm7tdmi support code Copyright (c) 2001 John Fremlin 5 * arm8 support code Copyright (c) 1997 ARM Limited 6 * arm8 support code Copyright (c) 1997 Causality Limited 7 * arm9 support code Copyright (C) 2001 ARM Ltd 8 * arm11 support code Copyright (c) 2007 Microsoft 9 * cortexa8 support code Copyright (c) 2008 3am Software Foundry 10 * cortexa8 improvements Copyright (c) Goeran Weinholt 11 * Copyright (c) 1997 Mark Brinicombe. 12 * Copyright (c) 1997 Causality Limited 13 * All rights reserved. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 3. All advertising materials mentioning features or use of this software 24 * must display the following acknowledgement: 25 * This product includes software developed by Causality Limited. 26 * 4. The name of Causality Limited may not be used to endorse or promote 27 * products derived from this software without specific prior written 28 * permission. 29 * 30 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS 31 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 32 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 33 * DISCLAIMED. 
IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, 34 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 35 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 36 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 40 * SUCH DAMAGE. 41 * 42 * RiscBSD kernel project 43 * 44 * cpufuncs.c 45 * 46 * C functions for supporting CPU / MMU / TLB specific operations. 47 * 48 * Created : 30/01/97 49 */ 50 51 #include <sys/cdefs.h> 52 __KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.99 2010/07/05 06:54:48 kiyohara Exp $"); 53 54 #include "opt_compat_netbsd.h" 55 #include "opt_cpuoptions.h" 56 #include "opt_perfctrs.h" 57 58 #include <sys/types.h> 59 #include <sys/param.h> 60 #include <sys/pmc.h> 61 #include <sys/systm.h> 62 #include <machine/cpu.h> 63 #include <machine/bootconfig.h> 64 #include <arch/arm/arm/disassem.h> 65 66 #include <uvm/uvm.h> 67 68 #include <arm/cpuconf.h> 69 #include <arm/cpufunc.h> 70 71 #ifdef CPU_XSCALE_80200 72 #include <arm/xscale/i80200reg.h> 73 #include <arm/xscale/i80200var.h> 74 #endif 75 76 #ifdef CPU_XSCALE_80321 77 #include <arm/xscale/i80321reg.h> 78 #include <arm/xscale/i80321var.h> 79 #endif 80 81 #ifdef CPU_XSCALE_IXP425 82 #include <arm/xscale/ixp425reg.h> 83 #include <arm/xscale/ixp425var.h> 84 #endif 85 86 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) 87 #include <arm/xscale/xscalereg.h> 88 #endif 89 90 #if defined(PERFCTRS) 91 struct arm_pmc_funcs *arm_pmc; 92 #endif 93 94 /* PRIMARY CACHE VARIABLES */ 95 int arm_picache_size; 96 int arm_picache_line_size; 97 int arm_picache_ways; 98 99 int arm_pdcache_size; /* and unified */ 100 int arm_pdcache_line_size; 101 int arm_pdcache_ways; 102 #if (ARM_MMU_V6 + ARM_MMU_V7) != 0 103 int 
arm_cache_prefer_mask;	/* page-colour mask, set from cache geometry below */
#endif


int	arm_pcache_type;	/* cache type, decoded from the CP15 cache type reg */
int	arm_pcache_unified;	/* non-zero when I and D caches are unified */

int	arm_dcache_align;	/* D-cache line size, used for cache range ops */
int	arm_dcache_align_mask;	/* mask form of arm_dcache_align */

/* 1 == use cpu_sleep(), 0 == don't */
int	cpu_do_powersave;

#ifdef CPU_ARM2
/*
 * Dispatch table for the ARM2.  Nearly every slot is a no-op;
 * the (void *) casts adapt cpufunc_nullop to slots whose prototypes
 * take arguments.
 */
struct cpu_functions arm2_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm2_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM2 */

#ifdef CPU_ARM250
/* Dispatch table for the ARM250; identical to the ARM2 table except cf_id. */
struct cpu_functions arm250_cpufuncs = {
	/* CPU functions */

	.cf_id			= arm250_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= (void *)cpufunc_nullop,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)cpufunc_nullop,
	.cf_dcache_inv_range	= (void *)cpufunc_nullop,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= cpufunc_nullop,
	.cf_idcache_wbinv_range	= (void *)cpufunc_nullop,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM250 */

#ifdef CPU_ARM3
/*
 * Dispatch table for the ARM3.  The ARM3 adds a cache, so all the
 * cache slots funnel into arm3_cache_flush (there is no finer-grained
 * operation on this core).
 */
struct cpu_functions arm3_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= arm3_control,

	/* TLB functions */

	.cf_tlb_flushID		= cpufunc_nullop,
	.cf_tlb_flushID_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushI		= cpufunc_nullop,
	.cf_tlb_flushI_SE	= (void *)cpufunc_nullop,
	.cf_tlb_flushD		= cpufunc_nullop,
	.cf_tlb_flushD_SE	= (void *)cpufunc_nullop,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm3_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm3_cache_flush,
	.cf_dcache_inv_range	= (void *)arm3_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm3_cache_flush,
	.cf_idcache_wbinv_range	= (void
*)arm3_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= early_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_setup		= (void *)cpufunc_nullop

};
#endif	/* CPU_ARM3 */

#ifdef CPU_ARM6
/*
 * Dispatch table for the ARM6.  First core in this file with an MMU:
 * TLB slots use the shared ARM6/ARM7 flush/purge routines, cache slots
 * use the shared whole-cache flush.
 */
struct cpu_functions arm6_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *) cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	/* abort model is selectable at build time on ARM6 */
#ifdef ARM6_LATE_ABORT
	.cf_dataabt_fixup	= late_abort_fixup,
#else
	.cf_dataabt_fixup	= early_abort_fixup,
#endif
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm6_setup

};
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
/*
 * Dispatch table for the ARM7.  Same shared arm67_* MMU/TLB/cache
 * routines as the ARM6, but always uses the late abort model.
 */
struct cpu_functions arm7_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm67_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm67_tlb_flush,
	.cf_tlb_flushID_SE	= arm67_tlb_purge,
	.cf_tlb_flushI		= arm67_tlb_flush,
	.cf_tlb_flushI_SE	= arm67_tlb_purge,
	.cf_tlb_flushD		= arm67_tlb_flush,
	.cf_tlb_flushD_SE	= arm67_tlb_purge,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm67_cache_flush,
	.cf_dcache_wbinv_range	= (void *)arm67_cache_flush,
	.cf_dcache_inv_range	= (void *)arm67_cache_flush,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm67_cache_flush,
	.cf_idcache_wbinv_range	= (void *)arm67_cache_flush,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm67_context_switch,

	.cf_setup		= arm7_setup

};
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
/* Dispatch table for the ARM7TDMI; uses its own arm7tdmi_* routines. */
struct cpu_functions arm7tdmi_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm7tdmi_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	/* only combined I+D TLB ops exist for this core */
	.cf_tlb_flushID		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushID_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushI		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushI_SE	= arm7tdmi_tlb_flushID_SE,
	.cf_tlb_flushD		= arm7tdmi_tlb_flushID,
	.cf_tlb_flushD_SE	= arm7tdmi_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_dcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_inv_range	= (void *)arm7tdmi_cache_flushID,
	.cf_dcache_wb_range	= (void *)cpufunc_nullop,

	.cf_idcache_wbinv_all	= arm7tdmi_cache_flushID,
	.cf_idcache_wbinv_range	= (void *)arm7tdmi_cache_flushID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= late_abort_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm7tdmi_context_switch,

	.cf_setup		= arm7tdmi_setup

};
#endif	/* CPU_ARM7TDMI */

#ifdef CPU_ARM8
/*
 * Dispatch table for the ARM8.  Cache purge (write-back + invalidate)
 * and clean (write-back only) routines are distinct on this core.
 */
struct cpu_functions arm8_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm8_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm8_tlb_flushID,
	.cf_tlb_flushID_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushI		= arm8_tlb_flushID,
	.cf_tlb_flushI_SE	= arm8_tlb_flushID_SE,
	.cf_tlb_flushD		= arm8_tlb_flushID,
	.cf_tlb_flushD_SE	= arm8_tlb_flushID_SE,

	/* Cache operations */

	.cf_icache_sync_all	= cpufunc_nullop,
	.cf_icache_sync_range	= (void *)cpufunc_nullop,

	.cf_dcache_wbinv_all	= arm8_cache_purgeID,
	.cf_dcache_wbinv_range	= (void *)arm8_cache_purgeID,
	/* XXX: invalidate slot falls back to a full purge */
/*XXX*/	.cf_dcache_inv_range	= (void *)arm8_cache_purgeID,
	.cf_dcache_wb_range	= (void *)arm8_cache_cleanID,

	.cf_idcache_wbinv_all	= arm8_cache_purgeID,
	.cf_idcache_wbinv_range	= (void *)arm8_cache_purgeID,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= cpufunc_nullop,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm8_context_switch,

	.cf_setup		= arm8_setup
};
#endif	/* CPU_ARM8 */

#ifdef CPU_ARM9
/*
 * Dispatch table for the ARM9.  Whole-TLB ops use the generic ARMv4
 * routines; single-entry and cache ops use ARM9-specific routines.
 */
struct cpu_functions arm9_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm9_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm9_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm9_icache_sync_all,
	.cf_icache_sync_range	= arm9_icache_sync_range,

	.cf_dcache_wbinv_all	= arm9_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= arm9_dcache_wbinv_range,
/*XXX*/
	/* XXX: invalidate slot falls back to write-back + invalidate */
	.cf_dcache_inv_range	= arm9_dcache_wbinv_range,
	.cf_dcache_wb_range	= arm9_dcache_wb_range,

	.cf_idcache_wbinv_all	= arm9_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= arm9_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm9_context_switch,

	.cf_setup		= arm9_setup

};
#endif	/* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
/*
 * Dispatch table shared by ARM9E and ARM10 cores using the ARMv5
 * "EC" cache routines.
 */
struct cpu_functions armv5_ec_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_ec_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_ec_icache_sync_all,
	.cf_icache_sync_range	= armv5_ec_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_ec_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_ec_dcache_wbinv_range,
	/* XXX: invalidate slot falls back to write-back + invalidate */
/*XXX*/	.cf_dcache_inv_range	= armv5_ec_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_ec_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv5_ec_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv5_ec_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif	/* CPU_ARM9E || CPU_ARM10 */

#ifdef CPU_ARM10
/*
 * Dispatch table for the ARM10 using the plain ARMv5 cache routines
 * (contrast with armv5_ec_cpufuncs above).
 */
struct cpu_functions arm10_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv5_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= arm10_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= arm10_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv5_icache_sync_all,
	.cf_icache_sync_range	= armv5_icache_sync_range,

	.cf_dcache_wbinv_all	= armv5_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv5_dcache_wbinv_range,
	/* XXX: invalidate slot falls back to write-back + invalidate */
/*XXX*/	.cf_dcache_inv_range	= armv5_dcache_wbinv_range,
	.cf_dcache_wb_range	= armv5_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv5_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv5_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	=
arm10_context_switch,

	.cf_setup		= arm10_setup

};
#endif	/* CPU_ARM10 */

#ifdef CPU_ARM11
/*
 * Dispatch table for generic ARM11 cores: ARM11 TLB/write-buffer
 * routines with the generic ARMv6 cache routines.
 */
struct cpu_functions arm11_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm11_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv6_icache_sync_all,
	.cf_icache_sync_range	= armv6_icache_sync_range,

	.cf_dcache_wbinv_all	= armv6_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_idcache_wbinv_all	= armv6_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= armv6_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm11_setup

};
#endif	/* CPU_ARM11 */

#ifdef CPU_ARM1136
/*
 * Dispatch table for the ARM1136.  Several slots are overridden with
 * arm1136_* routines that work around documented core errata (the
 * erratum numbers are noted next to the affected slots).
 */
struct cpu_functions arm1136_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= arm1136_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= arm11_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= arm1136_icache_sync_all,	/* 411920 */
	.cf_icache_sync_range	= arm1136_icache_sync_range,	/* 371025 */

	.cf_dcache_wbinv_all	= arm1136_dcache_wbinv_all,	/* 411920 */
	.cf_dcache_wbinv_range	= armv6_dcache_wbinv_range,
	.cf_dcache_inv_range	= armv6_dcache_inv_range,
	.cf_dcache_wb_range	= armv6_dcache_wb_range,

	.cf_idcache_wbinv_all	= arm1136_idcache_wbinv_all,	/* 411920 */
	.cf_idcache_wbinv_range	= arm1136_idcache_wbinv_range,	/* 371025 */

	/* Other functions */

	.cf_flush_prefetchbuf	= arm1136_flush_prefetchbuf,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= arm11_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= arm11_context_switch,

	.cf_setup		= arm1136_setup

};
#endif	/* CPU_ARM1136 */

#ifdef CPU_SA110
/*
 * Dispatch table for the SA-110 (StrongARM): ARMv4 TLB routines with
 * the shared SA-1 cache routines.
 */
struct cpu_functions sa110_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	=
(void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
	/* XXX: invalidate slot falls back to a full purge */
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa110_context_switch,

	.cf_setup		= sa110_setup
};
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
/*
 * Dispatch table shared by SA-1100/SA-1110: same SA-1 cache routines
 * as the SA-110, plus SA-11x0 read-buffer drain and sleep support.
 */
struct cpu_functions sa11x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
	/* XXX: invalidate slot falls back to a full purge */
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= sa11x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= sa11x0_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= sa11x0_context_switch,

	.cf_setup		= sa11x0_setup
};
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
/*
 * Dispatch table for the Faraday FA526: ARMv4 whole-TLB routines with
 * a full set of fa526_* cache/TLB-entry routines.
 */
struct cpu_functions fa526_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= fa526_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= fa526_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= fa526_tlb_flushI_SE,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= fa526_icache_sync_all,
	.cf_icache_sync_range	= fa526_icache_sync_range,

	.cf_dcache_wbinv_all	= fa526_dcache_wbinv_all,
	.cf_dcache_wbinv_range	= fa526_dcache_wbinv_range,
	.cf_dcache_inv_range	= fa526_dcache_inv_range,
	.cf_dcache_wb_range	= fa526_dcache_wb_range,

	.cf_idcache_wbinv_all	= fa526_idcache_wbinv_all,
	.cf_idcache_wbinv_range	= fa526_idcache_wbinv_range,

	/* Other functions */

	.cf_flush_prefetchbuf	= fa526_flush_prefetchbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= fa526_flush_brnchtgt_E,

	.cf_sleep		= fa526_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= fa526_context_switch,

	.cf_setup		= fa526_setup
};
#endif	/* CPU_FA526 */

#ifdef CPU_IXP12X0
/*
 * Dispatch table for the IXP12x0: SA-1 cache routines with
 * IXP12x0-specific read-buffer drain and context switch.
 */
struct cpu_functions ixp12x0_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= sa1_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= sa1_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= sa1_cache_syncI,
	.cf_icache_sync_range	= sa1_cache_syncI_rng,

	.cf_dcache_wbinv_all	= sa1_cache_purgeD,
	.cf_dcache_wbinv_range	= sa1_cache_purgeD_rng,
	/* XXX: invalidate slot falls back to a full purge */
/*XXX*/	.cf_dcache_inv_range	= sa1_cache_purgeD_rng,
	.cf_dcache_wb_range	= sa1_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= sa1_cache_purgeID,
	.cf_idcache_wbinv_range	= sa1_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= ixp12x0_drain_readbuf,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= (void *)cpufunc_nullop,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= ixp12x0_context_switch,

	.cf_setup		= ixp12x0_setup
};
#endif
/* CPU_IXP12X0 */

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425)
/*
 * Dispatch table shared by all supported XScale variants.  Note the
 * non-trivial cf_cpwait: XScale needs an explicit wait after CP15
 * writes before the effect is guaranteed visible.
 */
struct cpu_functions xscale_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= xscale_cpwait,

	/* MMU functions */

	.cf_control		= xscale_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= xscale_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= armv4_tlb_flushID,
	.cf_tlb_flushID_SE	= xscale_tlb_flushID_SE,
	.cf_tlb_flushI		= armv4_tlb_flushI,
	.cf_tlb_flushI_SE	= (void *)armv4_tlb_flushI,
	.cf_tlb_flushD		= armv4_tlb_flushD,
	.cf_tlb_flushD_SE	= armv4_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= xscale_cache_syncI,
	.cf_icache_sync_range	= xscale_cache_syncI_rng,

	.cf_dcache_wbinv_all	= xscale_cache_purgeD,
	.cf_dcache_wbinv_range	= xscale_cache_purgeD_rng,
	.cf_dcache_inv_range	= xscale_cache_flushD_rng,
	.cf_dcache_wb_range	= xscale_cache_cleanD_rng,

	.cf_idcache_wbinv_all	= xscale_cache_purgeID,
	.cf_idcache_wbinv_range	= xscale_cache_purgeID_rng,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= armv4_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= xscale_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= xscale_context_switch,

	.cf_setup		= xscale_setup
};
#endif
/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */

#if defined(CPU_CORTEX)
/*
 * Dispatch table for Cortex (ARMv7) cores: ARMv7 cache/TTB/sleep
 * routines over the ARM11 TLB and write-buffer routines.
 */
struct cpu_functions cortex_cpufuncs = {
	/* CPU functions */

	.cf_id			= cpufunc_id,
	.cf_cpwait		= cpufunc_nullop,

	/* MMU functions */

	.cf_control		= cpufunc_control,
	.cf_domains		= cpufunc_domains,
	.cf_setttb		= armv7_setttb,
	.cf_faultstatus		= cpufunc_faultstatus,
	.cf_faultaddress	= cpufunc_faultaddress,

	/* TLB functions */

	.cf_tlb_flushID		= arm11_tlb_flushID,
	.cf_tlb_flushID_SE	= armv7_tlb_flushID_SE,
	.cf_tlb_flushI		= arm11_tlb_flushI,
	.cf_tlb_flushI_SE	= arm11_tlb_flushI_SE,
	.cf_tlb_flushD		= arm11_tlb_flushD,
	.cf_tlb_flushD_SE	= arm11_tlb_flushD_SE,

	/* Cache operations */

	.cf_icache_sync_all	= armv7_icache_sync_all,
	.cf_dcache_wbinv_all	= armv7_dcache_wbinv_all,

	.cf_dcache_inv_range	= armv7_dcache_inv_range,
	.cf_dcache_wb_range	= armv7_dcache_wb_range,
	.cf_dcache_wbinv_range	= armv7_dcache_wbinv_range,

	.cf_icache_sync_range	= armv7_icache_sync_range,
	.cf_idcache_wbinv_range	= armv7_idcache_wbinv_range,


	.cf_idcache_wbinv_all	= armv7_idcache_wbinv_all,

	/* Other functions */

	.cf_flush_prefetchbuf	= cpufunc_nullop,
	.cf_drain_writebuf	= arm11_drain_writebuf,
	.cf_flush_brnchtgt_C	= cpufunc_nullop,
	.cf_flush_brnchtgt_E	= (void *)cpufunc_nullop,

	.cf_sleep		= armv7_cpu_sleep,

	/* Soft functions */

	.cf_dataabt_fixup	= cpufunc_null_fixup,
	.cf_prefetchabt_fixup	= cpufunc_null_fixup,

	.cf_context_switch	= armv7_context_switch,

	.cf_setup		= armv7_setup

};
#endif /* CPU_CORTEX */


/*
 * Global constants also used by locore.s
 */

struct cpu_functions cpufuncs;	/* active vector, filled in at attach time */
u_int cputype;			/* CPU type, from the main ID register */
u_int cpu_reset_needs_v4_MMU_disable;	/* flag used in locore.s */

#if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \
    defined(CPU_ARM9E) || defined(CPU_ARM10) || defined(CPU_ARM11) || \
    defined(CPU_FA526) || \
    defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
    defined(CPU_CORTEX)
static void get_cachetype_cp15(void);

/* Additional cache information local to this file.  Log2 of some of the
   above numbers. */
static int	arm_dcache_l2_nsets;
static int	arm_dcache_l2_assoc;
static int	arm_dcache_l2_linesize;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
/*
 * Select a cache level/type in the Cache Size Selection register and
 * read back the corresponding Cache Size ID value.  "cssr" is one of
 * the CPU_CSSR_* selectors (optionally ORed with CPU_CSSR_InD for the
 * instruction cache).
 */
static inline u_int
get_cachesize_cp15(int cssr)
{
	u_int csid;

#if (CPU_CORTEX) > 0
	__asm volatile(".arch\tarmv7a");
	/* ARMv7: write CSSELR (p15, 2, c0, c0, 0), then read CCSIDR. */
	__asm volatile("mcr p15, 2, %0, c0, c0, 0" :: "r" (cssr));
	__asm volatile("isb");	 /* sync to the new cssr */
#else
	/*
	 * NOTE(review): non-Cortex path uses opcode1=1, CRm=c0, opc2=2 —
	 * presumably an implementation-specific encoding; confirm against
	 * the target core's TRM.
	 */
	__asm volatile("mcr p15, 1, %0, c0, c0, 2" :: "r" (cssr));
#endif
	__asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (csid));
	return csid;
}
#endif

/*
 * Probe the cache geometry through the CP15 Cache Type register (and,
 * for ARMv6/v7 CPUs reporting register format 4, the per-level Cache
 * Size ID registers) and fill in the global arm_picache_*,
 * arm_pdcache_*, arm_dcache_* and arm_dcache_l2_* description
 * variables used by the cache maintenance routines.
 */
static void
get_cachetype_cp15()
{
	u_int ctype, isize, dsize;
	u_int multiplier;

	__asm volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	/*
	 * ...and thus spake the ARM ARM:
	 *
	 * 	If an <opcode2> value corresponding to an unimplemented or
	 * 	reserved ID register is encountered, the System Control
	 * 	processor returns the value of the main ID register.
	 */
	if (ctype == cpu_id())
		goto out;

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
	if (CPU_CT_FORMAT(ctype) == 4) {
		u_int csid0, csid1, csid2;

		/* Line sizes come from the Cache Type register itself. */
		isize = 1U << (CPU_CT4_ILINE(ctype) + 2);
		dsize = 1U << (CPU_CT4_DLINE(ctype) + 2);

		csid0 = get_cachesize_cp15(CPU_CSSR_L1); /* select L1 dcache values */
		arm_pdcache_ways = CPU_CSID_ASSOC(csid0) + 1;
		arm_pdcache_line_size = dsize;
		arm_pdcache_size = arm_pdcache_line_size * arm_pdcache_ways;
		arm_pdcache_size *= (CPU_CSID_NUMSETS(csid0) + 1);
		arm_cache_prefer_mask = PAGE_SIZE;

		arm_dcache_align = arm_pdcache_line_size;

		csid1 = get_cachesize_cp15(CPU_CSSR_L1|CPU_CSSR_InD); /* select L1 icache values */
		arm_picache_ways = CPU_CSID_ASSOC(csid1) + 1;
		arm_picache_line_size = isize;
		arm_picache_size = arm_picache_line_size * arm_picache_ways;
		arm_picache_size *= (CPU_CSID_NUMSETS(csid1) + 1);
		/*
		 * NOTE(review): this assignment and the arm_dcache_align one
		 * below repeat the two assignments made just after the dcache
		 * probe above; they are redundant but harmless.
		 */
		arm_cache_prefer_mask = PAGE_SIZE;

		arm_dcache_align = arm_pdcache_line_size;

		csid2 = get_cachesize_cp15(CPU_CSSR_L2); /* select L2 cache values */
		arm_dcache_l2_assoc = CPU_CSID_ASSOC(csid2) + 1;
		arm_dcache_l2_linesize = 1 << (CPU_CSID_LEN(csid2) + 2);
		arm_dcache_l2_nsets = CPU_CSID_NUMSETS(csid2) + 1;
		arm_pcache_type = CPU_CT_CTYPE_WB14;
		goto out;
	}
#endif /* ARM_MMU_V6 + ARM_MMU_V7 > 0 */

	/* Pre-v7 format: the S bit distinguishes separate vs unified. */
	if ((ctype & CPU_CT_S) == 0)
		arm_pcache_unified = 1;

	/*
	 * If you want to know how this code works, go read the ARM ARM.
	 */

	arm_pcache_type = CPU_CT_CTYPE(ctype);

	if (arm_pcache_unified == 0) {
		isize = CPU_CT_ISIZE(ctype);
		multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2;
		arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3);
		if (CPU_CT_xSIZE_ASSOC(isize) == 0) {
			if (isize & CPU_CT_xSIZE_M)
				arm_picache_line_size = 0; /* not present */
			else
				arm_picache_ways = 1;
		} else {
			arm_picache_ways = multiplier <<
			    (CPU_CT_xSIZE_ASSOC(isize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
			/* P bit set: cache is page-colored; widen the mask. */
			if (CPU_CT_xSIZE_P & isize)
				arm_cache_prefer_mask |=
				    __BIT(9 + CPU_CT_xSIZE_SIZE(isize)
					  - CPU_CT_xSIZE_ASSOC(isize))
				    - PAGE_SIZE;
#endif
		}
		arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8);
	}

	dsize = CPU_CT_DSIZE(ctype);
	multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2;
	arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3);
	if (CPU_CT_xSIZE_ASSOC(dsize) == 0) {
		if (dsize & CPU_CT_xSIZE_M)
			arm_pdcache_line_size = 0; /* not present */
		else
			arm_pdcache_ways = 1;
	} else {
		arm_pdcache_ways = multiplier <<
		    (CPU_CT_xSIZE_ASSOC(dsize) - 1);
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
		if (CPU_CT_xSIZE_P & dsize)
			arm_cache_prefer_mask |=
			    __BIT(9 + CPU_CT_xSIZE_SIZE(dsize)
				  - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE;
#endif
	}
	arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8);

	arm_dcache_align = arm_pdcache_line_size;

	/* Log2 geometry used by the set/way loops in the arm9 code. */
	arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2;
	arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3;
	arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) -
	    CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize);

 out:
	arm_dcache_align_mask = arm_dcache_align - 1;
}
#endif /* ARM7TDMI || ARM8 || ARM9 || ARM9E || ARM10 || ARM11 || FA526 || XSCALE || CORTEX */

#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
    defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \
    defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0)
/* Cache information for CPUs without
cache type registers. */
struct cachetab {
	u_int32_t ct_cpuid;		/* CPU_ID_CPU_MASKed id this entry matches */
	int	ct_pcache_type;		/* CPU_CT_CTYPE_* cache policy */
	int	ct_pcache_unified;	/* nonzero: unified I/D cache */
	int	ct_pdcache_size;
	int	ct_pdcache_line_size;
	int	ct_pdcache_ways;
	int	ct_picache_size;
	int	ct_picache_line_size;
	int	ct_picache_ways;
};

struct cachetab cachetab[] = {
    /* cpuid,           cache type,       u,  dsiz, ls, wy,  isiz, ls, wy */
    { CPU_ID_ARM2,      0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM250,    0,                1,     0,  0,  0,     0,  0,  0 },
    { CPU_ID_ARM3,      CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM610,	CPU_CT_CTYPE_WT,  1,  4096, 16, 64,     0,  0,  0 },
    { CPU_ID_ARM710,    CPU_CT_CTYPE_WT,  1,  8192, 32,  4,     0,  0,  0 },
    { CPU_ID_ARM7500,   CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM710A,   CPU_CT_CTYPE_WT,  1,  8192, 16,  4,     0,  0,  0 },
    { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT,  1,  4096, 16,  4,     0,  0,  0 },
    /* XXX is this type right for SA-1? */
    { CPU_ID_SA110,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1100,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_SA1110,	CPU_CT_CTYPE_WB1, 0,  8192, 32, 32, 16384, 32, 32 },
    { CPU_ID_IXP1200,	CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */
    { 0, 0, 0, 0, 0, 0, 0, 0}	/* sentinel: ct_cpuid == 0 terminates */
};

static void get_cachetype_table(void);

/*
 * Fill in the global cache description variables from the static
 * cachetab[] entry matching the current CPU id.  Used for cores that
 * predate the CP15 Cache Type register.  If no entry matches, the
 * globals are left at their prior (zero) values.
 */
static void
get_cachetype_table(void)
{
	int i;
	u_int32_t cpuid = cpu_id();

	for (i = 0; cachetab[i].ct_cpuid != 0; i++) {
		if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) {
			arm_pcache_type = cachetab[i].ct_pcache_type;
			arm_pcache_unified = cachetab[i].ct_pcache_unified;
			arm_pdcache_size = cachetab[i].ct_pdcache_size;
			arm_pdcache_line_size =
			    cachetab[i].ct_pdcache_line_size;
			arm_pdcache_ways = cachetab[i].ct_pdcache_ways;
			arm_picache_size = cachetab[i].ct_picache_size;
			arm_picache_line_size =
			    cachetab[i].ct_picache_line_size;
			arm_picache_ways = cachetab[i].ct_picache_ways;
		}
	}
	arm_dcache_align = arm_pdcache_line_size;

	arm_dcache_align_mask = arm_dcache_align - 1;
}

#endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1110 || IXP12X0 */

/*
 * Identify the CPU, install the matching cpu_functions vector, probe
 * the cache geometry and initialise the pmap PTE protos.  Returns 0 on
 * success, ARCHITECTURE_NOT_PRESENT for unsupported CPUs.
 *
 * Cannot panic here as we may not have a console yet ...
 */

int
set_cpufuncs(void)
{
	if (cputype == 0) {
		cputype = cpufunc_id();
		cputype &= CPU_ID_CPU_MASK;
	}

	/*
	 * NOTE: cpu_do_powersave defaults to off.  If we encounter a
	 * CPU type where we want to use it by default, then we set it.
	 */
#ifdef CPU_ARM2
	if (cputype == CPU_ID_ARM2) {
		cpufuncs = arm2_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif /* CPU_ARM2 */
#ifdef CPU_ARM250
	if (cputype == CPU_ID_ARM250) {
		cpufuncs = arm250_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif
#ifdef CPU_ARM3
	/* ARM3 is matched on implementor + part-number nibble. */
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000300) {
		cpufuncs = arm3_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		return 0;
	}
#endif	/* CPU_ARM3 */
#ifdef CPU_ARM6
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x00000f00) == 0x00000600) {
		cpufuncs = arm6_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM6 */
#ifdef CPU_ARM7
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) {
		cpufuncs = arm7_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_table();
		pmap_pte_init_generic();
		return 0;
	}
#endif	/* CPU_ARM7 */
#ifdef CPU_ARM7TDMI
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    CPU_ID_IS7(cputype) &&
	    (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) {
		cpufuncs = arm7tdmi_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif
#ifdef CPU_ARM8
	if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD &&
	    (cputype & 0x0000f000) == 0x00008000) {
		cpufuncs = arm8_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 0;	/* XXX correct? */
		get_cachetype_cp15();
		pmap_pte_init_arm8();
		return 0;
	}
#endif	/* CPU_ARM8 */
#ifdef CPU_ARM9
	if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD ||
	     (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) &&
	    (cputype & 0x0000f000) == 0x00009000) {
		cpufuncs = arm9_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Precompute the set/way loop bounds for arm9 cache ops. */
		arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		arm9_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    arm9_dcache_sets_inc;
		arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		arm9_dcache_index_max = 0U - arm9_dcache_index_inc;
#ifdef	ARM9_CACHE_WRITE_THROUGH
		pmap_pte_init_arm9();
#else
		pmap_pte_init_generic();
#endif
		return 0;
	}
#endif /* CPU_ARM9 */
#if defined(CPU_ARM9E) || defined(CPU_ARM10)
	if (cputype == CPU_ID_ARM926EJS ||
	    cputype == CPU_ID_ARM1026EJS) {
		cpufuncs = armv5_ec_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM9E || CPU_ARM10 */
#ifdef CPU_ARM10
	if (/* cputype == CPU_ID_ARM1020T || */
	    cputype == CPU_ID_ARM1020E) {
		/*
		 * Select write-through cacheing (this isn't really an
		 * option on ARM1020T).
		 */
		cpufuncs = arm10_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		get_cachetype_cp15();
		/* Precompute the set/way loop bounds for armv5 cache ops. */
		armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize;
		armv5_dcache_sets_max =
		    (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) -
		    armv5_dcache_sets_inc;
		armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc);
		armv5_dcache_index_max = 0U - armv5_dcache_index_inc;
		pmap_pte_init_generic();
		return 0;
	}
#endif /* CPU_ARM10 */
#if defined(CPU_ARM11)
	if (cputype == CPU_ID_ARM1136JS ||
	    cputype == CPU_ID_ARM1136JSR1 ||
	    cputype == CPU_ID_ARM1176JS) {
		cpufuncs = arm11_cpufuncs;
#if defined(CPU_ARM1136)
		/* 1136 gets its own vector; rev0 needs a sleep workaround. */
		if (cputype != CPU_ID_ARM1176JS) {
			cpufuncs = arm1136_cpufuncs;
			if (cputype == CPU_ID_ARM1136JS)
				cpufuncs.cf_sleep = arm1136_sleep_rev0;
		}
#endif
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		cpu_do_powersave = 1;			/* Enable powersave */
		get_cachetype_cp15();
#ifdef ARM11_CACHE_WRITE_THROUGH
		pmap_pte_init_arm11();
#else
		pmap_pte_init_generic();
#endif
		/* VIPT cache: tell UVM how many page colors to maintain. */
		if (arm_cache_prefer_mask)
			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;

		return 0;
	}
#endif /* CPU_ARM11 */
#ifdef CPU_SA110
	if (cputype == CPU_ID_SA110) {
		cpufuncs = sa110_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif	/* CPU_SA110 */
#ifdef CPU_SA1100
	if (cputype == CPU_ID_SA1100) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1100 */
#ifdef CPU_SA1110
	if (cputype == CPU_ID_SA1110) {
		cpufuncs = sa11x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_table();
		pmap_pte_init_sa1();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_SA1110 */
#ifdef CPU_FA526
	if (cputype == CPU_ID_FA526) {
		cpufuncs = fa526_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* SA needs it */
		get_cachetype_cp15();
		pmap_pte_init_generic();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif	/* CPU_FA526 */
#ifdef CPU_IXP12X0
	if (cputype == CPU_ID_IXP1200) {
		cpufuncs = ixp12x0_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;
		get_cachetype_table();
		pmap_pte_init_sa1();
		return 0;
	}
#endif /* CPU_IXP12X0 */
#ifdef CPU_XSCALE_80200
	if (cputype == CPU_ID_80200) {
		int rev = cpufunc_id() & CPU_ID_REVISION_MASK;

		i80200_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

#if defined(XSCALE_CCLKCFG)
		/*
		 * Crank CCLKCFG to maximum legal value.
		 */
		__asm volatile ("mcr p14, 0, %0, c6, c0, 0"
			:
			: "r" (XSCALE_CCLKCFG));
#endif

		/*
		 * XXX Disable ECC in the Bus Controller Unit; we
		 * don't really support it, yet.  Clear any pending
		 * error indications.
		 */
		__asm volatile("mcr p13, 0, %0, c0, c1, 0"
			:
			: "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		/*
		 * i80200 errata: Step-A0 and A1 have a bug where
		 * D$ dirty bits are not cleared on "invalidate by
		 * address".
		 *
		 * Workaround: Clean cache line before invalidating.
		 */
		if (rev == 0 || rev == 1)
			cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng;

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80200 */
#ifdef CPU_XSCALE_80321
	if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 ||
	    cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 ||
	    cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) {
		i80321_icu_init();

		/*
		 * Reset the Performance Monitoring Unit to a
		 * pristine state:
		 *	- CCNT, PMN0, PMN1 reset to 0
		 *	- overflow indications cleared
		 *	- all counters disabled
		 */
		__asm volatile("mcr p14, 0, %0, c0, c0, 0"
			:
			: "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF|
			       PMNC_CC_IF));

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();
		return 0;
	}
#endif /* CPU_XSCALE_80321 */
#ifdef __CPU_XSCALE_PXA2XX
	/* ignore core revision to test PXA2xx CPUs */
	if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 ||
	    (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) {

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		/* Use powersave on this CPU. */
		cpu_do_powersave = 1;

		return 0;
	}
#endif /* __CPU_XSCALE_PXA2XX */
#ifdef CPU_XSCALE_IXP425
	if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 ||
	    cputype == CPU_ID_IXP425_266) {
		ixp425_icu_init();

		cpufuncs = xscale_cpufuncs;
#if defined(PERFCTRS)
		xscale_pmu_init();
#endif

		cpu_reset_needs_v4_MMU_disable = 1;	/* XScale needs it */
		get_cachetype_cp15();
		pmap_pte_init_xscale();

		return 0;
	}
#endif /* CPU_XSCALE_IXP425 */
#if defined(CPU_CORTEX)
	if (cputype == CPU_ID_CORTEXA8R1 ||
	    cputype == CPU_ID_CORTEXA8R2 ||
	    cputype == CPU_ID_CORTEXA8R3 ||
	    cputype == CPU_ID_CORTEXA9R1) {
		cpufuncs = cortex_cpufuncs;
		cpu_reset_needs_v4_MMU_disable = 1;	/* V4 or higher */
		cpu_do_powersave = 1;			/* Enable powersave */
		get_cachetype_cp15();
		pmap_pte_init_armv7();
		/* VIPT cache: tell UVM how many page colors to maintain. */
		if (arm_cache_prefer_mask)
			uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1;

		return 0;
	}
#endif /* CPU_CORTEX */
	/*
	 * Bzzzz. And the answer was ...
	 */
	panic("No support for this CPU type (%08x) in kernel", cputype);
	return(ARCHITECTURE_NOT_PRESENT);	/* not reached after panic */
}

#ifdef CPU_ARM2
/* Fixed id for ARM2, which has no CP15 id register to read. */
u_int arm2_id(void)
{

	return CPU_ID_ARM2;
}
#endif /* CPU_ARM2 */

#ifdef CPU_ARM250
/* Fixed id for ARM250, which has no CP15 id register to read. */
u_int arm250_id(void)
{

	return CPU_ID_ARM250;
}
#endif /* CPU_ARM250 */

/*
 * Fixup routines for data and prefetch aborts.
 *
 * Several compile time symbols are used
 *
 * DEBUG_FAULT_CORRECTION - Print debugging information during the
 * correction of registers after a fault.
 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts
 * when defined should use late aborts
 */


/*
 * Null abort fixup routine.
 * For use when no fixup is required.
 */
int
cpufunc_null_fixup(void *arg)
{
	return(ABORT_FIXUP_OK);
}


#if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \
	defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI)

#ifdef DEBUG_FAULT_CORRECTION
#define DFC_PRINTF(x)		printf x
#define DFC_DISASSEMBLE(x)	disassemble(x)
#else
#define DFC_PRINTF(x)		/* nothing */
#define DFC_DISASSEMBLE(x)	/* nothing */
#endif

/*
 * "Early" data abort fixup.
 *
 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode).  Also used
 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI].
 *
 * In early aborts, we may have to fix up LDM, STM, LDC and STC.
 * Returns ABORT_FIXUP_OK on success, ABORT_FIXUP_FAILED when the base
 * register cannot be safely corrected.
 */
int
early_abort_fixup(void *arg)
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed.  This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	if ((fault_instruction & 0x0e000000) == 0x08000000) {
		int base;
		int loop;
		int count;
		int *registers = &frame->tf_r0;

		DFC_PRINTF(("LDM/STM\n"));
		DFC_DISASSEMBLE(fault_pc);
		/* Only fix the base register if writeback (W) was enabled. */
		if (fault_instruction & (1 << 21)) {
			DFC_PRINTF(("This instruction must be corrected\n"));
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			/* Count registers transferred */
			count = 0;
			for (loop = 0; loop < 16; ++loop) {
				if (fault_instruction & (1<<loop))
					++count;
			}
			DFC_PRINTF(("%d registers used\n", count));
			DFC_PRINTF(("Corrected r%d by %d bytes ",
				       base, count * 4));
			if (fault_instruction & (1 << 23)) {
				DFC_PRINTF(("down\n"));
				registers[base] -= count * 4;
			} else {
				DFC_PRINTF(("up\n"));
				registers[base] += count * 4;
			}
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000) {
		int base;
		int offset;
		int *registers = &frame->tf_r0;

		/* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */

		DFC_DISASSEMBLE(fault_pc);

		/* Only need to fix registers if write back is turned on */

		if ((fault_instruction & (1 << 21)) != 0) {
			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;

			offset = (fault_instruction & 0xff) << 2;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
			if ((fault_instruction & (1 << 23)) != 0)
				offset = -offset;
			registers[base] += offset;
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	} else if ((fault_instruction & 0x0e000000) == 0x0c000000)
		/*
		 * NOTE(review): this condition duplicates the branch above,
		 * so this return is unreachable.  A different mask was
		 * presumably intended — confirm against the ARM ARM
		 * instruction-class encodings before changing it.
		 */
		return ABORT_FIXUP_FAILED;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed.  This should not happen as the kstack is
		 * always valid.
		 */
	}

	return(ABORT_FIXUP_OK);
}
#endif	/* CPU_ARM2/250/3/6/7 */


#if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \
	defined(CPU_ARM7TDMI)
/*
 * "Late" (base updated) data abort fixup
 *
 * For ARM6 (in late-abort mode) and ARM7.
 *
 * In this model, all data-transfer instructions need fixing up.  We defer
 * LDM, STM, LDC and STC fixup to the early-abort handler.
 */
int
late_abort_fixup(void *arg)
{
	trapframe_t *frame = arg;
	u_int fault_pc;
	u_int fault_instruction;
	int saved_lr = 0;

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the following fixup code.
		 */

		saved_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = frame->tf_svc_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed.  This should not happen as the kstack is
		 * always valid.
		 */
	}

	/* Get fault address and status from the CPU */

	fault_pc = frame->tf_pc;
	fault_instruction = *((volatile unsigned int *)fault_pc);

	/* Decode the fault instruction and fix the registers as needed */

	/* Was is a swap instruction ? */

	if ((fault_instruction & 0x0fb00ff0) == 0x01000090) {
		DFC_DISASSEMBLE(fault_pc);
	} else if ((fault_instruction & 0x0c000000) == 0x04000000) {

		/* Was is a ldr/str instruction */
		/* This is for late abort only */

		int base;
		int offset;
		int *registers = &frame->tf_r0;

		DFC_DISASSEMBLE(fault_pc);

		/* This is for late abort only */

		/*
		 * NOTE(review): the test accepts post-indexed (P clear) OR
		 * writeback (W set) forms, which is wider than the
		 * "no writeback" wording of the comment below suggests.
		 */
		if ((fault_instruction & (1 << 24)) == 0
		    || (fault_instruction & (1 << 21)) != 0) {
			/* postindexed ldr/str with no writeback */

			base = (fault_instruction >> 16) & 0x0f;
			if (base == 13 &&
			    (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE)
				return ABORT_FIXUP_FAILED;
			if (base == 15)
				return ABORT_FIXUP_FAILED;
			DFC_PRINTF(("late abt fix: r%d=%08x : ",
				       base, registers[base]));
			if ((fault_instruction & (1 << 25)) == 0) {
				/* Immediate offset - easy */

				offset = fault_instruction & 0xfff;
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				registers[base] += offset;
				DFC_PRINTF(("imm=%08x ", offset));
			} else {
				/* offset is a shifted register */
				int shift;

				offset = fault_instruction & 0x0f;
				if (offset == base)
					return ABORT_FIXUP_FAILED;

				/*
				 * Register offset - hard we have to
				 * cope with shifts !
				 */
				offset = registers[offset];

				if ((fault_instruction & (1 << 4)) == 0)
					/* shift with amount */
					shift = (fault_instruction >> 7) & 0x1f;
				else {
					/* shift with register */
					if ((fault_instruction & (1 << 7)) != 0)
						/* undefined for now so bail out */
						return ABORT_FIXUP_FAILED;
					shift = ((fault_instruction >> 8) & 0xf);
					if (base == shift)
						return ABORT_FIXUP_FAILED;
					DFC_PRINTF(("shift reg=%d ", shift));
					shift = registers[shift];
				}
				DFC_PRINTF(("shift=%08x ", shift));
				/*
				 * NOTE(review): when shift is forced to 32
				 * below, ">> 32" / "<< shift" on a 32-bit
				 * operand is undefined behavior in C; the
				 * intended result (0, or sign-fill) should be
				 * computed explicitly.
				 */
				switch (((fault_instruction >> 5) & 0x3)) {
				case 0 : /* Logical left */
					offset = (int)(((u_int)offset) << shift);
					break;
				case 1 : /* Logical Right */
					if (shift == 0) shift = 32;
					offset = (int)(((u_int)offset) >> shift);
					break;
				case 2 : /* Arithmetic Right */
					if (shift == 0) shift = 32;
					offset = (int)(((int)offset) >> shift);
					break;
				case 3 : /* Rotate right (rol or rxx) */
					return ABORT_FIXUP_FAILED;
					break;
				}

				DFC_PRINTF(("abt: fixed LDR/STR with "
					       "register offset\n"));
				if ((fault_instruction & (1 << 23)))
					offset = -offset;
				DFC_PRINTF(("offset=%08x ", offset));
				registers[base] += offset;
			}
			DFC_PRINTF(("r%d=%08x\n", base, registers[base]));
		}
	}

	if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) {

		/* Ok an abort in SVC mode */

		/*
		 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage
		 * as the fault happened in svc mode but we need it in the
		 * usr slot so we can treat the registers as an array of ints
		 * during fixing.
		 * NOTE: This PC is in the position but writeback is not
		 * allowed on r15.
		 * Doing it like this is more efficient than trapping this
		 * case in all possible locations in the prior fixup code.
		 */

		frame->tf_svc_lr = frame->tf_usr_lr;
		frame->tf_usr_lr = saved_lr;

		/*
		 * Note the trapframe does not have the SVC r13 so a fault
		 * from an instruction with writeback to r13 in SVC mode is
		 * not allowed.  This should not happen as the kstack is
		 * always valid.
		 */
	}

	/*
	 * Now let the early-abort fixup routine have a go, in case it
	 * was an LDM, STM, LDC or STC that faulted.
	 */

	return early_abort_fixup(arg);
}
#endif	/* CPU_ARM6(LATE)/7/7TDMI */

/*
 * CPU Setup code
 */

#if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \
	defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \
	defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \
	defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
	defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \
	defined(CPU_ARM10) || defined(CPU_ARM11) || defined(CPU_ARM1136) || \
	defined(CPU_FA526) || defined(CPU_CORTEX)

/* Boot-option actions: ignore, OR the bit in, or BIC (clear) it. */
#define IGN	0
#define OR	1
#define BIC	2

struct cpu_option {
	const char *co_name;	/* boot argument name */
	int	co_falseop;	/* action when option is false */
	int	co_trueop;	/* action when option is true */
	int	co_value;	/* CPU_CONTROL_* bit(s) affected */
};

static u_int parse_cpu_options(char *, struct cpu_option *, u_int);

/*
 * Apply the boolean boot options in "optlist" found in the boot args
 * string to the proposed control-register value "cpuctrl" and return
 * the adjusted value.  A NULL args string leaves cpuctrl unchanged.
 */
static u_int
parse_cpu_options(char *args, struct cpu_option *optlist, u_int cpuctrl)
{
	int integer;

	if (args == NULL)
		return(cpuctrl);

	while (optlist->co_name) {
		if (get_bootconf_option(args, optlist->co_name,
		    BOOTOPT_TYPE_BOOLEAN, &integer)) {
			if (integer) {
				if (optlist->co_trueop == OR)
					cpuctrl |= optlist->co_value;
				else if (optlist->co_trueop == BIC)
					cpuctrl &= ~optlist->co_value;
			} else {
				if (optlist->co_falseop == OR)
					cpuctrl |= optlist->co_value;
				else if (optlist->co_falseop == BIC)
					cpuctrl &= ~optlist->co_value;
			}
		}
		++optlist;
	}
	return(cpuctrl);
}
#endif /* CPU_ARM6 || ... || CPU_FA526 || CPU_CORTEX (see matching #if above) */

#if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \
	|| defined(CPU_ARM8)
/* Options common to the ARM6/7/8 family. */
struct cpu_option arm678_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, CPU_CONTROL_IDC_ENABLE },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "cpu.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

#endif	/* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */

#ifdef CPU_ARM6
struct cpu_option arm6_options[] = {
	{ "arm6.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm6.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm6.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm6.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * cf_setup for ARM6: build the CP15 control-register value (applying
 * boot options), flush the caches and install it.
 */
void
arm6_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

	/* Set up default control registers bits */
	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	/*
	 * NOTE(review): cpuctrlmask is computed but never used below;
	 * cpu_control() is called with a full 0xffffffff mask instead.
	 */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
		 | CPU_CONTROL_AFLT_ENABLE;

#ifdef ARM6_LATE_ABORT
	cpuctrl |= CPU_CONTROL_LABT_ENABLE;
#endif	/* ARM6_LATE_ABORT */

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
#endif	/* CPU_ARM6 */

#ifdef CPU_ARM7
struct cpu_option arm7_options[] = {
	{ "arm7.cache",		BIC, OR,  CPU_CONTROL_IDC_ENABLE },
	{ "arm7.nocache",	OR,  BIC, CPU_CONTROL_IDC_ENABLE },
	{ "arm7.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "arm7.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
#ifdef COMPAT_12
	{ "fpaclk2",		BIC, OR,  CPU_CONTROL_CPCLK },
#endif	/* COMPAT_12 */
	{ "arm700.fpaclk",	BIC, OR,  CPU_CONTROL_CPCLK },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * cf_setup for ARM7: build the CP15 control-register value (applying
 * boot options), flush the caches and install it.
 */
void
arm7_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE;
	/* NOTE(review): cpuctrlmask is unused here too; see arm6_setup. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE
		 | CPU_CONTROL_AFLT_ENABLE;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl);
	cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
#endif	/* CPU_ARM7 */

#ifdef CPU_ARM7TDMI
struct cpu_option
arm7tdmi_options[] = { 2266 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2267 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2268 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2269 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2270 #ifdef COMPAT_12 2271 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK }, 2272 #endif /* COMPAT_12 */ 2273 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK }, 2274 { NULL, IGN, IGN, 0 } 2275 }; 2276 2277 void 2278 arm7tdmi_setup(char *args) 2279 { 2280 int cpuctrl; 2281 2282 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2283 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2284 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2285 2286 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2287 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl); 2288 2289 #ifdef __ARMEB__ 2290 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2291 #endif 2292 2293 /* Clear out the cache */ 2294 cpu_idcache_wbinv_all(); 2295 2296 /* Set the control register */ 2297 curcpu()->ci_ctrl = cpuctrl; 2298 cpu_control(0xffffffff, cpuctrl); 2299 } 2300 #endif /* CPU_ARM7TDMI */ 2301 2302 #ifdef CPU_ARM8 2303 struct cpu_option arm8_options[] = { 2304 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2305 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2306 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2307 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2308 #ifdef COMPAT_12 2309 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2310 #endif /* COMPAT_12 */ 2311 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2312 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2313 { NULL, IGN, IGN, 0 } 2314 }; 2315 2316 void 2317 arm8_setup(char *args) 2318 { 2319 int integer; 2320 int cpuctrl, cpuctrlmask; 2321 int clocktest; 2322 int setclock = 0; 2323 2324 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2325 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2326 | 
CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2327 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2328 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2329 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2330 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE 2331 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE; 2332 2333 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2334 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2335 #endif 2336 2337 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2338 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl); 2339 2340 #ifdef __ARMEB__ 2341 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2342 #endif 2343 2344 /* Get clock configuration */ 2345 clocktest = arm8_clock_config(0, 0) & 0x0f; 2346 2347 /* Special ARM8 clock and test configuration */ 2348 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2349 clocktest = 0; 2350 setclock = 1; 2351 } 2352 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2353 if (integer) 2354 clocktest |= 0x01; 2355 else 2356 clocktest &= ~(0x01); 2357 setclock = 1; 2358 } 2359 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2360 if (integer) 2361 clocktest |= 0x02; 2362 else 2363 clocktest &= ~(0x02); 2364 setclock = 1; 2365 } 2366 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) { 2367 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2; 2368 setclock = 1; 2369 } 2370 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) { 2371 clocktest |= (integer & 7) << 5; 2372 setclock = 1; 2373 } 2374 2375 /* Clear out the cache */ 2376 cpu_idcache_wbinv_all(); 2377 2378 /* Set the control register */ 2379 curcpu()->ci_ctrl = cpuctrl; 2380 cpu_control(0xffffffff, cpuctrl); 2381 2382 /* Set the clock/test register */ 2383 if (setclock) 2384 arm8_clock_config(0x7f, clocktest); 2385 } 2386 #endif /* CPU_ARM8 */ 2387 2388 #ifdef CPU_ARM9 
struct cpu_option arm9_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm9.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm9.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm9.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * ARM9 CPU setup: split I/D caches (IC/DC bits instead of the unified
 * IDC bit used by arm6/7/8).  Note this variant passes cpuctrlmask --
 * not 0xffffffff -- to cpu_control(), unlike most other setup routines
 * in this file.
 */
void
arm9_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE;
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
	    | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC
	    | CPU_CONTROL_ROUNDROBIN;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	/* Relocate the exception vectors to 0xffff0000 if requested. */
	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(cpuctrlmask, cpuctrl);

}
#endif /* CPU_ARM9 */

#if defined(CPU_ARM9E) || defined(CPU_ARM10)
struct cpu_option arm10_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm10.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm10.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "arm10.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * ARM9E/ARM10 CPU setup: enables branch prediction by default and
 * flushes the caches both before and after writing the control
 * register (with an extra direct CP15 invalidate in between).
 */
void
arm10_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE;
	/* NOTE(review): mask unused; cpu_control() below uses 0xffffffff. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean. */
	/* CP15 c7,c7,0: invalidate both I and D caches. */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
#endif	/* CPU_ARM9E || CPU_ARM10 */

#if defined(CPU_ARM11)
struct cpu_option arm11_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "arm11.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "arm11.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * ARM11 CPU setup: additionally seeds the CP15 thread/CPU ID register
 * used by curcpu()/curlwp, and opens CP10/CP11 access so the VFP
 * detection code can probe for a VFP unit.
 */
void
arm11_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

#if defined(PROCESS_ID_IS_CURCPU)
	/* set curcpu() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
#elif defined(PROCESS_ID_IS_CURLWP)
	/* set curlwp() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
#endif

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    /* | CPU_CONTROL_BPRD_ENABLE */;
	/* NOTE(review): mask unused; cpu_control() below uses 0xffffffff. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean. */
	__asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : );

	/* Allow detection code to find the VFP if it's fitted. */
	/* CP15 c1,c0,2: Coprocessor Access Control Register. */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);

	/* And again. */
	cpu_idcache_wbinv_all();
}
#endif	/* CPU_ARM11 */

#if defined(CPU_CORTEX)
struct cpu_option armv7_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "armv7.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "armv7.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "armv7.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL, IGN, IGN, 0}
};

/*
 * ARMv7 (Cortex) CPU setup.
 * NOTE(review): this is the only setup routine here still using a
 * K&R-style (old-style) definition; consider modernizing to
 * "void armv7_setup(char *args)" to match the rest of the file.
 */
void
armv7_setup(args)
	char *args;
{
	int cpuctrl, cpuctrlmask;

#if defined(PROCESS_ID_IS_CURCPU)
	/* set curcpu() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
#elif defined(PROCESS_ID_IS_CURLWP)
	/* set curlwp() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
#endif

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_IC_ENABLE
	    | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_BPRD_ENABLE ;
	/* NOTE(review): mask unused; cpu_control() below uses 0xffffffff. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE
	    | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
	    | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE
	    | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
	    | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, armv7_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}

/* Clean the data cache to the level of coherency. Slow. */
void
armv7_dcache_wbinv_all()
{
	u_int clidr, loc, level;

	/* Cache Level ID Register */
	__asm volatile("mrc\tp15, 1, %0, c0, c0, 1" : "=r" (clidr));

	loc = (clidr >> 24) & 7;	/* Level of Coherency */

	/*
	 * NOTE(review): levels are 0-indexed, so "level <= loc" visits one
	 * level past the Level of Coherency; the ctype check below skips
	 * levels with no data cache, which masks this -- confirm intent.
	 */
	for (level = 0; level <= loc; level++) {
		u_int ctype, csid;
		int line_size, ways, nsets, wayshift, setshift;

		ctype = (clidr >> (level * 3)) & 7;
		/* We're supposed to stop when ctype == 0, but we
		 * trust that loc isn't larger than necessary. */
		if (ctype < 2) continue;	/* no cache / only icache */

		/* Select the level, then read its Cache Size ID register. */
		csid = get_cachesize_cp15(level << 1);
		line_size = CPU_CSID_LEN(csid);
		ways = CPU_CSID_ASSOC(csid);
		nsets = (csid >> 13) & 0x7fff;	/* NumSets field (sets - 1) */

		wayshift = __builtin_clz(ways);	/* leading zeros */
		setshift = line_size + 4;

		/* Walk every set and way, highest to lowest. */
		for (; nsets >= 0; nsets--) {
			int way;

			for (way = ways; way >= 0; way--) {
				/* Clean by set/way */
				const u_int sw = (way << wayshift)
				    | (nsets << setshift)
				    | (level << 1);

				__asm volatile("mcr\tp15, 0, %0, c7, c10, 2"
				    :: "r"(sw));
			}
		}
	}

	/* Ensure completion and visibility of the maintenance ops. */
	__asm volatile("dsb");
	__asm volatile("isb");
}
#endif /* CPU_CORTEX */



#if defined(CPU_ARM1136)
/*
 * ARM1136 CPU setup: like arm11_setup() (it reuses arm11_options) but
 * drives cpu_control() through a "write as existing" mask and applies
 * the errata 364296 workaround on r0pX parts via the auxiliary control
 * register.
 */
void
arm1136_setup(char *args)
{
	int cpuctrl, cpuctrl_wax;
	uint32_t auxctrl, auxctrl_wax;
	uint32_t tmp, tmp2;
	uint32_t sbz=0;
	uint32_t cpuid;

#if defined(PROCESS_ID_IS_CURCPU)
	/* set curcpu() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store));
#elif defined(PROCESS_ID_IS_CURLWP)
	/* set curlwp() */
	__asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0));
#endif

	cpuid = cpu_id();

	cpuctrl =
		CPU_CONTROL_MMU_ENABLE  |
		CPU_CONTROL_DC_ENABLE   |
		CPU_CONTROL_WBUF_ENABLE |
		CPU_CONTROL_32BP_ENABLE |
		CPU_CONTROL_32BD_ENABLE |
		CPU_CONTROL_LABT_ENABLE |
		CPU_CONTROL_SYST_ENABLE |
		CPU_CONTROL_IC_ENABLE;

	/*
	 * "write as existing" bits
	 * inverse of this is mask
	 */
	cpuctrl_wax =
		(3 << 30) |
		(1 << 29) |
		(1 << 28) |
		(3 << 26) |
		(3 << 19) |
		(1 << 17);

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	auxctrl = 0;
	auxctrl_wax = ~0;
	/* This option enables the workaround for the 364296 ARM1136
	 * r0pX errata (possible cache data corruption with
	 * hit-under-miss enabled). It sets the undocumented bit 31 in
	 * the auxiliary control register and the FI bit in the control
	 * register, thus disabling hit-under-miss without putting the
	 * processor into full low interrupt latency mode. ARM11MPCore
	 * is not affected.
	 */
	if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */
		cpuctrl |= CPU_CONTROL_FI_ENABLE;
		auxctrl = ARM11R0_AUXCTL_PFI;
		auxctrl_wax = ~ARM11R0_AUXCTL_PFI;
	}

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Now really make sure they are clean. */
	__asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz));

	/* Allow detection code to find the VFP if it's fitted. */
	__asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff));

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(~cpuctrl_wax, cpuctrl);

	/*
	 * Read-modify-write the auxiliary control register; only write
	 * it back (mcrne) if the masked update actually changed it.
	 */
	__asm volatile ("mrc	p15, 0, %0, c1, c0, 1\n\t"
			"bic	%1, %0, %2\n\t"
			"eor	%1, %0, %3\n\t"
			"teq	%0, %1\n\t"
			"mcrne	p15, 0, %1, c1, c0, 1\n\t"
			: "=r"(tmp), "=r"(tmp2) :
			  "r"(~auxctrl_wax), "r"(auxctrl));

	/* And again. */
	cpu_idcache_wbinv_all();
}
#endif	/* CPU_ARM1136 */

#ifdef CPU_SA110
struct cpu_option sa110_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa110.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa110.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa110.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * SA-110 (StrongARM) CPU setup: usual control-register dance, then
 * enables clock switching via an implementation-defined CP15 op.
 */
void
sa110_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE;
	/* NOTE(review): mask unused; the masked call below is commented out. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/*
	 * enable clockswitching, note that this doesn't read or write to r0,
	 * r0 is just to make it valid asm
	 */
	__asm ("mcr 15, 0, r0, c15, c1, 2");
}
#endif	/* CPU_SA110 */

#if defined(CPU_SA1100) || defined(CPU_SA1110)
struct cpu_option sa11x0_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "sa11x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "sa11x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "sa11x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * SA-1100/SA-1110 CPU setup: like sa110_setup() but with late aborts
 * enabled by default and no clock-switching CP15 op.
 */
void
sa11x0_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
	/* NOTE(review): mask unused; cpu_control() below uses 0xffffffff. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
#endif	/* CPU_SA1100 || CPU_SA1110 */

#if defined(CPU_FA526)
struct cpu_option fa526_options[] = {
#ifdef COMPAT_12
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "nowritebuf",		IGN, BIC, CPU_CONTROL_WBUF_ENABLE },
#endif	/* COMPAT_12 */
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * FA526 CPU setup: same structure as sa11x0_setup(), with its own
 * (smaller) option table.
 */
void
fa526_setup(char *args)
{
	int cpuctrl, cpuctrlmask;

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE;
	/* NOTE(review): mask unused; cpu_control() below uses 0xffffffff. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, fa526_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	cpu_control(0xffffffff, cpuctrl);
}
#endif	/* CPU_FA526 */

#if defined(CPU_IXP12X0)
struct cpu_option ixp12x0_options[] = {
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "ixp12x0.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "ixp12x0.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ "cpu.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ "cpu.nowritebuf",	OR,  BIC, CPU_CONTROL_WBUF_ENABLE },
	{ "ixp12x0.writebuf",	BIC, OR,  CPU_CONTROL_WBUF_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * IXP12x0 CPU setup.  Unlike most routines in this file this one
 * passes cpuctrlmask (not 0xffffffff) to cpu_control().
 */
void
ixp12x0_setup(char *args)
{
	int cpuctrl, cpuctrlmask;


	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE;

	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE
		 | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/* Set the control register */
	curcpu()->ci_ctrl = cpuctrl;
	/* cpu_control(0xffffffff, cpuctrl); */
	cpu_control(cpuctrlmask, cpuctrl);
}
#endif /* CPU_IXP12X0 */

#if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \
    defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || defined(CPU_CORTEX)
struct cpu_option xscale_options[] = {
#ifdef COMPAT_12
	{ "branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "nocache",		IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
#endif	/* COMPAT_12 */
	{ "cpu.branchpredict", 	BIC, OR,  CPU_CONTROL_BPRD_ENABLE },
	{ "cpu.cache",		BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "cpu.nocache",	OR,  BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE },
	{ "xscale.cache",	BIC, OR,  (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) },
	{ "xscale.icache",	BIC, OR,  CPU_CONTROL_IC_ENABLE },
	{ "xscale.dcache",	BIC, OR,  CPU_CONTROL_DC_ENABLE },
	{ NULL,			IGN, IGN, 0 }
};

/*
 * XScale CPU setup: in addition to the control register, this manages
 * the write-coalescing ("K") bit in the XScale auxiliary control
 * register, governed by the XSCALE_NO_COALESCE_WRITES kernel option.
 */
void
xscale_setup(char *args)
{
	uint32_t auxctl;
	int cpuctrl, cpuctrlmask;

	/*
	 * The XScale Write Buffer is always enabled.  Our option
	 * is to enable/disable coalescing.  Note that bits 6:3
	 * must always be enabled.
	 */

	cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE
		 | CPU_CONTROL_BPRD_ENABLE;
	/* NOTE(review): mask unused; the masked call below is commented out. */
	cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE
		 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE
		 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE
		 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE
		 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE
		 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE
		 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC;

#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS
	cpuctrl |= CPU_CONTROL_AFLT_ENABLE;
#endif

	cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl);

#ifdef __ARMEB__
	cpuctrl |= CPU_CONTROL_BEND_ENABLE;
#endif

	if (vector_page == ARM_VECTORS_HIGH)
		cpuctrl |= CPU_CONTROL_VECRELOC;

	/* Clear out the cache */
	cpu_idcache_wbinv_all();

	/*
	 * Set the control register.  Note that bits 6:3 must always
	 * be set to 1.
	 */
	curcpu()->ci_ctrl = cpuctrl;
/*	cpu_control(cpuctrlmask, cpuctrl);*/
	cpu_control(0xffffffff, cpuctrl);

	/* Make sure write coalescing is turned on */
	__asm volatile("mrc p15, 0, %0, c1, c0, 1"
		: "=r" (auxctl));
#ifdef XSCALE_NO_COALESCE_WRITES
	auxctl |= XSCALE_AUXCTL_K;
#else
	auxctl &= ~XSCALE_AUXCTL_K;
#endif
	__asm volatile("mcr p15, 0, %0, c1, c0, 1"
		: : "r" (auxctl));
}
#endif	/* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */
