1 /* $NetBSD: cpufunc.c,v 1.86 2008/07/22 07:07:23 matt Exp $ */ 2 3 /* 4 * arm7tdmi support code Copyright (c) 2001 John Fremlin 5 * arm8 support code Copyright (c) 1997 ARM Limited 6 * arm8 support code Copyright (c) 1997 Causality Limited 7 * arm9 support code Copyright (C) 2001 ARM Ltd 8 * arm11 support code Copyright (c) 2007 Microsoft 9 * Copyright (c) 1997 Mark Brinicombe. 10 * Copyright (c) 1997 Causality Limited 11 * All rights reserved. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed by Causality Limited. 24 * 4. The name of Causality Limited may not be used to endorse or promote 25 * products derived from this software without specific prior written 26 * permission. 27 * 28 * THIS SOFTWARE IS PROVIDED BY CAUSALITY LIMITED ``AS IS'' AND ANY EXPRESS 29 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 30 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 31 * DISCLAIMED. IN NO EVENT SHALL CAUSALITY LIMITED BE LIABLE FOR ANY DIRECT, 32 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 33 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 34 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 * 40 * RiscBSD kernel project 41 * 42 * cpufuncs.c 43 * 44 * C functions for supporting CPU / MMU / TLB specific operations. 45 * 46 * Created : 30/01/97 47 */ 48 49 #include <sys/cdefs.h> 50 __KERNEL_RCSID(0, "$NetBSD: cpufunc.c,v 1.86 2008/07/22 07:07:23 matt Exp $"); 51 52 #include "opt_compat_netbsd.h" 53 #include "opt_cpuoptions.h" 54 #include "opt_perfctrs.h" 55 56 #include <sys/types.h> 57 #include <sys/param.h> 58 #include <sys/pmc.h> 59 #include <sys/systm.h> 60 #include <machine/cpu.h> 61 #include <machine/bootconfig.h> 62 #include <arch/arm/arm/disassem.h> 63 64 #include <uvm/uvm.h> 65 66 #include <arm/cpuconf.h> 67 #include <arm/cpufunc.h> 68 69 #ifdef CPU_XSCALE_80200 70 #include <arm/xscale/i80200reg.h> 71 #include <arm/xscale/i80200var.h> 72 #endif 73 74 #ifdef CPU_XSCALE_80321 75 #include <arm/xscale/i80321reg.h> 76 #include <arm/xscale/i80321var.h> 77 #endif 78 79 #ifdef CPU_XSCALE_IXP425 80 #include <arm/xscale/ixp425reg.h> 81 #include <arm/xscale/ixp425var.h> 82 #endif 83 84 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) 85 #include <arm/xscale/xscalereg.h> 86 #endif 87 88 #if defined(PERFCTRS) 89 struct arm_pmc_funcs *arm_pmc; 90 #endif 91 92 /* PRIMARY CACHE VARIABLES */ 93 int arm_picache_size; 94 int arm_picache_line_size; 95 int arm_picache_ways; 96 97 int arm_pdcache_size; /* and unified */ 98 int arm_pdcache_line_size; 99 int arm_pdcache_ways; 100 #if (ARM_MMU_V6) != 0 101 int arm_cache_prefer_mask; 102 #endif 103 104 105 int arm_pcache_type; 106 int arm_pcache_unified; 107 108 int arm_dcache_align; 109 int arm_dcache_align_mask; 110 111 /* 1 == use cpu_sleep(), 0 == don't */ 112 int cpu_do_powersave; 113 114 #ifdef CPU_ARM2 115 struct cpu_functions arm2_cpufuncs = { 116 /* CPU functions */ 117 118 .cf_id = arm2_id, 119 .cf_cpwait = cpufunc_nullop, 120 121 /* MMU functions */ 122 123 .cf_control = (void *)cpufunc_nullop, 124 125 /* TLB functions */ 126 127 .cf_tlb_flushID = cpufunc_nullop, 128 .cf_tlb_flushID_SE = (void *)cpufunc_nullop, 129 .cf_tlb_flushI = cpufunc_nullop, 130 .cf_tlb_flushI_SE = (void *)cpufunc_nullop, 131 .cf_tlb_flushD = cpufunc_nullop, 132 .cf_tlb_flushD_SE = (void *)cpufunc_nullop, 133 134 /* Cache operations */ 135 136 .cf_icache_sync_all = cpufunc_nullop, 137 .cf_icache_sync_range = (void *) cpufunc_nullop, 138 139 .cf_dcache_wbinv_all = arm3_cache_flush, 140 .cf_dcache_wbinv_range = (void *)cpufunc_nullop, 141 .cf_dcache_inv_range = (void *)cpufunc_nullop, 142 .cf_dcache_wb_range = (void *)cpufunc_nullop, 143 144 .cf_idcache_wbinv_all = cpufunc_nullop, 145 .cf_idcache_wbinv_range = (void *)cpufunc_nullop, 146 147 /* Other functions */ 148 149 .cf_flush_prefetchbuf = cpufunc_nullop, 150 .cf_drain_writebuf = cpufunc_nullop, 151 .cf_flush_brnchtgt_C = cpufunc_nullop, 152 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 153 154 .cf_sleep = (void *)cpufunc_nullop, 155 156 /* Soft functions */ 157 158 .cf_dataabt_fixup = early_abort_fixup, 159 .cf_prefetchabt_fixup = cpufunc_null_fixup, 160 161 .cf_setup = (void *)cpufunc_nullop 162 163 }; 164 #endif /* CPU_ARM2 */ 165 166 #ifdef CPU_ARM250 167 struct cpu_functions arm250_cpufuncs = { 168 /* CPU functions */ 169 170 .cf_id = arm250_id, 171 .cf_cpwait = cpufunc_nullop, 172 173 /* MMU functions */ 174 175 .cf_control = (void *)cpufunc_nullop, 176 177 /* TLB functions */ 178 179 .cf_tlb_flushID = cpufunc_nullop, 180 .cf_tlb_flushID_SE = (void *)cpufunc_nullop, 181 .cf_tlb_flushI = cpufunc_nullop, 182 .cf_tlb_flushI_SE = (void *)cpufunc_nullop, 183 .cf_tlb_flushD = cpufunc_nullop, 184 .cf_tlb_flushD_SE = (void *)cpufunc_nullop, 185 186 /* Cache operations */ 187 188 .cf_icache_sync_all = cpufunc_nullop, 189 .cf_icache_sync_range = (void *) cpufunc_nullop, 190 191 .cf_dcache_wbinv_all = arm3_cache_flush, 192 .cf_dcache_wbinv_range = (void *)cpufunc_nullop, 193 .cf_dcache_inv_range = (void *)cpufunc_nullop, 194 .cf_dcache_wb_range = (void *)cpufunc_nullop, 195 196 .cf_idcache_wbinv_all = cpufunc_nullop, 197 .cf_idcache_wbinv_range = (void *)cpufunc_nullop, 198 199 /* Other functions */ 200 201 .cf_flush_prefetchbuf = cpufunc_nullop, 202 .cf_drain_writebuf = cpufunc_nullop, 203 .cf_flush_brnchtgt_C = cpufunc_nullop, 204 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 205 206 .cf_sleep = (void *)cpufunc_nullop, 207 208 /* Soft functions */ 209 210 .cf_dataabt_fixup = early_abort_fixup, 211 .cf_prefetchabt_fixup = cpufunc_null_fixup, 212 213 .cf_setup = (void *)cpufunc_nullop 214 215 }; 216 #endif /* CPU_ARM250 */ 217 218 #ifdef CPU_ARM3 219 struct cpu_functions arm3_cpufuncs = { 220 /* CPU functions */ 221 222 .cf_id = cpufunc_id, 223 .cf_cpwait = cpufunc_nullop, 224 225 /* MMU functions */ 226 227 .cf_control = arm3_control, 228 229 /* TLB functions */ 230 231 .cf_tlb_flushID = cpufunc_nullop, 232 .cf_tlb_flushID_SE = (void *)cpufunc_nullop, 233 .cf_tlb_flushI = cpufunc_nullop, 234 .cf_tlb_flushI_SE = (void *)cpufunc_nullop, 235 .cf_tlb_flushD = cpufunc_nullop, 236 .cf_tlb_flushD_SE = (void *)cpufunc_nullop, 237 238 /* Cache operations */ 239 240 .cf_icache_sync_all = cpufunc_nullop, 241 .cf_icache_sync_range = (void *) cpufunc_nullop, 242 243 .cf_dcache_wbinv_all = arm3_cache_flush, 244 .cf_dcache_wbinv_range = (void *)arm3_cache_flush, 245 .cf_dcache_inv_range = (void *)arm3_cache_flush, 246 .cf_dcache_wb_range = (void *)cpufunc_nullop, 247 248 .cf_idcache_wbinv_all = arm3_cache_flush, 249 .cf_idcache_wbinv_range = (void *)arm3_cache_flush, 250 251 /* Other functions */ 252 253 .cf_flush_prefetchbuf = cpufunc_nullop, 254 .cf_drain_writebuf = cpufunc_nullop, 255 .cf_flush_brnchtgt_C = cpufunc_nullop, 256 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 257 258 .cf_sleep = (void *)cpufunc_nullop, 259 260 /* Soft functions */ 261 262 .cf_dataabt_fixup = early_abort_fixup, 263 .cf_prefetchabt_fixup = cpufunc_null_fixup, 264 265 .cf_setup = (void *)cpufunc_nullop 266 267 }; 268 #endif /* CPU_ARM3 */ 269 270 #ifdef CPU_ARM6 271 struct cpu_functions arm6_cpufuncs = { 272 /* CPU functions */ 273 274 .cf_id = cpufunc_id, 275 .cf_cpwait = cpufunc_nullop, 276 277 /* MMU functions */ 278 279 .cf_control = cpufunc_control, 280 .cf_domains = cpufunc_domains, 281 .cf_setttb = arm67_setttb, 282 .cf_faultstatus = cpufunc_faultstatus, 283 .cf_faultaddress = cpufunc_faultaddress, 284 285 /* TLB functions */ 286 287 .cf_tlb_flushID = arm67_tlb_flush, 288 .cf_tlb_flushID_SE = arm67_tlb_purge, 289 .cf_tlb_flushI = arm67_tlb_flush, 290 .cf_tlb_flushI_SE = arm67_tlb_purge, 291 .cf_tlb_flushD = arm67_tlb_flush, 292 .cf_tlb_flushD_SE = arm67_tlb_purge, 293 294 /* Cache operations */ 295 296 .cf_icache_sync_all = cpufunc_nullop, 297 .cf_icache_sync_range = (void *) cpufunc_nullop, 298 299 .cf_dcache_wbinv_all = arm67_cache_flush, 300 .cf_dcache_wbinv_range = (void *)arm67_cache_flush, 301 .cf_dcache_inv_range = (void *)arm67_cache_flush, 302 .cf_dcache_wb_range = (void *)cpufunc_nullop, 303 304 .cf_idcache_wbinv_all = arm67_cache_flush, 305 .cf_idcache_wbinv_range = (void *)arm67_cache_flush, 306 307 /* Other functions */ 308 309 .cf_flush_prefetchbuf = cpufunc_nullop, 310 .cf_drain_writebuf = cpufunc_nullop, 311 .cf_flush_brnchtgt_C = cpufunc_nullop, 312 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 313 314 .cf_sleep = (void *)cpufunc_nullop, 315 316 /* Soft functions */ 317 318 #ifdef ARM6_LATE_ABORT 319 .cf_dataabt_fixup = late_abort_fixup, 320 #else 321 .cf_dataabt_fixup = early_abort_fixup, 322 #endif 323 .cf_prefetchabt_fixup = cpufunc_null_fixup, 324 325 .cf_context_switch = arm67_context_switch, 326 327 .cf_setup = arm6_setup 328 329 }; 330 #endif /* CPU_ARM6 */ 331 332 #ifdef CPU_ARM7 333 struct cpu_functions arm7_cpufuncs = { 334 /* CPU functions */ 335 336 .cf_id = cpufunc_id, 337 .cf_cpwait = cpufunc_nullop, 338 339 /* MMU functions */ 340 341 .cf_control = cpufunc_control, 342 .cf_domains = cpufunc_domains, 343 .cf_setttb = arm67_setttb, 344 .cf_faultstatus = cpufunc_faultstatus, 345 .cf_faultaddress = cpufunc_faultaddress, 346 347 /* TLB functions */ 348 349 .cf_tlb_flushID = arm67_tlb_flush, 350 .cf_tlb_flushID_SE = arm67_tlb_purge, 351 .cf_tlb_flushI = arm67_tlb_flush, 352 .cf_tlb_flushI_SE = arm67_tlb_purge, 353 .cf_tlb_flushD = arm67_tlb_flush, 354 .cf_tlb_flushD_SE = arm67_tlb_purge, 355 356 /* Cache operations */ 357 358 .cf_icache_sync_all = cpufunc_nullop, 359 .cf_icache_sync_range = (void *)cpufunc_nullop, 360 361 .cf_dcache_wbinv_all = arm67_cache_flush, 362 .cf_dcache_wbinv_range = (void *)arm67_cache_flush, 363 .cf_dcache_inv_range = (void *)arm67_cache_flush, 364 .cf_dcache_wb_range = (void *)cpufunc_nullop, 365 366 .cf_idcache_wbinv_all = arm67_cache_flush, 367 .cf_idcache_wbinv_range = (void *)arm67_cache_flush, 368 369 /* Other functions */ 370 371 .cf_flush_prefetchbuf = cpufunc_nullop, 372 .cf_drain_writebuf = cpufunc_nullop, 373 .cf_flush_brnchtgt_C = cpufunc_nullop, 374 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 375 376 .cf_sleep = (void *)cpufunc_nullop, 377 378 /* Soft functions */ 379 380 .cf_dataabt_fixup = late_abort_fixup, 381 .cf_prefetchabt_fixup = cpufunc_null_fixup, 382 383 .cf_context_switch = arm67_context_switch, 384 385 .cf_setup = arm7_setup 386 387 }; 388 #endif /* CPU_ARM7 */ 389 390 #ifdef CPU_ARM7TDMI 391 struct cpu_functions arm7tdmi_cpufuncs = { 392 /* CPU functions */ 393 394 .cf_id = cpufunc_id, 395 .cf_cpwait = cpufunc_nullop, 396 397 /* MMU functions */ 398 399 .cf_control = cpufunc_control, 400 .cf_domains = cpufunc_domains, 401 .cf_setttb = arm7tdmi_setttb, 402 .cf_faultstatus = cpufunc_faultstatus, 403 .cf_faultaddress = cpufunc_faultaddress, 404 405 /* TLB functions */ 406 407 .cf_tlb_flushID = arm7tdmi_tlb_flushID, 408 .cf_tlb_flushID_SE = arm7tdmi_tlb_flushID_SE, 409 .cf_tlb_flushI = arm7tdmi_tlb_flushID, 410 .cf_tlb_flushI_SE = arm7tdmi_tlb_flushID_SE, 411 .cf_tlb_flushD = arm7tdmi_tlb_flushID, 412 .cf_tlb_flushD_SE = arm7tdmi_tlb_flushID_SE, 413 414 /* Cache operations */ 415 416 .cf_icache_sync_all = cpufunc_nullop, 417 .cf_icache_sync_range = (void *)cpufunc_nullop, 418 419 .cf_dcache_wbinv_all = arm7tdmi_cache_flushID, 420 .cf_dcache_wbinv_range = (void *)arm7tdmi_cache_flushID, 421 .cf_dcache_inv_range = (void *)arm7tdmi_cache_flushID, 422 .cf_dcache_wb_range = (void *)cpufunc_nullop, 423 424 .cf_idcache_wbinv_all = arm7tdmi_cache_flushID, 425 .cf_idcache_wbinv_range = (void *)arm7tdmi_cache_flushID, 426 427 /* Other functions */ 428 429 .cf_flush_prefetchbuf = cpufunc_nullop, 430 .cf_drain_writebuf = cpufunc_nullop, 431 .cf_flush_brnchtgt_C = cpufunc_nullop, 432 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 433 434 .cf_sleep = (void *)cpufunc_nullop, 435 436 /* Soft functions */ 437 438 .cf_dataabt_fixup = late_abort_fixup, 439 .cf_prefetchabt_fixup = cpufunc_null_fixup, 440 441 .cf_context_switch = arm7tdmi_context_switch, 442 443 .cf_setup = arm7tdmi_setup 444 445 }; 446 #endif /* CPU_ARM7TDMI */ 447 448 #ifdef CPU_ARM8 449 struct cpu_functions arm8_cpufuncs = { 450 /* CPU functions */ 451 452 .cf_id = cpufunc_id, 453 .cf_cpwait = cpufunc_nullop, 454 455 /* MMU functions */ 456 457 .cf_control = cpufunc_control, 458 .cf_domains = cpufunc_domains, 459 .cf_setttb = arm8_setttb, 460 .cf_faultstatus = cpufunc_faultstatus, 461 .cf_faultaddress = cpufunc_faultaddress, 462 463 /* TLB functions */ 464 465 .cf_tlb_flushID = arm8_tlb_flushID, 466 .cf_tlb_flushID_SE = arm8_tlb_flushID_SE, 467 .cf_tlb_flushI = arm8_tlb_flushID, 468 .cf_tlb_flushI_SE = arm8_tlb_flushID_SE, 469 .cf_tlb_flushD = arm8_tlb_flushID, 470 .cf_tlb_flushD_SE = arm8_tlb_flushID_SE, 471 472 /* Cache operations */ 473 474 .cf_icache_sync_all = cpufunc_nullop, 475 .cf_icache_sync_range = (void *)cpufunc_nullop, 476 477 .cf_dcache_wbinv_all = arm8_cache_purgeID, 478 .cf_dcache_wbinv_range = (void *)arm8_cache_purgeID, 479 /*XXX*/ .cf_dcache_inv_range = (void *)arm8_cache_purgeID, 480 .cf_dcache_wb_range = (void *)arm8_cache_cleanID, 481 482 .cf_idcache_wbinv_all = arm8_cache_purgeID, 483 .cf_idcache_wbinv_range = (void *)arm8_cache_purgeID, 484 485 /* Other functions */ 486 487 .cf_flush_prefetchbuf = cpufunc_nullop, 488 .cf_drain_writebuf = cpufunc_nullop, 489 .cf_flush_brnchtgt_C = cpufunc_nullop, 490 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 491 492 .cf_sleep = (void *)cpufunc_nullop, 493 494 /* Soft functions */ 495 496 .cf_dataabt_fixup = cpufunc_null_fixup, 497 .cf_prefetchabt_fixup = cpufunc_null_fixup, 498 499 .cf_context_switch = arm8_context_switch, 500 501 .cf_setup = arm8_setup 502 }; 503 #endif /* CPU_ARM8 */ 504 505 #ifdef CPU_ARM9 506 struct cpu_functions arm9_cpufuncs = { 507 /* CPU functions */ 508 509 .cf_id = cpufunc_id, 510 .cf_cpwait = cpufunc_nullop, 511 512 /* MMU functions */ 513 514 .cf_control = cpufunc_control, 515 .cf_domains = cpufunc_domains, 516 .cf_setttb = arm9_setttb, 517 .cf_faultstatus = cpufunc_faultstatus, 518 .cf_faultaddress = cpufunc_faultaddress, 519 520 /* TLB functions */ 521 522 .cf_tlb_flushID = armv4_tlb_flushID, 523 .cf_tlb_flushID_SE = arm9_tlb_flushID_SE, 524 .cf_tlb_flushI = armv4_tlb_flushI, 525 .cf_tlb_flushI_SE = (void *)armv4_tlb_flushI, 526 .cf_tlb_flushD = armv4_tlb_flushD, 527 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE, 528 529 /* Cache operations */ 530 531 .cf_icache_sync_all = arm9_icache_sync_all, 532 .cf_icache_sync_range = arm9_icache_sync_range, 533 534 .cf_dcache_wbinv_all = arm9_dcache_wbinv_all, 535 .cf_dcache_wbinv_range = arm9_dcache_wbinv_range, 536 /*XXX*/ .cf_dcache_inv_range = arm9_dcache_wbinv_range, 537 .cf_dcache_wb_range = arm9_dcache_wb_range, 538 539 .cf_idcache_wbinv_all = arm9_idcache_wbinv_all, 540 .cf_idcache_wbinv_range = arm9_idcache_wbinv_range, 541 542 /* Other functions */ 543 544 .cf_flush_prefetchbuf = cpufunc_nullop, 545 .cf_drain_writebuf = armv4_drain_writebuf, 546 .cf_flush_brnchtgt_C = cpufunc_nullop, 547 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 548 549 .cf_sleep = (void *)cpufunc_nullop, 550 551 /* Soft functions */ 552 553 .cf_dataabt_fixup = cpufunc_null_fixup, 554 .cf_prefetchabt_fixup = cpufunc_null_fixup, 555 556 .cf_context_switch = arm9_context_switch, 557 558 .cf_setup = arm9_setup 559 560 }; 561 #endif /* CPU_ARM9 */ 562 563 #if defined(CPU_ARM9E) || defined(CPU_ARM10) 564 struct cpu_functions armv5_ec_cpufuncs = { 565 /* CPU functions */ 566 567 .cf_id = cpufunc_id, 568 .cf_cpwait = cpufunc_nullop, 569 570 /* MMU functions */ 571 572 .cf_control = cpufunc_control, 573 .cf_domains = cpufunc_domains, 574 .cf_setttb = armv5_ec_setttb, 575 .cf_faultstatus = cpufunc_faultstatus, 576 .cf_faultaddress = cpufunc_faultaddress, 577 578 /* TLB functions */ 579 580 .cf_tlb_flushID = armv4_tlb_flushID, 581 .cf_tlb_flushID_SE = arm10_tlb_flushID_SE, 582 .cf_tlb_flushI = armv4_tlb_flushI, 583 .cf_tlb_flushI_SE = arm10_tlb_flushI_SE, 584 .cf_tlb_flushD = armv4_tlb_flushD, 585 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE, 586 587 /* Cache operations */ 588 589 .cf_icache_sync_all = armv5_ec_icache_sync_all, 590 .cf_icache_sync_range = armv5_ec_icache_sync_range, 591 592 .cf_dcache_wbinv_all = armv5_ec_dcache_wbinv_all, 593 .cf_dcache_wbinv_range = armv5_ec_dcache_wbinv_range, 594 /*XXX*/ .cf_dcache_inv_range = armv5_ec_dcache_wbinv_range, 595 .cf_dcache_wb_range = armv5_ec_dcache_wb_range, 596 597 .cf_idcache_wbinv_all = armv5_ec_idcache_wbinv_all, 598 .cf_idcache_wbinv_range = armv5_ec_idcache_wbinv_range, 599 600 /* Other functions */ 601 602 .cf_flush_prefetchbuf = cpufunc_nullop, 603 .cf_drain_writebuf = armv4_drain_writebuf, 604 .cf_flush_brnchtgt_C = cpufunc_nullop, 605 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 606 607 .cf_sleep = (void *)cpufunc_nullop, 608 609 /* Soft functions */ 610 611 .cf_dataabt_fixup = cpufunc_null_fixup, 612 .cf_prefetchabt_fixup = cpufunc_null_fixup, 613 614 .cf_context_switch = arm10_context_switch, 615 616 .cf_setup = arm10_setup 617 618 }; 619 #endif /* CPU_ARM9E || CPU_ARM10 */ 620 621 #ifdef CPU_ARM10 622 struct cpu_functions arm10_cpufuncs = { 623 /* CPU functions */ 624 625 .cf_id = cpufunc_id, 626 .cf_cpwait = cpufunc_nullop, 627 628 /* MMU functions */ 629 630 .cf_control = cpufunc_control, 631 .cf_domains = cpufunc_domains, 632 .cf_setttb = armv5_setttb, 633 .cf_faultstatus = cpufunc_faultstatus, 634 .cf_faultaddress = cpufunc_faultaddress, 635 636 /* TLB functions */ 637 638 .cf_tlb_flushID = armv4_tlb_flushID, 639 .cf_tlb_flushID_SE = arm10_tlb_flushID_SE, 640 .cf_tlb_flushI = armv4_tlb_flushI, 641 .cf_tlb_flushI_SE = arm10_tlb_flushI_SE, 642 .cf_tlb_flushD = armv4_tlb_flushD, 643 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE, 644 645 /* Cache operations */ 646 647 .cf_icache_sync_all = armv5_icache_sync_all, 648 .cf_icache_sync_range = armv5_icache_sync_range, 649 650 .cf_dcache_wbinv_all = armv5_dcache_wbinv_all, 651 .cf_dcache_wbinv_range = armv5_dcache_wbinv_range, 652 /*XXX*/ .cf_dcache_inv_range = armv5_dcache_wbinv_range, 653 .cf_dcache_wb_range = armv5_dcache_wb_range, 654 655 .cf_idcache_wbinv_all = armv5_idcache_wbinv_all, 656 .cf_idcache_wbinv_range = armv5_idcache_wbinv_range, 657 658 /* Other functions */ 659 660 .cf_flush_prefetchbuf = cpufunc_nullop, 661 .cf_drain_writebuf = armv4_drain_writebuf, 662 .cf_flush_brnchtgt_C = cpufunc_nullop, 663 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 664 665 .cf_sleep = (void *)cpufunc_nullop, 666 667 /* Soft functions */ 668 669 .cf_dataabt_fixup = cpufunc_null_fixup, 670 .cf_prefetchabt_fixup = cpufunc_null_fixup, 671 672 .cf_context_switch = arm10_context_switch, 673 674 .cf_setup = arm10_setup 675 676 }; 677 #endif /* CPU_ARM10 */ 678 679 #ifdef CPU_ARM11 680 struct cpu_functions arm11_cpufuncs = { 681 /* CPU functions */ 682 683 .cf_id = cpufunc_id, 684 .cf_cpwait = cpufunc_nullop, 685 686 /* MMU functions */ 687 688 .cf_control = cpufunc_control, 689 .cf_domains = cpufunc_domains, 690 .cf_setttb = arm11_setttb, 691 .cf_faultstatus = cpufunc_faultstatus, 692 .cf_faultaddress = cpufunc_faultaddress, 693 694 /* TLB functions */ 695 696 .cf_tlb_flushID = arm11_tlb_flushID, 697 .cf_tlb_flushID_SE = arm11_tlb_flushID_SE, 698 .cf_tlb_flushI = arm11_tlb_flushI, 699 .cf_tlb_flushI_SE = arm11_tlb_flushI_SE, 700 .cf_tlb_flushD = arm11_tlb_flushD, 701 .cf_tlb_flushD_SE = arm11_tlb_flushD_SE, 702 703 /* Cache operations */ 704 705 .cf_icache_sync_all = armv6_icache_sync_all, 706 .cf_icache_sync_range = armv6_icache_sync_range, 707 708 .cf_dcache_wbinv_all = armv6_dcache_wbinv_all, 709 .cf_dcache_wbinv_range = armv6_dcache_wbinv_range, 710 .cf_dcache_inv_range = armv6_dcache_inv_range, 711 .cf_dcache_wb_range = armv6_dcache_wb_range, 712 713 .cf_idcache_wbinv_all = armv6_idcache_wbinv_all, 714 .cf_idcache_wbinv_range = armv6_idcache_wbinv_range, 715 716 /* Other functions */ 717 718 .cf_flush_prefetchbuf = cpufunc_nullop, 719 .cf_drain_writebuf = arm11_drain_writebuf, 720 .cf_flush_brnchtgt_C = cpufunc_nullop, 721 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 722 723 .cf_sleep = arm11_sleep, 724 725 /* Soft functions */ 726 727 .cf_dataabt_fixup = cpufunc_null_fixup, 728 .cf_prefetchabt_fixup = cpufunc_null_fixup, 729 730 .cf_context_switch = arm11_context_switch, 731 732 .cf_setup = arm11_setup 733 734 }; 735 #endif /* CPU_ARM11 */ 736 737 #ifdef CPU_ARM1136 738 struct cpu_functions arm1136_cpufuncs = { 739 /* CPU functions */ 740 741 .cf_id = cpufunc_id, 742 .cf_cpwait = cpufunc_nullop, 743 744 /* MMU functions */ 745 746 .cf_control = cpufunc_control, 747 .cf_domains = cpufunc_domains, 748 .cf_setttb = arm1136_setttb, 749 .cf_faultstatus = cpufunc_faultstatus, 750 .cf_faultaddress = cpufunc_faultaddress, 751 752 /* TLB functions */ 753 754 .cf_tlb_flushID = arm11_tlb_flushID, 755 .cf_tlb_flushID_SE = arm11_tlb_flushID_SE, 756 .cf_tlb_flushI = arm11_tlb_flushI, 757 .cf_tlb_flushI_SE = arm11_tlb_flushI_SE, 758 .cf_tlb_flushD = arm11_tlb_flushD, 759 .cf_tlb_flushD_SE = arm11_tlb_flushD_SE, 760 761 /* Cache operations */ 762 763 .cf_icache_sync_all = arm1136_icache_sync_all, /* 411920 */ 764 .cf_icache_sync_range = arm1136_icache_sync_range, /* 371025 */ 765 766 .cf_dcache_wbinv_all = arm1136_dcache_wbinv_all, /* 411920 */ 767 .cf_dcache_wbinv_range = armv6_dcache_wbinv_range, 768 .cf_dcache_inv_range = armv6_dcache_inv_range, 769 .cf_dcache_wb_range = armv6_dcache_wb_range, 770 771 .cf_idcache_wbinv_all = arm1136_idcache_wbinv_all, /* 411920 */ 772 .cf_idcache_wbinv_range = arm1136_idcache_wbinv_range, /* 371025 */ 773 774 /* Other functions */ 775 776 .cf_flush_prefetchbuf = arm1136_flush_prefetchbuf, 777 .cf_drain_writebuf = arm11_drain_writebuf, 778 .cf_flush_brnchtgt_C = cpufunc_nullop, 779 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 780 781 .cf_sleep = arm11_sleep, 782 783 /* Soft functions */ 784 785 .cf_dataabt_fixup = cpufunc_null_fixup, 786 .cf_prefetchabt_fixup = cpufunc_null_fixup, 787 788 .cf_context_switch = arm11_context_switch, 789 790 .cf_setup = arm1136_setup 791 792 }; 793 #endif /* CPU_ARM1136 */ 794 795 #ifdef CPU_SA110 796 struct cpu_functions sa110_cpufuncs = { 797 /* CPU functions */ 798 799 .cf_id = cpufunc_id, 800 .cf_cpwait = cpufunc_nullop, 801 802 /* MMU functions */ 803 804 .cf_control = cpufunc_control, 805 .cf_domains = cpufunc_domains, 806 .cf_setttb = sa1_setttb, 807 .cf_faultstatus = cpufunc_faultstatus, 808 .cf_faultaddress = cpufunc_faultaddress, 809 810 /* TLB functions */ 811 812 .cf_tlb_flushID = armv4_tlb_flushID, 813 .cf_tlb_flushID_SE = sa1_tlb_flushID_SE, 814 .cf_tlb_flushI = armv4_tlb_flushI, 815 .cf_tlb_flushI_SE = (void *)armv4_tlb_flushI, 816 .cf_tlb_flushD = armv4_tlb_flushD, 817 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE, 818 819 /* Cache operations */ 820 821 .cf_icache_sync_all = sa1_cache_syncI, 822 .cf_icache_sync_range = sa1_cache_syncI_rng, 823 824 .cf_dcache_wbinv_all = sa1_cache_purgeD, 825 .cf_dcache_wbinv_range = sa1_cache_purgeD_rng, 826 /*XXX*/ .cf_dcache_inv_range = sa1_cache_purgeD_rng, 827 .cf_dcache_wb_range = sa1_cache_cleanD_rng, 828 829 .cf_idcache_wbinv_all = sa1_cache_purgeID, 830 .cf_idcache_wbinv_range = sa1_cache_purgeID_rng, 831 832 /* Other functions */ 833 834 .cf_flush_prefetchbuf = cpufunc_nullop, 835 .cf_drain_writebuf = armv4_drain_writebuf, 836 .cf_flush_brnchtgt_C = cpufunc_nullop, 837 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 838 839 .cf_sleep = (void *)cpufunc_nullop, 840 841 /* Soft functions */ 842 843 .cf_dataabt_fixup = cpufunc_null_fixup, 844 .cf_prefetchabt_fixup = cpufunc_null_fixup, 845 846 .cf_context_switch = sa110_context_switch, 847 848 .cf_setup = sa110_setup 849 }; 850 #endif /* CPU_SA110 */ 851 852 #if defined(CPU_SA1100) || defined(CPU_SA1110) 853 struct cpu_functions sa11x0_cpufuncs = { 854 /* CPU functions */ 855 856 .cf_id = cpufunc_id, 857 .cf_cpwait = cpufunc_nullop, 858 859 /* MMU functions */ 860 861 .cf_control = cpufunc_control, 862 .cf_domains = cpufunc_domains, 863 .cf_setttb = sa1_setttb, 864 .cf_faultstatus = cpufunc_faultstatus, 865 .cf_faultaddress = cpufunc_faultaddress, 866 867 /* TLB functions */ 868 869 .cf_tlb_flushID = armv4_tlb_flushID, 870 .cf_tlb_flushID_SE = sa1_tlb_flushID_SE, 871 .cf_tlb_flushI = armv4_tlb_flushI, 872 .cf_tlb_flushI_SE = (void *)armv4_tlb_flushI, 873 .cf_tlb_flushD = armv4_tlb_flushD, 874 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE, 875 876 /* Cache operations */ 877 878 .cf_icache_sync_all = sa1_cache_syncI, 879 .cf_icache_sync_range = sa1_cache_syncI_rng, 880 881 .cf_dcache_wbinv_all = sa1_cache_purgeD, 882 .cf_dcache_wbinv_range = sa1_cache_purgeD_rng, 883 /*XXX*/ .cf_dcache_inv_range = sa1_cache_purgeD_rng, 884 .cf_dcache_wb_range = sa1_cache_cleanD_rng, 885 886 .cf_idcache_wbinv_all = sa1_cache_purgeID, 887 .cf_idcache_wbinv_range = sa1_cache_purgeID_rng, 888 889 /* Other functions */ 890 891 .cf_flush_prefetchbuf = sa11x0_drain_readbuf, 892 .cf_drain_writebuf = armv4_drain_writebuf, 893 .cf_flush_brnchtgt_C = cpufunc_nullop, 894 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 895 896 .cf_sleep = sa11x0_cpu_sleep, 897 898 /* Soft functions */ 899 900 .cf_dataabt_fixup = cpufunc_null_fixup, 901 .cf_prefetchabt_fixup = cpufunc_null_fixup, 902 903 .cf_context_switch = sa11x0_context_switch, 904 905 .cf_setup = sa11x0_setup 906 }; 907 #endif /* CPU_SA1100 || CPU_SA1110 */ 908 909 #ifdef CPU_IXP12X0 910 struct cpu_functions ixp12x0_cpufuncs = { 911 /* CPU functions */ 912 913 .cf_id = cpufunc_id, 914 .cf_cpwait = cpufunc_nullop, 915 916 /* MMU functions */ 917 918 .cf_control = cpufunc_control, 919 .cf_domains = cpufunc_domains, 920 .cf_setttb = sa1_setttb, 921 .cf_faultstatus = cpufunc_faultstatus, 922 .cf_faultaddress = cpufunc_faultaddress, 923 924 /* TLB functions */ 925 926 .cf_tlb_flushID = armv4_tlb_flushID, 927 .cf_tlb_flushID_SE = sa1_tlb_flushID_SE, 928 .cf_tlb_flushI = armv4_tlb_flushI, 929 .cf_tlb_flushI_SE = (void *)armv4_tlb_flushI, 930 .cf_tlb_flushD = armv4_tlb_flushD, 931 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE, 932 933 /* Cache operations */ 934 935 .cf_icache_sync_all = sa1_cache_syncI, 936 .cf_icache_sync_range = sa1_cache_syncI_rng, 937 938 .cf_dcache_wbinv_all = sa1_cache_purgeD, 939 .cf_dcache_wbinv_range = sa1_cache_purgeD_rng, 940 /*XXX*/ .cf_dcache_inv_range = sa1_cache_purgeD_rng, 941 .cf_dcache_wb_range = sa1_cache_cleanD_rng, 942 943 .cf_idcache_wbinv_all = sa1_cache_purgeID, 944 .cf_idcache_wbinv_range = sa1_cache_purgeID_rng, 945 946 /* Other functions */ 947 948 .cf_flush_prefetchbuf = ixp12x0_drain_readbuf, 949 .cf_drain_writebuf = armv4_drain_writebuf, 950 .cf_flush_brnchtgt_C = cpufunc_nullop, 951 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 952 953 .cf_sleep = (void *)cpufunc_nullop, 954 955 /* Soft functions */ 956 957 .cf_dataabt_fixup = cpufunc_null_fixup, 958 .cf_prefetchabt_fixup = cpufunc_null_fixup, 959 960 .cf_context_switch = ixp12x0_context_switch, 961 962 .cf_setup = ixp12x0_setup 963 }; 964 #endif /* CPU_IXP12X0 */ 965 966 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 967 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) 968 struct cpu_functions xscale_cpufuncs = { 969 /* CPU functions */ 970 971 .cf_id = cpufunc_id, 972 .cf_cpwait = xscale_cpwait, 973 974 /* MMU functions */ 975 976 .cf_control = xscale_control, 977 .cf_domains = cpufunc_domains, 978 .cf_setttb = xscale_setttb, 979 .cf_faultstatus = cpufunc_faultstatus, 980 .cf_faultaddress = cpufunc_faultaddress, 981 982 /* TLB functions */ 983 984 .cf_tlb_flushID = armv4_tlb_flushID, 985 .cf_tlb_flushID_SE = xscale_tlb_flushID_SE, 986 .cf_tlb_flushI = armv4_tlb_flushI, 987 .cf_tlb_flushI_SE = (void *)armv4_tlb_flushI, 988 .cf_tlb_flushD = armv4_tlb_flushD, 989 .cf_tlb_flushD_SE = armv4_tlb_flushD_SE, 990 991 /* Cache operations */ 992 993 .cf_icache_sync_all = xscale_cache_syncI, 994 .cf_icache_sync_range = xscale_cache_syncI_rng, 995 996 .cf_dcache_wbinv_all = xscale_cache_purgeD, 997 .cf_dcache_wbinv_range = xscale_cache_purgeD_rng, 998 .cf_dcache_inv_range = xscale_cache_flushD_rng, 999 .cf_dcache_wb_range = xscale_cache_cleanD_rng, 1000 1001 .cf_idcache_wbinv_all = xscale_cache_purgeID, 1002 .cf_idcache_wbinv_range = xscale_cache_purgeID_rng, 1003 1004 /* Other functions */ 1005 1006 .cf_flush_prefetchbuf = cpufunc_nullop, 1007 .cf_drain_writebuf = armv4_drain_writebuf, 1008 .cf_flush_brnchtgt_C = cpufunc_nullop, 1009 .cf_flush_brnchtgt_E = (void *)cpufunc_nullop, 1010 1011 .cf_sleep = xscale_cpu_sleep, 1012 1013 /* Soft functions */ 1014 1015 .cf_dataabt_fixup = cpufunc_null_fixup, 1016 .cf_prefetchabt_fixup = cpufunc_null_fixup, 1017 1018 .cf_context_switch = xscale_context_switch, 1019 1020 .cf_setup = xscale_setup 1021 }; 1022 #endif 1023 /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */ 1024 1025 /* 1026 * Global constants also used by locore.s 1027 */ 1028 1029 struct cpu_functions cpufuncs; 1030 u_int cputype; 1031 u_int cpu_reset_needs_v4_MMU_disable; /* flag used in locore.s */ 1032 1033 #if defined(CPU_ARM7TDMI) || defined(CPU_ARM8) || defined(CPU_ARM9) || \ 1034 defined (CPU_ARM9E) || defined (CPU_ARM10) || defined (CPU_ARM11) || \ 1035 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 1036 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) 1037 static void get_cachetype_cp15 __P((void)); 1038 1039 /* Additional cache information local to this file. Log2 of some of the 1040 above numbers. */ 1041 static int arm_dcache_l2_nsets; 1042 static int arm_dcache_l2_assoc; 1043 static int arm_dcache_l2_linesize; 1044 1045 static void 1046 get_cachetype_cp15() 1047 { 1048 u_int ctype, isize, dsize; 1049 u_int multiplier; 1050 1051 __asm volatile("mrc p15, 0, %0, c0, c0, 1" 1052 : "=r" (ctype)); 1053 1054 /* 1055 * ...and thus spake the ARM ARM: 1056 * 1057 * If an <opcode2> value corresponding to an unimplemented or 1058 * reserved ID register is encountered, the System Control 1059 * processor returns the value of the main ID register. 1060 */ 1061 if (ctype == cpu_id()) 1062 goto out; 1063 1064 if ((ctype & CPU_CT_S) == 0) 1065 arm_pcache_unified = 1; 1066 1067 /* 1068 * If you want to know how this code works, go read the ARM ARM. 1069 */ 1070 1071 arm_pcache_type = CPU_CT_CTYPE(ctype); 1072 1073 if (arm_pcache_unified == 0) { 1074 isize = CPU_CT_ISIZE(ctype); 1075 multiplier = (isize & CPU_CT_xSIZE_M) ? 3 : 2; 1076 arm_picache_line_size = 1U << (CPU_CT_xSIZE_LEN(isize) + 3); 1077 if (CPU_CT_xSIZE_ASSOC(isize) == 0) { 1078 if (isize & CPU_CT_xSIZE_M) 1079 arm_picache_line_size = 0; /* not present */ 1080 else 1081 arm_picache_ways = 1; 1082 } else { 1083 arm_picache_ways = multiplier << 1084 (CPU_CT_xSIZE_ASSOC(isize) - 1); 1085 #if (ARM_MMU_V6) > 0 1086 if (CPU_CT_xSIZE_P & isize) 1087 arm_cache_prefer_mask |= 1088 __BIT(9 + CPU_CT_xSIZE_SIZE(isize) 1089 - CPU_CT_xSIZE_ASSOC(isize)) 1090 - PAGE_SIZE; 1091 #endif 1092 } 1093 arm_picache_size = multiplier << (CPU_CT_xSIZE_SIZE(isize) + 8); 1094 } 1095 1096 dsize = CPU_CT_DSIZE(ctype); 1097 multiplier = (dsize & CPU_CT_xSIZE_M) ? 3 : 2; 1098 arm_pdcache_line_size = 1U << (CPU_CT_xSIZE_LEN(dsize) + 3); 1099 if (CPU_CT_xSIZE_ASSOC(dsize) == 0) { 1100 if (dsize & CPU_CT_xSIZE_M) 1101 arm_pdcache_line_size = 0; /* not present */ 1102 else 1103 arm_pdcache_ways = 1; 1104 } else { 1105 arm_pdcache_ways = multiplier << 1106 (CPU_CT_xSIZE_ASSOC(dsize) - 1); 1107 #if (ARM_MMU_V6) > 0 1108 if (CPU_CT_xSIZE_P & dsize) 1109 arm_cache_prefer_mask |= 1110 __BIT(9 + CPU_CT_xSIZE_SIZE(dsize) 1111 - CPU_CT_xSIZE_ASSOC(dsize)) - PAGE_SIZE; 1112 #endif 1113 } 1114 arm_pdcache_size = multiplier << (CPU_CT_xSIZE_SIZE(dsize) + 8); 1115 1116 arm_dcache_align = arm_pdcache_line_size; 1117 1118 arm_dcache_l2_assoc = CPU_CT_xSIZE_ASSOC(dsize) + multiplier - 2; 1119 arm_dcache_l2_linesize = CPU_CT_xSIZE_LEN(dsize) + 3; 1120 arm_dcache_l2_nsets = 6 + CPU_CT_xSIZE_SIZE(dsize) - 1121 CPU_CT_xSIZE_ASSOC(dsize) - CPU_CT_xSIZE_LEN(dsize); 1122 1123 out: 1124 arm_dcache_align_mask = arm_dcache_align - 1; 1125 } 1126 #endif /* ARM7TDMI || ARM8 || ARM9 || XSCALE */ 1127 1128 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \ 1129 defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_SA110) || \ 1130 defined(CPU_SA1100) || defined(CPU_SA1110) || defined(CPU_IXP12X0) 1131 /* Cache information for CPUs without cache type registers. */ 1132 struct cachetab { 1133 u_int32_t ct_cpuid; 1134 int ct_pcache_type; 1135 int ct_pcache_unified; 1136 int ct_pdcache_size; 1137 int ct_pdcache_line_size; 1138 int ct_pdcache_ways; 1139 int ct_picache_size; 1140 int ct_picache_line_size; 1141 int ct_picache_ways; 1142 }; 1143 1144 struct cachetab cachetab[] = { 1145 /* cpuid, cache type, u, dsiz, ls, wy, isiz, ls, wy */ 1146 { CPU_ID_ARM2, 0, 1, 0, 0, 0, 0, 0, 0 }, 1147 { CPU_ID_ARM250, 0, 1, 0, 0, 0, 0, 0, 0 }, 1148 { CPU_ID_ARM3, CPU_CT_CTYPE_WT, 1, 4096, 16, 64, 0, 0, 0 }, 1149 { CPU_ID_ARM610, CPU_CT_CTYPE_WT, 1, 4096, 16, 64, 0, 0, 0 }, 1150 { CPU_ID_ARM710, CPU_CT_CTYPE_WT, 1, 8192, 32, 4, 0, 0, 0 }, 1151 { CPU_ID_ARM7500, CPU_CT_CTYPE_WT, 1, 4096, 16, 4, 0, 0, 0 }, 1152 { CPU_ID_ARM710A, CPU_CT_CTYPE_WT, 1, 8192, 16, 4, 0, 0, 0 }, 1153 { CPU_ID_ARM7500FE, CPU_CT_CTYPE_WT, 1, 4096, 16, 4, 0, 0, 0 }, 1154 /* XXX is this type right for SA-1? */ 1155 { CPU_ID_SA110, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, 1156 { CPU_ID_SA1100, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 }, 1157 { CPU_ID_SA1110, CPU_CT_CTYPE_WB1, 0, 8192, 32, 32, 16384, 32, 32 }, 1158 { CPU_ID_IXP1200, CPU_CT_CTYPE_WB1, 0, 16384, 32, 32, 16384, 32, 32 }, /* XXX */ 1159 { 0, 0, 0, 0, 0, 0, 0, 0} 1160 }; 1161 1162 static void get_cachetype_table __P((void)); 1163 1164 static void 1165 get_cachetype_table() 1166 { 1167 int i; 1168 u_int32_t cpuid = cpu_id(); 1169 1170 for (i = 0; cachetab[i].ct_cpuid != 0; i++) { 1171 if (cachetab[i].ct_cpuid == (cpuid & CPU_ID_CPU_MASK)) { 1172 arm_pcache_type = cachetab[i].ct_pcache_type; 1173 arm_pcache_unified = cachetab[i].ct_pcache_unified; 1174 arm_pdcache_size = cachetab[i].ct_pdcache_size; 1175 arm_pdcache_line_size = 1176 cachetab[i].ct_pdcache_line_size; 1177 arm_pdcache_ways = cachetab[i].ct_pdcache_ways; 1178 arm_picache_size = cachetab[i].ct_picache_size; 1179 arm_picache_line_size = 1180 cachetab[i].ct_picache_line_size; 1181 arm_picache_ways = cachetab[i].ct_picache_ways; 1182 } 1183 } 1184 arm_dcache_align = arm_pdcache_line_size; 1185 1186 arm_dcache_align_mask = arm_dcache_align - 1; 1187 } 1188 1189 #endif /* ARM2 || ARM250 || ARM3 || ARM6 || ARM7 || SA110 || SA1100 || SA1111 || IXP12X0 */ 1190 1191 /* 1192 * Cannot panic here as we may not have a console yet ... 1193 */ 1194 1195 int 1196 set_cpufuncs() 1197 { 1198 if (cputype == 0) { 1199 cputype = cpufunc_id(); 1200 cputype &= CPU_ID_CPU_MASK; 1201 } 1202 1203 /* 1204 * NOTE: cpu_do_powersave defaults to off. If we encounter a 1205 * CPU type where we want to use it by default, then we set it. 1206 */ 1207 #ifdef CPU_ARM2 1208 if (cputype == CPU_ID_ARM2) { 1209 cpufuncs = arm2_cpufuncs; 1210 cpu_reset_needs_v4_MMU_disable = 0; 1211 get_cachetype_table(); 1212 return 0; 1213 } 1214 #endif /* CPU_ARM2 */ 1215 #ifdef CPU_ARM250 1216 if (cputype == CPU_ID_ARM250) { 1217 cpufuncs = arm250_cpufuncs; 1218 cpu_reset_needs_v4_MMU_disable = 0; 1219 get_cachetype_table(); 1220 return 0; 1221 } 1222 #endif 1223 #ifdef CPU_ARM3 1224 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD && 1225 (cputype & 0x00000f00) == 0x00000300) { 1226 cpufuncs = arm3_cpufuncs; 1227 cpu_reset_needs_v4_MMU_disable = 0; 1228 get_cachetype_table(); 1229 return 0; 1230 } 1231 #endif /* CPU_ARM3 */ 1232 #ifdef CPU_ARM6 1233 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD && 1234 (cputype & 0x00000f00) == 0x00000600) { 1235 cpufuncs = arm6_cpufuncs; 1236 cpu_reset_needs_v4_MMU_disable = 0; 1237 get_cachetype_table(); 1238 pmap_pte_init_generic(); 1239 return 0; 1240 } 1241 #endif /* CPU_ARM6 */ 1242 #ifdef CPU_ARM7 1243 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD && 1244 CPU_ID_IS7(cputype) && 1245 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V3) { 1246 cpufuncs = arm7_cpufuncs; 1247 cpu_reset_needs_v4_MMU_disable = 0; 1248 get_cachetype_table(); 1249 pmap_pte_init_generic(); 1250 return 0; 1251 } 1252 #endif /* CPU_ARM7 */ 1253 #ifdef CPU_ARM7TDMI 1254 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD && 1255 CPU_ID_IS7(cputype) && 1256 (cputype & CPU_ID_7ARCH_MASK) == CPU_ID_7ARCH_V4T) { 1257 cpufuncs = arm7tdmi_cpufuncs; 1258 cpu_reset_needs_v4_MMU_disable = 0; 1259 get_cachetype_cp15(); 1260 pmap_pte_init_generic(); 1261 return 0; 1262 } 1263 #endif 1264 #ifdef CPU_ARM8 1265 if ((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD && 1266 (cputype & 0x0000f000) == 0x00008000) { 1267 cpufuncs = arm8_cpufuncs; 1268 cpu_reset_needs_v4_MMU_disable = 0; /* XXX correct? */ 1269 get_cachetype_cp15(); 1270 pmap_pte_init_arm8(); 1271 return 0; 1272 } 1273 #endif /* CPU_ARM8 */ 1274 #ifdef CPU_ARM9 1275 if (((cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_ARM_LTD || 1276 (cputype & CPU_ID_IMPLEMENTOR_MASK) == CPU_ID_TI) && 1277 (cputype & 0x0000f000) == 0x00009000) { 1278 cpufuncs = arm9_cpufuncs; 1279 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */ 1280 get_cachetype_cp15(); 1281 arm9_dcache_sets_inc = 1U << arm_dcache_l2_linesize; 1282 arm9_dcache_sets_max = 1283 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) - 1284 arm9_dcache_sets_inc; 1285 arm9_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc); 1286 arm9_dcache_index_max = 0U - arm9_dcache_index_inc; 1287 #ifdef ARM9_CACHE_WRITE_THROUGH 1288 pmap_pte_init_arm9(); 1289 #else 1290 pmap_pte_init_generic(); 1291 #endif 1292 return 0; 1293 } 1294 #endif /* CPU_ARM9 */ 1295 #if defined(CPU_ARM9E) || defined(CPU_ARM10) 1296 if (cputype == CPU_ID_ARM926EJS || 1297 cputype == CPU_ID_ARM1026EJS) { 1298 cpufuncs = armv5_ec_cpufuncs; 1299 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */ 1300 get_cachetype_cp15(); 1301 pmap_pte_init_generic(); 1302 return 0; 1303 } 1304 #endif /* CPU_ARM9E || CPU_ARM10 */ 1305 #ifdef CPU_ARM10 1306 if (/* cputype == CPU_ID_ARM1020T || */ 1307 cputype == CPU_ID_ARM1020E) { 1308 /* 1309 * Select write-through cacheing (this isn't really an 1310 * option on ARM1020T). 1311 */ 1312 cpufuncs = arm10_cpufuncs; 1313 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */ 1314 get_cachetype_cp15(); 1315 armv5_dcache_sets_inc = 1U << arm_dcache_l2_linesize; 1316 armv5_dcache_sets_max = 1317 (1U << (arm_dcache_l2_linesize + arm_dcache_l2_nsets)) - 1318 armv5_dcache_sets_inc; 1319 armv5_dcache_index_inc = 1U << (32 - arm_dcache_l2_assoc); 1320 armv5_dcache_index_max = 0U - armv5_dcache_index_inc; 1321 pmap_pte_init_generic(); 1322 return 0; 1323 } 1324 #endif /* CPU_ARM10 */ 1325 #if defined(CPU_ARM11) 1326 if (cputype == CPU_ID_ARM1136JS || 1327 cputype == CPU_ID_ARM1136JSR1 || 1328 cputype == CPU_ID_ARM1176JS) { 1329 cpufuncs = arm11_cpufuncs; 1330 #if defined(CPU_ARM1136) 1331 if (cputype != CPU_ID_ARM1176JS) { 1332 cpufuncs = arm1136_cpufuncs; 1333 if (cputype == CPU_ID_ARM1136JS) 1334 cpufuncs.cf_sleep = arm1136_sleep_rev0; 1335 } 1336 #endif 1337 cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */ 1338 cpu_do_powersave = 1; /* Enable powersave */ 1339 get_cachetype_cp15(); 1340 pmap_pte_init_generic(); 1341 if (arm_cache_prefer_mask) 1342 uvmexp.ncolors = (arm_cache_prefer_mask >> PGSHIFT) + 1; 1343 1344 return 0; 1345 } 1346 #endif /* CPU_ARM11 */ 1347 #ifdef CPU_SA110 1348 if (cputype == CPU_ID_SA110) { 1349 cpufuncs = sa110_cpufuncs; 1350 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */ 1351 get_cachetype_table(); 1352 pmap_pte_init_sa1(); 1353 return 0; 1354 } 1355 #endif /* CPU_SA110 */ 1356 #ifdef CPU_SA1100 1357 if (cputype == CPU_ID_SA1100) { 1358 cpufuncs = sa11x0_cpufuncs; 1359 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */ 1360 get_cachetype_table(); 1361 pmap_pte_init_sa1(); 1362 1363 /* Use powersave on this CPU. */ 1364 cpu_do_powersave = 1; 1365 1366 return 0; 1367 } 1368 #endif /* CPU_SA1100 */ 1369 #ifdef CPU_SA1110 1370 if (cputype == CPU_ID_SA1110) { 1371 cpufuncs = sa11x0_cpufuncs; 1372 cpu_reset_needs_v4_MMU_disable = 1; /* SA needs it */ 1373 get_cachetype_table(); 1374 pmap_pte_init_sa1(); 1375 1376 /* Use powersave on this CPU. */ 1377 cpu_do_powersave = 1; 1378 1379 return 0; 1380 } 1381 #endif /* CPU_SA1110 */ 1382 #ifdef CPU_IXP12X0 1383 if (cputype == CPU_ID_IXP1200) { 1384 cpufuncs = ixp12x0_cpufuncs; 1385 cpu_reset_needs_v4_MMU_disable = 1; 1386 get_cachetype_table(); 1387 pmap_pte_init_sa1(); 1388 return 0; 1389 } 1390 #endif /* CPU_IXP12X0 */ 1391 #ifdef CPU_XSCALE_80200 1392 if (cputype == CPU_ID_80200) { 1393 int rev = cpufunc_id() & CPU_ID_REVISION_MASK; 1394 1395 i80200_icu_init(); 1396 1397 /* 1398 * Reset the Performance Monitoring Unit to a 1399 * pristine state: 1400 * - CCNT, PMN0, PMN1 reset to 0 1401 * - overflow indications cleared 1402 * - all counters disabled 1403 */ 1404 __asm volatile("mcr p14, 0, %0, c0, c0, 0" 1405 : 1406 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF| 1407 PMNC_CC_IF)); 1408 1409 #if defined(XSCALE_CCLKCFG) 1410 /* 1411 * Crank CCLKCFG to maximum legal value. 1412 */ 1413 __asm volatile ("mcr p14, 0, %0, c6, c0, 0" 1414 : 1415 : "r" (XSCALE_CCLKCFG)); 1416 #endif 1417 1418 /* 1419 * XXX Disable ECC in the Bus Controller Unit; we 1420 * don't really support it, yet. Clear any pending 1421 * error indications. 1422 */ 1423 __asm volatile("mcr p13, 0, %0, c0, c1, 0" 1424 : 1425 : "r" (BCUCTL_E0|BCUCTL_E1|BCUCTL_EV)); 1426 1427 cpufuncs = xscale_cpufuncs; 1428 #if defined(PERFCTRS) 1429 xscale_pmu_init(); 1430 #endif 1431 1432 /* 1433 * i80200 errata: Step-A0 and A1 have a bug where 1434 * D$ dirty bits are not cleared on "invalidate by 1435 * address". 1436 * 1437 * Workaround: Clean cache line before invalidating. 1438 */ 1439 if (rev == 0 || rev == 1) 1440 cpufuncs.cf_dcache_inv_range = xscale_cache_purgeD_rng; 1441 1442 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ 1443 get_cachetype_cp15(); 1444 pmap_pte_init_xscale(); 1445 return 0; 1446 } 1447 #endif /* CPU_XSCALE_80200 */ 1448 #ifdef CPU_XSCALE_80321 1449 if (cputype == CPU_ID_80321_400 || cputype == CPU_ID_80321_600 || 1450 cputype == CPU_ID_80321_400_B0 || cputype == CPU_ID_80321_600_B0 || 1451 cputype == CPU_ID_80219_400 || cputype == CPU_ID_80219_600) { 1452 i80321_icu_init(); 1453 1454 /* 1455 * Reset the Performance Monitoring Unit to a 1456 * pristine state: 1457 * - CCNT, PMN0, PMN1 reset to 0 1458 * - overflow indications cleared 1459 * - all counters disabled 1460 */ 1461 __asm volatile("mcr p14, 0, %0, c0, c0, 0" 1462 : 1463 : "r" (PMNC_P|PMNC_C|PMNC_PMN0_IF|PMNC_PMN1_IF| 1464 PMNC_CC_IF)); 1465 1466 cpufuncs = xscale_cpufuncs; 1467 #if defined(PERFCTRS) 1468 xscale_pmu_init(); 1469 #endif 1470 1471 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ 1472 get_cachetype_cp15(); 1473 pmap_pte_init_xscale(); 1474 return 0; 1475 } 1476 #endif /* CPU_XSCALE_80321 */ 1477 #ifdef __CPU_XSCALE_PXA2XX 1478 /* ignore core revision to test PXA2xx CPUs */ 1479 if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X || 1480 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 || 1481 (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) { 1482 1483 cpufuncs = xscale_cpufuncs; 1484 #if defined(PERFCTRS) 1485 xscale_pmu_init(); 1486 #endif 1487 1488 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ 1489 get_cachetype_cp15(); 1490 pmap_pte_init_xscale(); 1491 1492 /* Use powersave on this CPU. */ 1493 cpu_do_powersave = 1; 1494 1495 return 0; 1496 } 1497 #endif /* __CPU_XSCALE_PXA2XX */ 1498 #ifdef CPU_XSCALE_IXP425 1499 if (cputype == CPU_ID_IXP425_533 || cputype == CPU_ID_IXP425_400 || 1500 cputype == CPU_ID_IXP425_266) { 1501 ixp425_icu_init(); 1502 1503 cpufuncs = xscale_cpufuncs; 1504 #if defined(PERFCTRS) 1505 xscale_pmu_init(); 1506 #endif 1507 1508 cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ 1509 get_cachetype_cp15(); 1510 pmap_pte_init_xscale(); 1511 1512 return 0; 1513 } 1514 #endif /* CPU_XSCALE_IXP425 */ 1515 /* 1516 * Bzzzz. And the answer was ... 1517 */ 1518 panic("No support for this CPU type (%08x) in kernel", cputype); 1519 return(ARCHITECTURE_NOT_PRESENT); 1520 } 1521 1522 #ifdef CPU_ARM2 1523 u_int arm2_id(void) 1524 { 1525 1526 return CPU_ID_ARM2; 1527 } 1528 #endif /* CPU_ARM2 */ 1529 1530 #ifdef CPU_ARM250 1531 u_int arm250_id(void) 1532 { 1533 1534 return CPU_ID_ARM250; 1535 } 1536 #endif /* CPU_ARM250 */ 1537 1538 /* 1539 * Fixup routines for data and prefetch aborts. 1540 * 1541 * Several compile time symbols are used 1542 * 1543 * DEBUG_FAULT_CORRECTION - Print debugging information during the 1544 * correction of registers after a fault. 1545 * ARM6_LATE_ABORT - ARM6 supports both early and late aborts 1546 * when defined should use late aborts 1547 */ 1548 1549 1550 /* 1551 * Null abort fixup routine. 1552 * For use when no fixup is required. 1553 */ 1554 int 1555 cpufunc_null_fixup(arg) 1556 void *arg; 1557 { 1558 return(ABORT_FIXUP_OK); 1559 } 1560 1561 1562 #if defined(CPU_ARM2) || defined(CPU_ARM250) || defined(CPU_ARM3) || \ 1563 defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) 1564 1565 #ifdef DEBUG_FAULT_CORRECTION 1566 #define DFC_PRINTF(x) printf x 1567 #define DFC_DISASSEMBLE(x) disassemble(x) 1568 #else 1569 #define DFC_PRINTF(x) /* nothing */ 1570 #define DFC_DISASSEMBLE(x) /* nothing */ 1571 #endif 1572 1573 /* 1574 * "Early" data abort fixup. 1575 * 1576 * For ARM2, ARM2as, ARM3 and ARM6 (in early-abort mode). Also used 1577 * indirectly by ARM6 (in late-abort mode) and ARM7[TDMI]. 1578 * 1579 * In early aborts, we may have to fix up LDM, STM, LDC and STC. 1580 */ 1581 int 1582 early_abort_fixup(arg) 1583 void *arg; 1584 { 1585 trapframe_t *frame = arg; 1586 u_int fault_pc; 1587 u_int fault_instruction; 1588 int saved_lr = 0; 1589 1590 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 1591 1592 /* Ok an abort in SVC mode */ 1593 1594 /* 1595 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 1596 * as the fault happened in svc mode but we need it in the 1597 * usr slot so we can treat the registers as an array of ints 1598 * during fixing. 1599 * NOTE: This PC is in the position but writeback is not 1600 * allowed on r15. 1601 * Doing it like this is more efficient than trapping this 1602 * case in all possible locations in the following fixup code. 1603 */ 1604 1605 saved_lr = frame->tf_usr_lr; 1606 frame->tf_usr_lr = frame->tf_svc_lr; 1607 1608 /* 1609 * Note the trapframe does not have the SVC r13 so a fault 1610 * from an instruction with writeback to r13 in SVC mode is 1611 * not allowed. This should not happen as the kstack is 1612 * always valid. 1613 */ 1614 } 1615 1616 /* Get fault address and status from the CPU */ 1617 1618 fault_pc = frame->tf_pc; 1619 fault_instruction = *((volatile unsigned int *)fault_pc); 1620 1621 /* Decode the fault instruction and fix the registers as needed */ 1622 1623 if ((fault_instruction & 0x0e000000) == 0x08000000) { 1624 int base; 1625 int loop; 1626 int count; 1627 int *registers = &frame->tf_r0; 1628 1629 DFC_PRINTF(("LDM/STM\n")); 1630 DFC_DISASSEMBLE(fault_pc); 1631 if (fault_instruction & (1 << 21)) { 1632 DFC_PRINTF(("This instruction must be corrected\n")); 1633 base = (fault_instruction >> 16) & 0x0f; 1634 if (base == 15) 1635 return ABORT_FIXUP_FAILED; 1636 /* Count registers transferred */ 1637 count = 0; 1638 for (loop = 0; loop < 16; ++loop) { 1639 if (fault_instruction & (1<<loop)) 1640 ++count; 1641 } 1642 DFC_PRINTF(("%d registers used\n", count)); 1643 DFC_PRINTF(("Corrected r%d by %d bytes ", 1644 base, count * 4)); 1645 if (fault_instruction & (1 << 23)) { 1646 DFC_PRINTF(("down\n")); 1647 registers[base] -= count * 4; 1648 } else { 1649 DFC_PRINTF(("up\n")); 1650 registers[base] += count * 4; 1651 } 1652 } 1653 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) { 1654 int base; 1655 int offset; 1656 int *registers = &frame->tf_r0; 1657 1658 /* REGISTER CORRECTION IS REQUIRED FOR THESE INSTRUCTIONS */ 1659 1660 DFC_DISASSEMBLE(fault_pc); 1661 1662 /* Only need to fix registers if write back is turned on */ 1663 1664 if ((fault_instruction & (1 << 21)) != 0) { 1665 base = (fault_instruction >> 16) & 0x0f; 1666 if (base == 13 && 1667 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) 1668 return ABORT_FIXUP_FAILED; 1669 if (base == 15) 1670 return ABORT_FIXUP_FAILED; 1671 1672 offset = (fault_instruction & 0xff) << 2; 1673 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 1674 if ((fault_instruction & (1 << 23)) != 0) 1675 offset = -offset; 1676 registers[base] += offset; 1677 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 1678 } 1679 } else if ((fault_instruction & 0x0e000000) == 0x0c000000) 1680 return ABORT_FIXUP_FAILED; 1681 1682 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 1683 1684 /* Ok an abort in SVC mode */ 1685 1686 /* 1687 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 1688 * as the fault happened in svc mode but we need it in the 1689 * usr slot so we can treat the registers as an array of ints 1690 * during fixing. 1691 * NOTE: This PC is in the position but writeback is not 1692 * allowed on r15. 1693 * Doing it like this is more efficient than trapping this 1694 * case in all possible locations in the prior fixup code. 1695 */ 1696 1697 frame->tf_svc_lr = frame->tf_usr_lr; 1698 frame->tf_usr_lr = saved_lr; 1699 1700 /* 1701 * Note the trapframe does not have the SVC r13 so a fault 1702 * from an instruction with writeback to r13 in SVC mode is 1703 * not allowed. This should not happen as the kstack is 1704 * always valid. 1705 */ 1706 } 1707 1708 return(ABORT_FIXUP_OK); 1709 } 1710 #endif /* CPU_ARM2/250/3/6/7 */ 1711 1712 1713 #if (defined(CPU_ARM6) && defined(ARM6_LATE_ABORT)) || defined(CPU_ARM7) || \ 1714 defined(CPU_ARM7TDMI) 1715 /* 1716 * "Late" (base updated) data abort fixup 1717 * 1718 * For ARM6 (in late-abort mode) and ARM7. 1719 * 1720 * In this model, all data-transfer instructions need fixing up. We defer 1721 * LDM, STM, LDC and STC fixup to the early-abort handler. 1722 */ 1723 int 1724 late_abort_fixup(arg) 1725 void *arg; 1726 { 1727 trapframe_t *frame = arg; 1728 u_int fault_pc; 1729 u_int fault_instruction; 1730 int saved_lr = 0; 1731 1732 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 1733 1734 /* Ok an abort in SVC mode */ 1735 1736 /* 1737 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 1738 * as the fault happened in svc mode but we need it in the 1739 * usr slot so we can treat the registers as an array of ints 1740 * during fixing. 1741 * NOTE: This PC is in the position but writeback is not 1742 * allowed on r15. 1743 * Doing it like this is more efficient than trapping this 1744 * case in all possible locations in the following fixup code. 1745 */ 1746 1747 saved_lr = frame->tf_usr_lr; 1748 frame->tf_usr_lr = frame->tf_svc_lr; 1749 1750 /* 1751 * Note the trapframe does not have the SVC r13 so a fault 1752 * from an instruction with writeback to r13 in SVC mode is 1753 * not allowed. This should not happen as the kstack is 1754 * always valid. 1755 */ 1756 } 1757 1758 /* Get fault address and status from the CPU */ 1759 1760 fault_pc = frame->tf_pc; 1761 fault_instruction = *((volatile unsigned int *)fault_pc); 1762 1763 /* Decode the fault instruction and fix the registers as needed */ 1764 1765 /* Was is a swap instruction ? */ 1766 1767 if ((fault_instruction & 0x0fb00ff0) == 0x01000090) { 1768 DFC_DISASSEMBLE(fault_pc); 1769 } else if ((fault_instruction & 0x0c000000) == 0x04000000) { 1770 1771 /* Was is a ldr/str instruction */ 1772 /* This is for late abort only */ 1773 1774 int base; 1775 int offset; 1776 int *registers = &frame->tf_r0; 1777 1778 DFC_DISASSEMBLE(fault_pc); 1779 1780 /* This is for late abort only */ 1781 1782 if ((fault_instruction & (1 << 24)) == 0 1783 || (fault_instruction & (1 << 21)) != 0) { 1784 /* postindexed ldr/str with no writeback */ 1785 1786 base = (fault_instruction >> 16) & 0x0f; 1787 if (base == 13 && 1788 (frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) 1789 return ABORT_FIXUP_FAILED; 1790 if (base == 15) 1791 return ABORT_FIXUP_FAILED; 1792 DFC_PRINTF(("late abt fix: r%d=%08x : ", 1793 base, registers[base])); 1794 if ((fault_instruction & (1 << 25)) == 0) { 1795 /* Immediate offset - easy */ 1796 1797 offset = fault_instruction & 0xfff; 1798 if ((fault_instruction & (1 << 23))) 1799 offset = -offset; 1800 registers[base] += offset; 1801 DFC_PRINTF(("imm=%08x ", offset)); 1802 } else { 1803 /* offset is a shifted register */ 1804 int shift; 1805 1806 offset = fault_instruction & 0x0f; 1807 if (offset == base) 1808 return ABORT_FIXUP_FAILED; 1809 1810 /* 1811 * Register offset - hard we have to 1812 * cope with shifts ! 1813 */ 1814 offset = registers[offset]; 1815 1816 if ((fault_instruction & (1 << 4)) == 0) 1817 /* shift with amount */ 1818 shift = (fault_instruction >> 7) & 0x1f; 1819 else { 1820 /* shift with register */ 1821 if ((fault_instruction & (1 << 7)) != 0) 1822 /* undefined for now so bail out */ 1823 return ABORT_FIXUP_FAILED; 1824 shift = ((fault_instruction >> 8) & 0xf); 1825 if (base == shift) 1826 return ABORT_FIXUP_FAILED; 1827 DFC_PRINTF(("shift reg=%d ", shift)); 1828 shift = registers[shift]; 1829 } 1830 DFC_PRINTF(("shift=%08x ", shift)); 1831 switch (((fault_instruction >> 5) & 0x3)) { 1832 case 0 : /* Logical left */ 1833 offset = (int)(((u_int)offset) << shift); 1834 break; 1835 case 1 : /* Logical Right */ 1836 if (shift == 0) shift = 32; 1837 offset = (int)(((u_int)offset) >> shift); 1838 break; 1839 case 2 : /* Arithmetic Right */ 1840 if (shift == 0) shift = 32; 1841 offset = (int)(((int)offset) >> shift); 1842 break; 1843 case 3 : /* Rotate right (rol or rxx) */ 1844 return ABORT_FIXUP_FAILED; 1845 break; 1846 } 1847 1848 DFC_PRINTF(("abt: fixed LDR/STR with " 1849 "register offset\n")); 1850 if ((fault_instruction & (1 << 23))) 1851 offset = -offset; 1852 DFC_PRINTF(("offset=%08x ", offset)); 1853 registers[base] += offset; 1854 } 1855 DFC_PRINTF(("r%d=%08x\n", base, registers[base])); 1856 } 1857 } 1858 1859 if ((frame->tf_spsr & PSR_MODE) == PSR_SVC32_MODE) { 1860 1861 /* Ok an abort in SVC mode */ 1862 1863 /* 1864 * Copy the SVC r14 into the usr r14 - The usr r14 is garbage 1865 * as the fault happened in svc mode but we need it in the 1866 * usr slot so we can treat the registers as an array of ints 1867 * during fixing. 1868 * NOTE: This PC is in the position but writeback is not 1869 * allowed on r15. 1870 * Doing it like this is more efficient than trapping this 1871 * case in all possible locations in the prior fixup code. 1872 */ 1873 1874 frame->tf_svc_lr = frame->tf_usr_lr; 1875 frame->tf_usr_lr = saved_lr; 1876 1877 /* 1878 * Note the trapframe does not have the SVC r13 so a fault 1879 * from an instruction with writeback to r13 in SVC mode is 1880 * not allowed. This should not happen as the kstack is 1881 * always valid. 1882 */ 1883 } 1884 1885 /* 1886 * Now let the early-abort fixup routine have a go, in case it 1887 * was an LDM, STM, LDC or STC that faulted. 1888 */ 1889 1890 return early_abort_fixup(arg); 1891 } 1892 #endif /* CPU_ARM6(LATE)/7/7TDMI */ 1893 1894 /* 1895 * CPU Setup code 1896 */ 1897 1898 #if defined(CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) || \ 1899 defined(CPU_ARM8) || defined (CPU_ARM9) || defined (CPU_ARM9E) || \ 1900 defined(CPU_SA110) || defined(CPU_SA1100) || defined(CPU_SA1110) || \ 1901 defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 1902 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) || \ 1903 defined(CPU_ARM10) || defined(CPU_ARM11) || defined(CPU_ARM1136) 1904 1905 #define IGN 0 1906 #define OR 1 1907 #define BIC 2 1908 1909 struct cpu_option { 1910 const char *co_name; 1911 int co_falseop; 1912 int co_trueop; 1913 int co_value; 1914 }; 1915 1916 static u_int parse_cpu_options __P((char *, struct cpu_option *, u_int)); 1917 1918 static u_int 1919 parse_cpu_options(args, optlist, cpuctrl) 1920 char *args; 1921 struct cpu_option *optlist; 1922 u_int cpuctrl; 1923 { 1924 int integer; 1925 1926 if (args == NULL) 1927 return(cpuctrl); 1928 1929 while (optlist->co_name) { 1930 if (get_bootconf_option(args, optlist->co_name, 1931 BOOTOPT_TYPE_BOOLEAN, &integer)) { 1932 if (integer) { 1933 if (optlist->co_trueop == OR) 1934 cpuctrl |= optlist->co_value; 1935 else if (optlist->co_trueop == BIC) 1936 cpuctrl &= ~optlist->co_value; 1937 } else { 1938 if (optlist->co_falseop == OR) 1939 cpuctrl |= optlist->co_value; 1940 else if (optlist->co_falseop == BIC) 1941 cpuctrl &= ~optlist->co_value; 1942 } 1943 } 1944 ++optlist; 1945 } 1946 return(cpuctrl); 1947 } 1948 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 || CPU_SA110 */ 1949 1950 #if defined (CPU_ARM6) || defined(CPU_ARM7) || defined(CPU_ARM7TDMI) \ 1951 || defined(CPU_ARM8) 1952 struct cpu_option arm678_options[] = { 1953 #ifdef COMPAT_12 1954 { "nocache", IGN, BIC, CPU_CONTROL_IDC_ENABLE }, 1955 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 1956 #endif /* COMPAT_12 */ 1957 { "cpu.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 1958 { "cpu.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 1959 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 1960 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 1961 { NULL, IGN, IGN, 0 } 1962 }; 1963 1964 #endif /* CPU_ARM6 || CPU_ARM7 || CPU_ARM7TDMI || CPU_ARM8 */ 1965 1966 #ifdef CPU_ARM6 1967 struct cpu_option arm6_options[] = { 1968 { "arm6.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 1969 { "arm6.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 1970 { "arm6.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 1971 { "arm6.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 1972 { NULL, IGN, IGN, 0 } 1973 }; 1974 1975 void 1976 arm6_setup(args) 1977 char *args; 1978 { 1979 int cpuctrl, cpuctrlmask; 1980 1981 /* Set up default control registers bits */ 1982 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 1983 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 1984 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 1985 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 1986 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 1987 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 1988 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE 1989 | CPU_CONTROL_AFLT_ENABLE; 1990 1991 #ifdef ARM6_LATE_ABORT 1992 cpuctrl |= CPU_CONTROL_LABT_ENABLE; 1993 #endif /* ARM6_LATE_ABORT */ 1994 1995 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 1996 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 1997 #endif 1998 1999 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2000 cpuctrl = parse_cpu_options(args, arm6_options, cpuctrl); 2001 2002 #ifdef __ARMEB__ 2003 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2004 #endif 2005 2006 /* Clear out the cache */ 2007 cpu_idcache_wbinv_all(); 2008 2009 /* Set the control register */ 2010 curcpu()->ci_ctrl = cpuctrl; 2011 cpu_control(0xffffffff, cpuctrl); 2012 } 2013 #endif /* CPU_ARM6 */ 2014 2015 #ifdef CPU_ARM7 2016 struct cpu_option arm7_options[] = { 2017 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2018 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2019 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2020 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2021 #ifdef COMPAT_12 2022 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK }, 2023 #endif /* COMPAT_12 */ 2024 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK }, 2025 { NULL, IGN, IGN, 0 } 2026 }; 2027 2028 void 2029 arm7_setup(args) 2030 char *args; 2031 { 2032 int cpuctrl, cpuctrlmask; 2033 2034 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2035 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2036 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2037 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2038 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2039 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2040 | CPU_CONTROL_CPCLK | CPU_CONTROL_LABT_ENABLE 2041 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BEND_ENABLE 2042 | CPU_CONTROL_AFLT_ENABLE; 2043 2044 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2045 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2046 #endif 2047 2048 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2049 cpuctrl = parse_cpu_options(args, arm7_options, cpuctrl); 2050 2051 #ifdef __ARMEB__ 2052 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2053 #endif 2054 2055 /* Clear out the cache */ 2056 cpu_idcache_wbinv_all(); 2057 2058 /* Set the control register */ 2059 curcpu()->ci_ctrl = cpuctrl; 2060 cpu_control(0xffffffff, cpuctrl); 2061 } 2062 #endif /* CPU_ARM7 */ 2063 2064 #ifdef CPU_ARM7TDMI 2065 struct cpu_option arm7tdmi_options[] = { 2066 { "arm7.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2067 { "arm7.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2068 { "arm7.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2069 { "arm7.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2070 #ifdef COMPAT_12 2071 { "fpaclk2", BIC, OR, CPU_CONTROL_CPCLK }, 2072 #endif /* COMPAT_12 */ 2073 { "arm700.fpaclk", BIC, OR, CPU_CONTROL_CPCLK }, 2074 { NULL, IGN, IGN, 0 } 2075 }; 2076 2077 void 2078 arm7tdmi_setup(args) 2079 char *args; 2080 { 2081 int cpuctrl; 2082 2083 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2084 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2085 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2086 2087 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2088 cpuctrl = parse_cpu_options(args, arm7tdmi_options, cpuctrl); 2089 2090 #ifdef __ARMEB__ 2091 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2092 #endif 2093 2094 /* Clear out the cache */ 2095 cpu_idcache_wbinv_all(); 2096 2097 /* Set the control register */ 2098 curcpu()->ci_ctrl = cpuctrl; 2099 cpu_control(0xffffffff, cpuctrl); 2100 } 2101 #endif /* CPU_ARM7TDMI */ 2102 2103 #ifdef CPU_ARM8 2104 struct cpu_option arm8_options[] = { 2105 { "arm8.cache", BIC, OR, CPU_CONTROL_IDC_ENABLE }, 2106 { "arm8.nocache", OR, BIC, CPU_CONTROL_IDC_ENABLE }, 2107 { "arm8.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2108 { "arm8.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2109 #ifdef COMPAT_12 2110 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2111 #endif /* COMPAT_12 */ 2112 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2113 { "arm8.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2114 { NULL, IGN, IGN, 0 } 2115 }; 2116 2117 void 2118 arm8_setup(args) 2119 char *args; 2120 { 2121 int integer; 2122 int cpuctrl, cpuctrlmask; 2123 int clocktest; 2124 int setclock = 0; 2125 2126 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2127 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2128 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE; 2129 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2130 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2131 | CPU_CONTROL_IDC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2132 | CPU_CONTROL_BPRD_ENABLE | CPU_CONTROL_ROM_ENABLE 2133 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE; 2134 2135 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2136 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2137 #endif 2138 2139 cpuctrl = parse_cpu_options(args, arm678_options, cpuctrl); 2140 cpuctrl = parse_cpu_options(args, arm8_options, cpuctrl); 2141 2142 #ifdef __ARMEB__ 2143 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2144 #endif 2145 2146 /* Get clock configuration */ 2147 clocktest = arm8_clock_config(0, 0) & 0x0f; 2148 2149 /* Special ARM8 clock and test configuration */ 2150 if (get_bootconf_option(args, "arm8.clock.reset", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2151 clocktest = 0; 2152 setclock = 1; 2153 } 2154 if (get_bootconf_option(args, "arm8.clock.dynamic", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2155 if (integer) 2156 clocktest |= 0x01; 2157 else 2158 clocktest &= ~(0x01); 2159 setclock = 1; 2160 } 2161 if (get_bootconf_option(args, "arm8.clock.sync", BOOTOPT_TYPE_BOOLEAN, &integer)) { 2162 if (integer) 2163 clocktest |= 0x02; 2164 else 2165 clocktest &= ~(0x02); 2166 setclock = 1; 2167 } 2168 if (get_bootconf_option(args, "arm8.clock.fast", BOOTOPT_TYPE_BININT, &integer)) { 2169 clocktest = (clocktest & ~0xc0) | (integer & 3) << 2; 2170 setclock = 1; 2171 } 2172 if (get_bootconf_option(args, "arm8.test", BOOTOPT_TYPE_BININT, &integer)) { 2173 clocktest |= (integer & 7) << 5; 2174 setclock = 1; 2175 } 2176 2177 /* Clear out the cache */ 2178 cpu_idcache_wbinv_all(); 2179 2180 /* Set the control register */ 2181 curcpu()->ci_ctrl = cpuctrl; 2182 cpu_control(0xffffffff, cpuctrl); 2183 2184 /* Set the clock/test register */ 2185 if (setclock) 2186 arm8_clock_config(0x7f, clocktest); 2187 } 2188 #endif /* CPU_ARM8 */ 2189 2190 #ifdef CPU_ARM9 2191 struct cpu_option arm9_options[] = { 2192 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2193 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2194 { "arm9.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2195 { "arm9.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2196 { "arm9.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2197 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2198 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2199 { "arm9.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2200 { NULL, IGN, IGN, 0 } 2201 }; 2202 2203 void 2204 arm9_setup(args) 2205 char *args; 2206 { 2207 int cpuctrl, cpuctrlmask; 2208 2209 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2210 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2211 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2212 | CPU_CONTROL_WBUF_ENABLE; 2213 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2214 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2215 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2216 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 2217 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2218 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_VECRELOC 2219 | CPU_CONTROL_ROUNDROBIN; 2220 2221 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2222 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2223 #endif 2224 2225 cpuctrl = parse_cpu_options(args, arm9_options, cpuctrl); 2226 2227 #ifdef __ARMEB__ 2228 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2229 #endif 2230 2231 if (vector_page == ARM_VECTORS_HIGH) 2232 cpuctrl |= CPU_CONTROL_VECRELOC; 2233 2234 /* Clear out the cache */ 2235 cpu_idcache_wbinv_all(); 2236 2237 /* Set the control register */ 2238 curcpu()->ci_ctrl = cpuctrl; 2239 cpu_control(cpuctrlmask, cpuctrl); 2240 2241 } 2242 #endif /* CPU_ARM9 */ 2243 2244 #if defined(CPU_ARM9E) || defined(CPU_ARM10) 2245 struct cpu_option arm10_options[] = { 2246 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2247 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2248 { "arm10.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2249 { "arm10.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2250 { "arm10.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2251 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2252 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2253 { "arm10.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2254 { NULL, IGN, IGN, 0 } 2255 }; 2256 2257 void 2258 arm10_setup(args) 2259 char *args; 2260 { 2261 int cpuctrl, cpuctrlmask; 2262 2263 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2264 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2265 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_BPRD_ENABLE; 2266 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2267 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2268 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 2269 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2270 | CPU_CONTROL_BPRD_ENABLE 2271 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 2272 2273 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2274 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2275 #endif 2276 2277 cpuctrl = parse_cpu_options(args, arm10_options, cpuctrl); 2278 2279 #ifdef __ARMEB__ 2280 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2281 #endif 2282 2283 if (vector_page == ARM_VECTORS_HIGH) 2284 cpuctrl |= CPU_CONTROL_VECRELOC; 2285 2286 /* Clear out the cache */ 2287 cpu_idcache_wbinv_all(); 2288 2289 /* Now really make sure they are clean. */ 2290 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2291 2292 /* Allow detection code to find the VFP if it's fitted. */ 2293 __asm volatile ("mcr\tp15, 0, %0, c1, c0, 2" : : "r" (0x0fffffff)); 2294 2295 /* Set the control register */ 2296 curcpu()->ci_ctrl = cpuctrl; 2297 cpu_control(0xffffffff, cpuctrl); 2298 2299 /* And again. */ 2300 cpu_idcache_wbinv_all(); 2301 } 2302 #endif /* CPU_ARM9E || CPU_ARM10 */ 2303 2304 #if defined(CPU_ARM11) 2305 struct cpu_option arm11_options[] = { 2306 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2307 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2308 { "arm11.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2309 { "arm11.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2310 { "arm11.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2311 { NULL, IGN, IGN, 0 } 2312 }; 2313 2314 void 2315 arm11_setup(args) 2316 char *args; 2317 { 2318 int cpuctrl, cpuctrlmask; 2319 2320 #if defined(PROCESS_ID_IS_CURCPU) 2321 /* set curcpu() */ 2322 __asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store)); 2323 #elif defined(PROCESS_ID_IS_CURLWP) 2324 /* set curlwp() */ 2325 __asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0)); 2326 #endif 2327 2328 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2329 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2330 /* | CPU_CONTROL_BPRD_ENABLE */; 2331 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_SYST_ENABLE 2332 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2333 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_BPRD_ENABLE 2334 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2335 | CPU_CONTROL_ROUNDROBIN | CPU_CONTROL_CPCLK; 2336 2337 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2338 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2339 #endif 2340 2341 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); 2342 2343 #ifdef __ARMEB__ 2344 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2345 #endif 2346 2347 if (vector_page == ARM_VECTORS_HIGH) 2348 cpuctrl |= CPU_CONTROL_VECRELOC; 2349 2350 /* Clear out the cache */ 2351 cpu_idcache_wbinv_all(); 2352 2353 /* Now really make sure they are clean. */ 2354 __asm volatile ("mcr\tp15, 0, r0, c7, c7, 0" : : ); 2355 2356 /* Set the control register */ 2357 curcpu()->ci_ctrl = cpuctrl; 2358 cpu_control(0xffffffff, cpuctrl); 2359 2360 /* And again. */ 2361 cpu_idcache_wbinv_all(); 2362 } 2363 #endif /* CPU_ARM11 */ 2364 2365 #if defined(CPU_ARM1136) 2366 void 2367 arm1136_setup(char *args) 2368 { 2369 int cpuctrl, cpuctrl_wax; 2370 uint32_t auxctrl, auxctrl_wax; 2371 uint32_t tmp, tmp2; 2372 uint32_t sbz=0; 2373 uint32_t cpuid; 2374 2375 #if defined(PROCESS_ID_IS_CURCPU) 2376 /* set curcpu() */ 2377 __asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&cpu_info_store)); 2378 #elif defined(PROCESS_ID_IS_CURLWP) 2379 /* set curlwp() */ 2380 __asm("mcr\tp15, 0, %0, c13, c0, 4" : : "r"(&lwp0)); 2381 #endif 2382 2383 cpuid = cpu_id(); 2384 2385 cpuctrl = 2386 CPU_CONTROL_MMU_ENABLE | 2387 CPU_CONTROL_DC_ENABLE | 2388 CPU_CONTROL_WBUF_ENABLE | 2389 CPU_CONTROL_32BP_ENABLE | 2390 CPU_CONTROL_32BD_ENABLE | 2391 CPU_CONTROL_LABT_ENABLE | 2392 CPU_CONTROL_SYST_ENABLE | 2393 CPU_CONTROL_IC_ENABLE; 2394 2395 /* 2396 * "write as existing" bits 2397 * inverse of this is mask 2398 */ 2399 cpuctrl_wax = 2400 (3 << 30) | 2401 (1 << 29) | 2402 (1 << 28) | 2403 (3 << 26) | 2404 (3 << 19) | 2405 (1 << 17); 2406 2407 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2408 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2409 #endif 2410 2411 cpuctrl = parse_cpu_options(args, arm11_options, cpuctrl); 2412 2413 #ifdef __ARMEB__ 2414 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2415 #endif 2416 2417 if (vector_page == ARM_VECTORS_HIGH) 2418 cpuctrl |= CPU_CONTROL_VECRELOC; 2419 2420 auxctrl = 0; 2421 auxctrl_wax = ~0; 2422 /* This options enables the workaround for the 364296 ARM1136 2423 * r0pX errata (possible cache data corruption with 2424 * hit-under-miss enabled). It sets the undocumented bit 31 in 2425 * the auxiliary control register and the FI bit in the control 2426 * register, thus disabling hit-under-miss without putting the 2427 * processor into full low interrupt latency mode. ARM11MPCore 2428 * is not affected. 2429 */ 2430 if ((cpuid & CPU_ID_CPU_MASK) == CPU_ID_ARM1136JS) { /* ARM1136JSr0pX */ 2431 cpuctrl |= CPU_CONTROL_FI_ENABLE; 2432 auxctrl = ARM11R0_AUXCTL_PFI; 2433 auxctrl_wax = ~ARM11R0_AUXCTL_PFI; 2434 } 2435 2436 /* Clear out the cache */ 2437 cpu_idcache_wbinv_all(); 2438 2439 /* Now really make sure they are clean. */ 2440 __asm volatile ("mcr\tp15, 0, %0, c7, c7, 0" : : "r"(sbz)); 2441 2442 /* Set the control register */ 2443 curcpu()->ci_ctrl = cpuctrl; 2444 cpu_control(~cpuctrl_wax, cpuctrl); 2445 2446 __asm volatile ("mrc p15, 0, %0, c1, c0, 1\n\t" 2447 "bic %1, %0, %2\n\t" 2448 "eor %1, %0, %3\n\t" 2449 "teq %0, %1\n\t" 2450 "mcrne p15, 0, %1, c1, c0, 1\n\t" 2451 : "=r"(tmp), "=r"(tmp2) : 2452 "r"(~auxctrl_wax), "r"(auxctrl)); 2453 2454 /* And again. */ 2455 cpu_idcache_wbinv_all(); 2456 } 2457 #endif /* CPU_ARM1136 */ 2458 2459 #ifdef CPU_SA110 2460 struct cpu_option sa110_options[] = { 2461 #ifdef COMPAT_12 2462 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2463 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 2464 #endif /* COMPAT_12 */ 2465 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2466 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2467 { "sa110.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2468 { "sa110.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2469 { "sa110.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2470 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2471 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2472 { "sa110.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2473 { NULL, IGN, IGN, 0 } 2474 }; 2475 2476 void 2477 sa110_setup(args) 2478 char *args; 2479 { 2480 int cpuctrl, cpuctrlmask; 2481 2482 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2483 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2484 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2485 | CPU_CONTROL_WBUF_ENABLE; 2486 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2487 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2488 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2489 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 2490 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2491 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 2492 | CPU_CONTROL_CPCLK; 2493 2494 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2495 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2496 #endif 2497 2498 cpuctrl = parse_cpu_options(args, sa110_options, cpuctrl); 2499 2500 #ifdef __ARMEB__ 2501 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2502 #endif 2503 2504 if (vector_page == ARM_VECTORS_HIGH) 2505 cpuctrl |= CPU_CONTROL_VECRELOC; 2506 2507 /* Clear out the cache */ 2508 cpu_idcache_wbinv_all(); 2509 2510 /* Set the control register */ 2511 curcpu()->ci_ctrl = cpuctrl; 2512 /* cpu_control(cpuctrlmask, cpuctrl);*/ 2513 cpu_control(0xffffffff, cpuctrl); 2514 2515 /* 2516 * enable clockswitching, note that this doesn't read or write to r0, 2517 * r0 is just to make it valid asm 2518 */ 2519 __asm ("mcr 15, 0, r0, c15, c1, 2"); 2520 } 2521 #endif /* CPU_SA110 */ 2522 2523 #if defined(CPU_SA1100) || defined(CPU_SA1110) 2524 struct cpu_option sa11x0_options[] = { 2525 #ifdef COMPAT_12 2526 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2527 { "nowritebuf", IGN, BIC, CPU_CONTROL_WBUF_ENABLE }, 2528 #endif /* COMPAT_12 */ 2529 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2530 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2531 { "sa11x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2532 { "sa11x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2533 { "sa11x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2534 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2535 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2536 { "sa11x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2537 { NULL, IGN, IGN, 0 } 2538 }; 2539 2540 void 2541 sa11x0_setup(args) 2542 char *args; 2543 { 2544 int cpuctrl, cpuctrlmask; 2545 2546 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2547 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2548 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2549 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE; 2550 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2551 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2552 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2553 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 2554 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2555 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 2556 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 2557 2558 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2559 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2560 #endif 2561 2562 cpuctrl = parse_cpu_options(args, sa11x0_options, cpuctrl); 2563 2564 #ifdef __ARMEB__ 2565 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2566 #endif 2567 2568 if (vector_page == ARM_VECTORS_HIGH) 2569 cpuctrl |= CPU_CONTROL_VECRELOC; 2570 2571 /* Clear out the cache */ 2572 cpu_idcache_wbinv_all(); 2573 2574 /* Set the control register */ 2575 curcpu()->ci_ctrl = cpuctrl; 2576 cpu_control(0xffffffff, cpuctrl); 2577 } 2578 #endif /* CPU_SA1100 || CPU_SA1110 */ 2579 2580 #if defined(CPU_IXP12X0) 2581 struct cpu_option ixp12x0_options[] = { 2582 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2583 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2584 { "ixp12x0.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2585 { "ixp12x0.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2586 { "ixp12x0.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2587 { "cpu.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2588 { "cpu.nowritebuf", OR, BIC, CPU_CONTROL_WBUF_ENABLE }, 2589 { "ixp12x0.writebuf", BIC, OR, CPU_CONTROL_WBUF_ENABLE }, 2590 { NULL, IGN, IGN, 0 } 2591 }; 2592 2593 void 2594 ixp12x0_setup(args) 2595 char *args; 2596 { 2597 int cpuctrl, cpuctrlmask; 2598 2599 2600 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE 2601 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_SYST_ENABLE 2602 | CPU_CONTROL_IC_ENABLE; 2603 2604 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_AFLT_ENABLE 2605 | CPU_CONTROL_DC_ENABLE | CPU_CONTROL_WBUF_ENABLE 2606 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_SYST_ENABLE 2607 | CPU_CONTROL_ROM_ENABLE | CPU_CONTROL_IC_ENABLE 2608 | CPU_CONTROL_VECRELOC; 2609 2610 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2611 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2612 #endif 2613 2614 cpuctrl = parse_cpu_options(args, ixp12x0_options, cpuctrl); 2615 2616 #ifdef __ARMEB__ 2617 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2618 #endif 2619 2620 if (vector_page == ARM_VECTORS_HIGH) 2621 cpuctrl |= CPU_CONTROL_VECRELOC; 2622 2623 /* Clear out the cache */ 2624 cpu_idcache_wbinv_all(); 2625 2626 /* Set the control register */ 2627 curcpu()->ci_ctrl = cpuctrl; 2628 /* cpu_control(0xffffffff, cpuctrl); */ 2629 cpu_control(cpuctrlmask, cpuctrl); 2630 } 2631 #endif /* CPU_IXP12X0 */ 2632 2633 #if defined(CPU_XSCALE_80200) || defined(CPU_XSCALE_80321) || \ 2634 defined(__CPU_XSCALE_PXA2XX) || defined(CPU_XSCALE_IXP425) 2635 struct cpu_option xscale_options[] = { 2636 #ifdef COMPAT_12 2637 { "branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2638 { "nocache", IGN, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2639 #endif /* COMPAT_12 */ 2640 { "cpu.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2641 { "cpu.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2642 { "cpu.nocache", OR, BIC, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2643 { "xscale.branchpredict", BIC, OR, CPU_CONTROL_BPRD_ENABLE }, 2644 { "xscale.cache", BIC, OR, (CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE) }, 2645 { "xscale.icache", BIC, OR, CPU_CONTROL_IC_ENABLE }, 2646 { "xscale.dcache", BIC, OR, CPU_CONTROL_DC_ENABLE }, 2647 { NULL, IGN, IGN, 0 } 2648 }; 2649 2650 void 2651 xscale_setup(args) 2652 char *args; 2653 { 2654 uint32_t auxctl; 2655 int cpuctrl, cpuctrlmask; 2656 2657 /* 2658 * The XScale Write Buffer is always enabled. Our option 2659 * is to enable/disable coalescing. Note that bits 6:3 2660 * must always be enabled. 2661 */ 2662 2663 cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2664 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2665 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2666 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE 2667 | CPU_CONTROL_BPRD_ENABLE; 2668 cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE 2669 | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE 2670 | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE 2671 | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE 2672 | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE 2673 | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE 2674 | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC; 2675 2676 #ifndef ARM32_DISABLE_ALIGNMENT_FAULTS 2677 cpuctrl |= CPU_CONTROL_AFLT_ENABLE; 2678 #endif 2679 2680 cpuctrl = parse_cpu_options(args, xscale_options, cpuctrl); 2681 2682 #ifdef __ARMEB__ 2683 cpuctrl |= CPU_CONTROL_BEND_ENABLE; 2684 #endif 2685 2686 if (vector_page == ARM_VECTORS_HIGH) 2687 cpuctrl |= CPU_CONTROL_VECRELOC; 2688 2689 /* Clear out the cache */ 2690 cpu_idcache_wbinv_all(); 2691 2692 /* 2693 * Set the control register. Note that bits 6:3 must always 2694 * be set to 1. 2695 */ 2696 curcpu()->ci_ctrl = cpuctrl; 2697 /* cpu_control(cpuctrlmask, cpuctrl);*/ 2698 cpu_control(0xffffffff, cpuctrl); 2699 2700 /* Make sure write coalescing is turned on */ 2701 __asm volatile("mrc p15, 0, %0, c1, c0, 1" 2702 : "=r" (auxctl)); 2703 #ifdef XSCALE_NO_COALESCE_WRITES 2704 auxctl |= XSCALE_AUXCTL_K; 2705 #else 2706 auxctl &= ~XSCALE_AUXCTL_K; 2707 #endif 2708 __asm volatile("mcr p15, 0, %0, c1, c0, 1" 2709 : : "r" (auxctl)); 2710 } 2711 #endif /* CPU_XSCALE_80200 || CPU_XSCALE_80321 || __CPU_XSCALE_PXA2XX || CPU_XSCALE_IXP425 */ 2712