1 /* $NetBSD: ctlreg.h,v 1.44 2007/03/31 13:04:21 hannken Exp $ */ 2 3 /* 4 * Copyright (c) 1996-2002 Eduardo Horvath 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 12 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 13 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 14 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 15 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 16 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 17 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 18 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 19 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 20 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 21 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 22 * SUCH DAMAGE. 23 * 24 */ 25 26 #ifndef _SPARC_CTLREG_H_ 27 #define _SPARC_CTLREG_H_ 28 29 /* 30 * Sun 4u control registers. (includes address space definitions 31 * and some registers in control space). 32 */ 33 34 /* 35 * The Alternate address spaces. 36 * 37 * 0x00-0x7f are privileged 38 * 0x80-0xff can be used by users 39 */ 40 41 #define ASI_LITTLE 0x08 /* This bit should make an ASI little endian */ 42 43 #define ASI_NUCLEUS 0x04 /* [4u] kernel address space */ 44 #define ASI_NUCLEUS_LITTLE 0x0c /* [4u] kernel address space, little endian */ 45 46 #define ASI_AS_IF_USER_PRIMARY 0x10 /* [4u] primary user address space */ 47 #define ASI_AS_IF_USER_SECONDARY 0x11 /* [4u] secondary user address space */ 48 49 #define ASI_PHYS_CACHED 0x14 /* [4u] MMU bypass to main memory */ 50 #define ASI_PHYS_NON_CACHED 0x15 /* [4u] MMU bypass to I/O location */ 51 52 #define ASI_AS_IF_USER_PRIMARY_LITTLE 0x18 /* [4u] primary user address space, little endian */ 53 #define ASI_AS_IF_USER_SECONDARY_LITTLE 0x19 /* [4u] secondary user address space, little endian */ 54 55 #define ASI_PHYS_CACHED_LITTLE 0x1c /* [4u] MMU bypass to main memory, little endian */ 56 #define ASI_PHYS_NON_CACHED_LITTLE 0x1d /* [4u] MMU bypass to I/O location, little endian */ 57 58 #define ASI_NUCLEUS_QUAD_LDD 0x24 /* [4u] use w/LDDA to load 128-bit item */ 59 #define ASI_NUCLEUS_QUAD_LDD_LITTLE 0x2c /* [4u] use w/LDDA to load 128-bit item, little endian */ 60 61 #define ASI_FLUSH_D_PAGE_PRIMARY 0x38 /* [4u] flush D-cache page using primary context */ 62 #define ASI_FLUSH_D_PAGE_SECONDARY 0x39 /* [4u] flush D-cache page using secondary context */ 63 #define ASI_FLUSH_D_CTX_PRIMARY 0x3a /* [4u] flush D-cache context using primary context */ 64 #define ASI_FLUSH_D_CTX_SECONDARY 0x3b /* [4u] flush D-cache context using secondary context */ 65 66 #define ASI_LSU_CONTROL_REGISTER 0x45 /* [4u] load/store unit control register */ 67 68 #define ASI_DCACHE_DATA 0x46 /* [4u] diagnostic access to D-cache data RAM */ 69 #define ASI_DCACHE_TAG 0x47 /* [4u] diagnostic access to D-cache tag RAM */ 70 71 #define ASI_INTR_DISPATCH_STATUS 0x48 /* [4u] interrupt dispatch status register */ 72 #define ASI_INTR_RECEIVE 0x49 /* [4u] interrupt receive status register */ 73 #define ASI_MID_REG 0x4a /* [4u] hardware config and MID */ 74 #define ASI_ERROR_EN_REG 0x4b /* [4u] asynchronous error enables */ 75 #define ASI_AFSR 0x4c /* [4u] asynchronous fault status register */ 76 #define ASI_AFAR 0x4d /* [4u] asynchronous fault address register */ 77 78 #define ASI_ICACHE_DATA 0x66 /* [4u] diagnostic access to D-cache data RAM */ 79 #define ASI_ICACHE_TAG 0x67 /* [4u] diagnostic access to D-cache tag RAM */ 80 #define ASI_FLUSH_I_PAGE_PRIMARY 0x68 /* [4u] flush D-cache page using primary context */ 81 #define ASI_FLUSH_I_PAGE_SECONDARY 0x69 /* [4u] flush D-cache page using secondary context */ 82 #define ASI_FLUSH_I_CTX_PRIMARY 0x6a /* [4u] flush D-cache context using primary context */ 83 #define ASI_FLUSH_I_CTX_SECONDARY 0x6b /* [4u] flush D-cache context using secondary context */ 84 85 #define ASI_BLOCK_AS_IF_USER_PRIMARY 0x70 /* [4u] primary user address space, block loads/stores */ 86 #define ASI_BLOCK_AS_IF_USER_SECONDARY 0x71 /* [4u] secondary user address space, block loads/stores */ 87 88 #define ASI_ECACHE_DIAG 0x76 /* [4u] diag access to E-cache tag and data */ 89 #define ASI_DATAPATH_ERR_REG_WRITE 0x77 /* [4u] ASI is reused */ 90 91 #define ASI_BLOCK_AS_IF_USER_PRIMARY_LITTLE 0x78 /* [4u] primary user address space, block loads/stores */ 92 #define ASI_BLOCK_AS_IF_USER_SECONDARY_LITTLE 0x79 /* [4u] secondary user address space, block loads/stores */ 93 94 #define ASI_INTERRUPT_RECEIVE_DATA 0x7f /* [4u] interrupt receive data registers {0,1,2} */ 95 #define ASI_DATAPATH_ERR_REG_READ 0x7f /* [4u] read access to datapath error registers (ASI reused) */ 96 97 #define ASI_PRIMARY 0x80 /* [4u] primary address space */ 98 #define ASI_SECONDARY 0x81 /* [4u] secondary address space */ 99 #define ASI_PRIMARY_NOFAULT 0x82 /* [4u] primary address space, no fault */ 100 #define ASI_SECONDARY_NOFAULT 0x83 /* [4u] secondary address space, no fault */ 101 102 #define ASI_PRIMARY_LITTLE 0x88 /* [4u] primary address space, little endian */ 103 #define ASI_SECONDARY_LITTLE 0x89 /* [4u] secondary address space, little endian */ 104 #define ASI_PRIMARY_NOFAULT_LITTLE 0x8a /* [4u] primary address space, no fault, little endian */ 105 #define ASI_SECONDARY_NOFAULT_LITTLE 0x8b /* [4u] secondary address space, no fault, little endian */ 106 107 #define ASI_PST8_PRIMARY 0xc0 /* [VIS] Eight 8-bit partial store, primary */ 108 #define ASI_PST8_SECONDARY 0xc1 /* [VIS] Eight 8-bit partial store, secondary */ 109 #define ASI_PST16_PRIMARY 0xc2 /* [VIS] Four 16-bit partial store, primary */ 110 #define ASI_PST16_SECONDARY 0xc3 /* [VIS] Fout 16-bit partial store, secondary */ 111 #define ASI_PST32_PRIMARY 0xc4 /* [VIS] Two 32-bit partial store, primary */ 112 #define ASI_PST32_SECONDARY 0xc5 /* [VIS] Two 32-bit partial store, secondary */ 113 114 #define ASI_PST8_PRIMARY_LITTLE 0xc8 /* [VIS] Eight 8-bit partial store, primary, little endian */ 115 #define ASI_PST8_SECONDARY_LITTLE 0xc9 /* [VIS] Eight 8-bit partial store, secondary, little endian */ 116 #define ASI_PST16_PRIMARY_LITTLE 0xca /* [VIS] Four 16-bit partial store, primary, little endian */ 117 #define ASI_PST16_SECONDARY_LITTLE 0xcb /* [VIS] Fout 16-bit partial store, secondary, little endian */ 118 #define ASI_PST32_PRIMARY_LITTLE 0xcc /* [VIS] Two 32-bit partial store, primary, little endian */ 119 #define ASI_PST32_SECONDARY_LITTLE 0xcd /* [VIS] Two 32-bit partial store, secondary, little endian */ 120 121 #define ASI_FL8_PRIMARY 0xd0 /* [VIS] One 8-bit load/store floating, primary */ 122 #define ASI_FL8_SECONDARY 0xd1 /* [VIS] One 8-bit load/store floating, secondary */ 123 #define ASI_FL16_PRIMARY 0xd2 /* [VIS] One 16-bit load/store floating, primary */ 124 #define ASI_FL16_SECONDARY 0xd3 /* [VIS] One 16-bit load/store floating, secondary */ 125 126 #define ASI_FL8_PRIMARY_LITTLE 0xd8 /* [VIS] One 8-bit load/store floating, primary, little endian */ 127 #define ASI_FL8_SECONDARY_LITTLE 0xd9 /* [VIS] One 8-bit load/store floating, secondary, little endian */ 128 #define ASI_FL16_PRIMARY_LITTLE 0xda /* [VIS] One 16-bit load/store floating, primary, little endian */ 129 #define ASI_FL16_SECONDARY_LITTLE 0xdb /* [VIS] One 16-bit load/store floating, secondary, little endian */ 130 131 #define ASI_BLOCK_COMMIT_PRIMARY 0xe0 /* [4u] block store with commit, primary */ 132 #define ASI_BLOCK_COMMIT_SECONDARY 0xe1 /* [4u] block store with commit, secondary */ 133 #define ASI_BLOCK_PRIMARY 0xf0 /* [4u] block load/store, primary */ 134 #define ASI_BLOCK_SECONDARY 0xf1 /* [4u] block load/store, secondary */ 135 #define ASI_BLOCK_PRIMARY_LITTLE 0xf8 /* [4u] block load/store, primary, little endian */ 136 #define ASI_BLOCK_SECONDARY_LITTLE 0xf9 /* [4u] block load/store, secondary, little endian */ 137 138 139 /* 140 * These are the shorter names used by Solaris 141 */ 142 143 #define ASI_N ASI_NUCLEUS 144 #define ASI_NL ASI_NUCLEUS_LITTLE 145 #define ASI_AIUP ASI_AS_IF_USER_PRIMARY 146 #define ASI_AIUS ASI_AS_IF_USER_SECONDARY 147 #define ASI_AIUPL ASI_AS_IF_USER_PRIMARY_LITTLE 148 #define ASI_AIUSL ASI_AS_IF_USER_SECONDARY_LITTLE 149 #define ASI_P ASI_PRIMARY 150 #define ASI_S ASI_SECONDARY 151 #define ASI_PNF ASI_PRIMARY_NOFAULT 152 #define ASI_SNF ASI_SECONDARY_NOFAULT 153 #define ASI_PL ASI_PRIMARY_LITTLE 154 #define ASI_SL ASI_SECONDARY_LITTLE 155 #define ASI_PNFL ASI_PRIMARY_NOFAULT_LITTLE 156 #define ASI_SNFL ASI_SECONDARY_NOFAULT_LITTLE 157 #define ASI_FL8_P ASI_FL8_PRIMARY 158 #define ASI_FL8_S ASI_FL8_SECONDARY 159 #define ASI_FL16_P ASI_FL16_PRIMARY 160 #define ASI_FL16_S ASI_FL16_SECONDARY 161 #define ASI_FL8_PL ASI_FL8_PRIMARY_LITTLE 162 #define ASI_FL8_SL ASI_FL8_SECONDARY_LITTLE 163 #define ASI_FL16_PL ASI_FL16_PRIMARY_LITTLE 164 #define ASI_FL16_SL ASI_FL16_SECONDARY_LITTLE 165 #define ASI_BLK_AIUP ASI_BLOCK_AS_IF_USER_PRIMARY 166 #define ASI_BLK_AIUPL ASI_BLOCK_AS_IF_USER_PRIMARY_LITTLE 167 #define ASI_BLK_AIUS ASI_BLOCK_AS_IF_USER_SECONDARY 168 #define ASI_BLK_AIUSL ASI_BLOCK_AS_IF_USER_SECONDARY_LITTLE 169 #define ASI_BLK_COMMIT_P ASI_BLOCK_COMMIT_PRIMARY 170 #define ASI_BLK_COMMIT_PRIMARY ASI_BLOCK_COMMIT_PRIMARY 171 #define ASI_BLK_COMMIT_S ASI_BLOCK_COMMIT_SECONDARY 172 #define ASI_BLK_COMMIT_SECONDARY ASI_BLOCK_COMMIT_SECONDARY 173 #define ASI_BLK_P ASI_BLOCK_PRIMARY 174 #define ASI_BLK_PL ASI_BLOCK_PRIMARY_LITTLE 175 #define ASI_BLK_S ASI_BLOCK_SECONDARY 176 #define ASI_BLK_SL ASI_BLOCK_SECONDARY_LITTLE 177 178 /* Alternative spellings */ 179 #define ASI_PRIMARY_NO_FAULT ASI_PRIMARY_NOFAULT 180 #define ASI_PRIMARY_NO_FAULT_LITTLE ASI_PRIMARY_NOFAULT_LITTLE 181 #define ASI_SECONDARY_NO_FAULT ASI_SECONDARY_NOFAULT 182 #define ASI_SECONDARY_NO_FAULT_LITTLE ASI_SECONDARY_NOFAULT_LITTLE 183 184 #define PHYS_ASI(x) (((x) | 0x09) == 0x1d) 185 #define LITTLE_ASI(x) ((x) & ASI_LITTLE) 186 187 /* 188 * The following are 4u control registers 189 */ 190 191 /* Get the CPU's UPAID */ 192 #define UPA_CR_MID_SHIFT (17) 193 #define UPA_CR_MID_SIZE (5) 194 #define UPA_CR_MID_MASK \ 195 (((1 << UPA_CR_MID_SIZE) - 1) << UPA_CR_MID_SHIFT) 196 197 #define UPA_CR_MID(x) (((x)>>UPA_CR_MID_SHIFT)&((1 << UPA_CR_MID_SIZE) - 1)) 198 199 #ifdef _LOCORE 200 201 #define UPA_GET_MID(r1) \ 202 ldxa [%g0] ASI_MID_REG, r1 ; \ 203 srlx r1, UPA_CR_MID_SHIFT, r1 ; \ 204 and r1, (1 << UPA_CR_MID_SIZE) - 1, r1 205 206 #else 207 #define CPU_UPAID UPA_CR_MID(ldxa(0, ASI_MID_REG)) 208 #endif 209 210 /* 211 * [4u] MMU and Cache Control Register (MCCR) 212 * use ASI = 0x45 213 */ 214 #define ASI_MCCR ASI_LSU_CONTROL_REGISTER 215 #define MCCR 0x00 216 217 /* MCCR Bits and their meanings */ 218 #define MCCR_DMMU_EN 0x08 219 #define MCCR_IMMU_EN 0x04 220 #define MCCR_DCACHE_EN 0x02 221 #define MCCR_ICACHE_EN 0x01 222 223 224 /* 225 * MMU control registers 226 */ 227 228 /* Choose an MMU */ 229 #define ASI_DMMU 0x58 230 #define ASI_IMMU 0x50 231 232 /* Other assorted MMU ASIs */ 233 #define ASI_IMMU_8KPTR 0x51 234 #define ASI_IMMU_64KPTR 0x52 235 #define ASI_IMMU_DATA_IN 0x54 236 #define ASI_IMMU_TLB_DATA 0x55 237 #define ASI_IMMU_TLB_TAG 0x56 238 #define ASI_DMMU_8KPTR 0x59 239 #define ASI_DMMU_64KPTR 0x5a 240 #define ASI_DMMU_DATA_IN 0x5c 241 #define ASI_DMMU_TLB_DATA 0x5d 242 #define ASI_DMMU_TLB_TAG 0x5e 243 244 /* 245 * The following are the control registers 246 * They work on both MMUs unless noted. 247 * 248 * Register contents are defined later on individual registers. 249 */ 250 #define TSB_TAG_TARGET 0x0 251 #define TLB_DATA_IN 0x0 252 #define CTX_PRIMARY 0x08 /* primary context -- DMMU only */ 253 #define CTX_SECONDARY 0x10 /* secondary context -- DMMU only */ 254 #define SFSR 0x18 255 #define SFAR 0x20 /* fault address -- DMMU only */ 256 #define TSB 0x28 257 #define TLB_TAG_ACCESS 0x30 258 #define VIRTUAL_WATCHPOINT 0x38 259 #define PHYSICAL_WATCHPOINT 0x40 260 261 /* Tag Target bits */ 262 #define TAG_TARGET_VA_MASK 0x03ffffffffffffffffLL 263 #define TAG_TARGET_VA(x) (((x)<<22)&TAG_TARGET_VA_MASK) 264 #define TAG_TARGET_CONTEXT(x) ((x)>>48) 265 #define TAG_TARGET(c,v) ((((uint64_t)c)<<48)|(((uint64_t)v)&TAG_TARGET_VA_MASK)) 266 267 /* SFSR bits for both D_SFSR and I_SFSR */ 268 #define SFSR_ASI(x) ((x)>>16) 269 #define SFSR_FT_VA_OOR_2 0x02000 /* IMMU: jumpl or return to unsupportd VA */ 270 #define SFSR_FT_VA_OOR_1 0x01000 /* fault at unsupported VA */ 271 #define SFSR_FT_NFO 0x00800 /* DMMU: Access to page marked NFO */ 272 #define SFSR_ILL_ASI 0x00400 /* DMMU: Illegal (unsupported) ASI */ 273 #define SFSR_FT_IO_ATOMIC 0x00200 /* DMMU: Atomic access to noncacheable page */ 274 #define SFSR_FT_ILL_NF 0x00100 /* DMMU: NF load or flush to page marked E (has side effects) */ 275 #define SFSR_FT_PRIV 0x00080 /* Privilege violation */ 276 #define SFSR_FT_E 0x00040 /* DMUU: value of E bit associated address */ 277 #define SFSR_CTXT(x) (((x)>>4)&0x3) 278 #define SFSR_CTXT_IS_PRIM(x) (SFSR_CTXT(x)==0x00) 279 #define SFSR_CTXT_IS_SECOND(x) (SFSR_CTXT(x)==0x01) 280 #define SFSR_CTXT_IS_NUCLEUS(x) (SFSR_CTXT(x)==0x02) 281 #define SFSR_PRIV 0x00008 /* value of PSTATE.PRIV for faulting access */ 282 #define SFSR_W 0x00004 /* DMMU: attempted write */ 283 #define SFSR_OW 0x00002 /* Overwrite; prev vault was still valid */ 284 #define SFSR_FV 0x00001 /* Fault is valid */ 285 #define SFSR_FT (SFSR_FT_VA_OOR_2|SFSR_FT_VA_OOR_1|SFSR_FT_NFO| \ 286 SFSR_ILL_ASI|SFSR_FT_IO_ATOMIC|SFSR_FT_ILL_NF|SFSR_FT_PRIV) 287 288 #define SFSR_BITS "\177\20" \ 289 "f\20\30ASI\0" "b\16VAT\0" "b\15VAD\0" "b\14NFO\0" "b\13ASI\0" "b\12A\0" \ 290 "b\11NF\0" "b\10PRIV\0" "b\7E\0" "b\6NUCLEUS\0" "b\5SECONDCTX\0" "b\4PRIV\0" \ 291 "b\3W\0" "b\2OW\0" "b\1FV\0" 292 293 /* ASFR bits */ 294 #define ASFR_ME 0x100000000LL 295 #define ASFR_PRIV 0x080000000LL 296 #define ASFR_ISAP 0x040000000LL 297 #define ASFR_ETP 0x020000000LL 298 #define ASFR_IVUE 0x010000000LL 299 #define ASFR_TO 0x008000000LL 300 #define ASFR_BERR 0x004000000LL 301 #define ASFR_LDP 0x002000000LL 302 #define ASFR_CP 0x001000000LL 303 #define ASFR_WP 0x000800000LL 304 #define ASFR_EDP 0x000400000LL 305 #define ASFR_UE 0x000200000LL 306 #define ASFR_CE 0x000100000LL 307 #define ASFR_ETS 0x0000f0000LL 308 #define ASFT_P_SYND 0x00000ffffLL 309 310 #define AFSR_BITS "\177\20" \ 311 "b\40ME\0" "b\37PRIV\0" "b\36ISAP\0" "b\35ETP\0" \ 312 "b\34IVUE\0" "b\33TO\0" "b\32BERR\0" "b\31LDP\0" \ 313 "b\30CP\0" "b\27WP\0" "b\26EDP\0" "b\25UE\0" \ 314 "b\24CE\0" "f\20\4ETS\0" "f\0\20P_SYND\0" 315 316 /* 317 * Here's the spitfire TSB control register bits. 318 * 319 * Each TSB entry is 16-bytes wide. The TSB must be size aligned 320 */ 321 #define TSB_SIZE_512 0x0 /* 8kB, etc. */ 322 #define TSB_SIZE_1K 0x01 323 #define TSB_SIZE_2K 0x02 324 #define TSB_SIZE_4K 0x03 325 #define TSB_SIZE_8K 0x04 326 #define TSB_SIZE_16K 0x05 327 #define TSB_SIZE_32K 0x06 328 #define TSB_SIZE_64K 0x07 329 #define TSB_SPLIT 0x1000 330 #define TSB_BASE 0xffffffffffffe000 331 332 /* TLB Tag Access bits */ 333 #define TLB_TAG_ACCESS_VA 0xffffffffffffe000 334 #define TLB_TAG_ACCESS_CTX 0x0000000000001fff 335 336 /* 337 * TLB demap registers. TTEs are defined in v9pte.h 338 * 339 * Use the address space to select between IMMU and DMMU. 340 * The address of the register selects which context register 341 * to read the ASI from. 342 * 343 * The data stored in the register is interpreted as the VA to 344 * use. The DEMAP_CTX_<> registers ignore the address and demap the 345 * entire ASI. 346 * 347 */ 348 #define ASI_IMMU_DEMAP 0x57 /* [4u] IMMU TLB demap */ 349 #define ASI_DMMU_DEMAP 0x5f /* [4u] IMMU TLB demap */ 350 351 #define DEMAP_PAGE_NUCLEUS ((0x02)<<4) /* Demap page from kernel AS */ 352 #define DEMAP_PAGE_PRIMARY ((0x00)<<4) /* Demap a page from primary CTXT */ 353 #define DEMAP_PAGE_SECONDARY ((0x01)<<4) /* Demap page from secondary CTXT (DMMU only) */ 354 #define DEMAP_CTX_NUCLEUS ((0x06)<<4) /* Demap all of kernel CTXT */ 355 #define DEMAP_CTX_PRIMARY ((0x04)<<4) /* Demap all of primary CTXT */ 356 #define DEMAP_CTX_SECONDARY ((0x05)<<4) /* Demap all of secondary CTXT */ 357 358 /* 359 * Interrupt registers. This really gets hairy. 360 */ 361 362 /* IRSR -- Interrupt Receive Status Ragister */ 363 #define ASI_IRSR 0x49 364 #define IRSR 0x00 365 #define IRSR_BUSY 0x020 366 #define IRSR_MID(x) (x&0x1f) 367 368 /* IRDR -- Interrupt Receive Data Registers */ 369 #define ASI_IRDR 0x7f 370 #define IRDR_0H 0x40 371 #define IRDR_0L 0x48 /* unimplemented */ 372 #define IRDR_1H 0x50 373 #define IRDR_1L 0x58 /* unimplemented */ 374 #define IRDR_2H 0x60 375 #define IRDR_2L 0x68 /* unimplemented */ 376 #define IRDR_3H 0x70 /* unimplemented */ 377 #define IRDR_3L 0x78 /* unimplemented */ 378 379 /* SOFTINT ASRs */ 380 #define SET_SOFTINT %asr20 /* Sets these bits */ 381 #define CLEAR_SOFTINT %asr21 /* Clears these bits */ 382 #define SOFTINT %asr22 /* Reads the register */ 383 #define TICK_CMPR %asr23 384 385 #define TICK_INT 0x01 /* level-14 clock tick */ 386 #define SOFTINT1 (0x1<<1) 387 #define SOFTINT2 (0x1<<2) 388 #define SOFTINT3 (0x1<<3) 389 #define SOFTINT4 (0x1<<4) 390 #define SOFTINT5 (0x1<<5) 391 #define SOFTINT6 (0x1<<6) 392 #define SOFTINT7 (0x1<<7) 393 #define SOFTINT8 (0x1<<8) 394 #define SOFTINT9 (0x1<<9) 395 #define SOFTINT10 (0x1<<10) 396 #define SOFTINT11 (0x1<<11) 397 #define SOFTINT12 (0x1<<12) 398 #define SOFTINT13 (0x1<<13) 399 #define SOFTINT14 (0x1<<14) 400 #define SOFTINT15 (0x1<<15) 401 402 /* Interrupt Dispatch -- usually reserved for cross-calls */ 403 #define ASR_IDSR 0x48 /* Interrupt dispatch status reg */ 404 #define IDSR 0x00 405 #define IDSR_NACK 0x02 406 #define IDSR_BUSY 0x01 407 408 #define ASI_INTERRUPT_DISPATCH 0x77 /* [4u] spitfire interrupt dispatch regs */ 409 410 /* Interrupt delivery initiation */ 411 #define IDCR(x) ((((uint64_t)(x)) << 14) | 0x70) 412 413 #define IDDR_0H 0x40 /* Store data to send in these regs */ 414 #define IDDR_0L 0x48 /* unimplemented */ 415 #define IDDR_1H 0x50 416 #define IDDR_1L 0x58 /* unimplemented */ 417 #define IDDR_2H 0x60 418 #define IDDR_2L 0x68 /* unimplemented */ 419 #define IDDR_3H 0x70 /* unimplemented */ 420 #define IDDR_3L 0x78 /* unimplemented */ 421 422 /* 423 * Error registers 424 */ 425 426 /* Since we won't try to fix async errs, we don't care about the bits in the regs */ 427 #define ASI_AFAR 0x4d /* Asynchronous fault address register */ 428 #define AFAR 0x00 429 #define ASI_AFSR 0x4c /* Asynchronous fault status register */ 430 #define AFSR 0x00 431 432 #define ASI_P_EER 0x4b /* Error enable register */ 433 #define P_EER 0x00 434 #define P_EER_ISAPEN 0x04 /* Enable fatal on ISAP */ 435 #define P_EER_NCEEN 0x02 /* Enable trap on uncorrectable errs */ 436 #define P_EER_CEEN 0x01 /* Enable trap on correctable errs */ 437 438 #define ASI_DATAPATH_READ 0x7f /* Read the regs */ 439 #define ASI_DATAPATH_WRITE 0x77 /* Write to the regs */ 440 #define P_DPER_0 0x00 /* Datapath err reg 0 */ 441 #define P_DPER_1 0x18 /* Datapath err reg 1 */ 442 #define P_DCR_0 0x20 /* Datapath control reg 0 */ 443 #define P_DCR_1 0x38 /* Datapath control reg 0 */ 444 445 446 /* From sparc64/asm.h which I think I'll deprecate since it makes bus.h a pain. */ 447 448 #ifndef _LOCORE 449 /* 450 * GCC __asm constructs for doing assembly stuff. 451 */ 452 453 /* 454 * ``Routines'' to load and store from/to alternate address space. 455 * The location can be a variable, the asi value (address space indicator) 456 * must be a constant. 457 * 458 * N.B.: You can put as many special functions here as you like, since 459 * they cost no kernel space or time if they are not used. 460 * 461 * These were static inline functions, but gcc screws up the constraints 462 * on the address space identifiers (the "n"umeric value part) because 463 * it inlines too late, so we have to use the funny valued-macro syntax. 464 */ 465 466 /* 467 * Apparently the definition of bypass ASIs is that they all use the 468 * D$ so we need to flush the D$ to make sure we don't get data pollution. 469 */ 470 471 #ifdef __arch64__ 472 static __inline u_char 473 lduba(paddr_t loc, int asi) 474 { 475 register unsigned int _lduba_v; 476 477 __asm volatile( 478 "wr %2, %%g0, %%asi; " 479 "lduba [%1]%%asi, %0 " 480 : "=r" (_lduba_v) 481 : "r" ((unsigned long)(loc)), "r" (asi)); 482 return (_lduba_v); 483 } 484 #else 485 static __inline u_char 486 lduba(paddr_t loc, int asi) 487 { 488 register unsigned int _lduba_v, _loc_hi, _pstate; 489 490 _loc_hi = (((uint64_t)loc)>>32); 491 if (PHYS_ASI(asi)) { 492 __asm volatile( 493 "wr %4,%%g0,%%asi; " 494 "sllx %3,32,%0; " 495 "rdpr %%pstate,%1; " 496 "or %0,%2,%0; " 497 "wrpr %1,8,%%pstate; " 498 "membar #Sync; " 499 "lduba [%0]%%asi,%0; " 500 "wrpr %1,0,%%pstate; " 501 "membar #Sync; " 502 "wr %%g0, 0x82, %%asi " 503 : "=&r" (_lduba_v), "=&r" (_pstate) 504 : "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 505 } else { 506 __asm volatile( 507 "wr %3,%%g0,%%asi; " 508 "sllx %2,32,%0; " 509 "or %0,%1,%0; " 510 "lduba [%0]%%asi,%0; " 511 "wr %%g0, 0x82, %%asi " 512 : "=&r" (_lduba_v) 513 : "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 514 } 515 return (_lduba_v); 516 } 517 #endif 518 519 #ifdef __arch64__ 520 /* load half-word from alternate address space */ 521 static __inline u_short 522 lduha(paddr_t loc, int asi) 523 { 524 register unsigned int _lduha_v; 525 526 __asm volatile( 527 "wr %2, %%g0, %%asi; " 528 "lduha [%1]%%asi, %0 " 529 : "=r" (_lduha_v) 530 : "r" ((unsigned long)(loc)), "r" (asi)); 531 return (_lduha_v); 532 } 533 #else 534 /* load half-word from alternate address space */ 535 static __inline u_short 536 lduha(paddr_t loc, int asi) { 537 register unsigned int _lduha_v, _loc_hi, _pstate; 538 539 _loc_hi = (((uint64_t)loc)>>32); 540 541 if (PHYS_ASI(asi)) { 542 __asm volatile( 543 "wr %4,%%g0,%%asi; " 544 "sllx %3,32,%0; " 545 "rdpr %%pstate,%1; " 546 "wrpr %1,8,%%pstate; " 547 "or %0,%2,%0; " 548 "membar #Sync; " 549 "lduha [%0]%%asi,%0; " 550 "wrpr %1,0,%%pstate; " 551 "membar #Sync; " 552 "wr %%g0, 0x82, %%asi " 553 : "=&r" (_lduha_v), "=&r" (_pstate) 554 : "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 555 } else { 556 __asm volatile( 557 "wr %3,%%g0,%%asi; " 558 "sllx %2,32,%0; " 559 "or %0,%1,%0; " 560 "lduha [%0]%%asi,%0; " 561 "wr %%g0, 0x82, %%asi " 562 : "=&r" (_lduha_v) 563 : "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 564 } 565 return (_lduha_v); 566 } 567 #endif 568 569 570 #ifdef __arch64__ 571 /* load unsigned int from alternate address space */ 572 static __inline u_int 573 lda(paddr_t loc, int asi) 574 { 575 register unsigned int _lda_v; 576 577 __asm volatile( 578 "wr %2,%%g0,%%asi; " 579 "lda [%1]%%asi,%0 " 580 : "=r" (_lda_v) 581 : "r" ((unsigned long)(loc)), "r" (asi)); 582 return (_lda_v); 583 } 584 585 /* load signed int from alternate address space */ 586 static __inline int 587 ldswa(paddr_t loc, int asi) 588 { 589 register int _lda_v; 590 591 __asm volatile( 592 "wr %2,%%g0,%%asi; " 593 "ldswa [%1]%%asi,%0; " 594 : "=r" (_lda_v) 595 : "r" ((unsigned long)(loc)), "r" (asi)); 596 return (_lda_v); 597 } 598 #else /* __arch64__ */ 599 /* load unsigned int from alternate address space */ 600 static __inline u_int 601 lda(paddr_t loc, int asi) 602 { 603 register unsigned int _lda_v, _loc_hi, _pstate; 604 605 _loc_hi = (((uint64_t)loc)>>32); 606 if (PHYS_ASI(asi)) { 607 __asm volatile( 608 "wr %4,%%g0,%%asi; " 609 "rdpr %%pstate,%1; " 610 "sllx %3,32,%0; " 611 "wrpr %1,8,%%pstate; " 612 "or %0,%2,%0; " 613 "membar #Sync; " 614 "lda [%0]%%asi,%0; " 615 "wrpr %1,0,%%pstate; " 616 "membar #Sync; " 617 "wr %%g0, 0x82, %%asi " 618 : "=&r" (_lda_v), "=&r" (_pstate) 619 : "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 620 } else { 621 __asm volatile( 622 "wr %3,%%g0,%%asi; " 623 "sllx %2,32,%0; " 624 "or %0,%1,%0; " 625 "lda [%0]%%asi,%0; " 626 "wr %%g0, 0x82, %%asi " 627 : "=&r" (_lda_v) 628 : "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 629 } 630 return (_lda_v); 631 } 632 633 /* load signed int from alternate address space */ 634 static __inline int 635 ldswa(paddr_t loc, int asi) 636 { 637 register int _lda_v, _loc_hi, _pstate; 638 639 _loc_hi = (((uint64_t)loc)>>32); 640 if (PHYS_ASI(asi)) { 641 __asm volatile( 642 "wr %4,%%g0,%%asi; " 643 "rdpr %%pstate,%1; " 644 "wrpr %1,8,%%pstate; " 645 "sllx %3,32,%0; " 646 " or %0,%2,%0; " 647 "membar #Sync; " 648 "ldswa [%0]%%asi,%0; " 649 "wrpr %1,0,%%pstate; " 650 "membar #Sync; " 651 "wr %%g0, 0x82, %%asi " 652 : "=&r" (_lda_v), "=&r" (_pstate) 653 : "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 654 } else { 655 __asm volatile( 656 "wr %3,%%g0,%%asi; " 657 "sllx %2,32,%0; " 658 "or %0,%1,%0; " 659 "ldswa [%0]%%asi,%0; " 660 "wr %%g0, 0x82, %%asi " 661 : "=&r" (_lda_v) 662 : "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 663 } 664 return (_lda_v); 665 } 666 #endif /* __arch64__ */ 667 668 #ifdef __arch64__ 669 /* load 64-bit int from alternate address space -- these should never be used */ 670 static __inline uint64_t 671 ldda(paddr_t loc, int asi) 672 { 673 register long long _lda_v; 674 675 __asm volatile( 676 "wr %2,%%g0,%%asi; " 677 "ldda [%1]%%asi,%0 " 678 : "=r" (_lda_v) 679 : "r" ((unsigned long)(loc)), "r" (asi)); 680 return (_lda_v); 681 } 682 #else 683 /* load 64-bit int from alternate address space */ 684 static __inline uint64_t 685 ldda(paddr_t loc, int asi) 686 { 687 register long long _lda_v, _loc_hi, _pstate; 688 689 _loc_hi = (((uint64_t)loc)>>32); 690 if (PHYS_ASI(asi)) { 691 __asm volatile( 692 "wr %4,%%g0,%%asi; " 693 "rdpr %%pstate,%1; " 694 "wrpr %1,8,%%pstate; " 695 "sllx %3,32,%0; " 696 "or %0,%2,%0; " 697 "membar #Sync; " 698 "ldda [%0]%%asi,%0; " 699 "wrpr %1,0,%%pstate; " 700 "membar #Sync; " 701 "wr %%g0, 0x82, %%asi " 702 : "=&r" (_lda_v), "=&r" (_pstate) 703 : "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 704 } else { 705 __asm volatile( 706 "wr %3,%%g0,%%asi; " 707 "sllx %2,32,%0; " 708 " or %0,%1,%0; " 709 "ldda [%0]%%asi,%0; " 710 "wr %%g0, 0x82, %%asi " 711 : "=&r" (_lda_v) 712 : "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 713 } 714 return (_lda_v); 715 } 716 #endif 717 718 719 #ifdef __arch64__ 720 /* native load 64-bit int from alternate address space w/64-bit compiler*/ 721 static __inline uint64_t 722 ldxa(paddr_t loc, int asi) 723 { 724 register unsigned long _lda_v; 725 726 __asm volatile( 727 "wr %2,%%g0,%%asi; " 728 "ldxa [%1]%%asi,%0 " 729 : "=r" (_lda_v) 730 : "r" ((unsigned long)(loc)), "r" (asi)); 731 return (_lda_v); 732 } 733 #else 734 /* native load 64-bit int from alternate address space w/32-bit compiler*/ 735 static __inline uint64_t 736 ldxa(paddr_t loc, int asi) 737 { 738 register unsigned long _ldxa_lo, _ldxa_hi, _loc_hi; 739 740 _loc_hi = (((uint64_t)loc)>>32); 741 if (PHYS_ASI(asi)) { 742 __asm volatile( 743 "wr %4,%%g0,%%asi; " 744 "rdpr %%pstate,%1; " 745 "sllx %3,32,%0; " 746 "wrpr %1,8,%%pstate; " 747 "or %0, %2, %0; " 748 "membar #Sync; " 749 "ldxa [%0]%%asi,%0; " 750 "wrpr %1,0,%%pstate; " 751 "membar #Sync; " 752 "srlx %0, 32, %1; " 753 "srl %0, 0, %0; " 754 "wr %%g0, 0x82, %%asi " 755 : "=&r" (_ldxa_lo), "=&r" (_ldxa_hi) 756 : "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 757 } else { 758 __asm volatile( 759 "wr %4,%%g0,%%asi; " 760 "sllx %3,32,%0; " 761 "or %0,%2,%0; " 762 "ldxa [%0]%%asi,%0; " 763 "srlx %0,32,%1; " 764 "srl %0, 0, %0; " 765 "wr %%g0, 0x82, %%asi " 766 : "=&r" (_ldxa_lo), "=&r" (_ldxa_hi) 767 : "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 768 } 769 return ((((int64_t)_ldxa_hi)<<32)|_ldxa_lo); 770 } 771 #endif 772 773 /* store byte to alternate address space */ 774 #ifdef __arch64__ 775 static __inline void 776 stba(paddr_t loc, int asi, u_char value) 777 { 778 __asm volatile( 779 "wr %2, %%g0, %%asi; " 780 "stba %0, [%1]%%asi " 781 : : "r" ((int)(value)), "r" ((unsigned long)(loc)), "r" (asi)); 782 } 783 #else 784 static __inline void 785 stba(paddr_t loc, int asi, u_char value) 786 { 787 register int _loc_hi, _pstate; 788 789 _loc_hi = (((uint64_t)loc)>>32); 790 if (PHYS_ASI(asi)) { 791 __asm volatile( 792 "wr %5,%%g0,%%asi; " 793 "sllx %4,32,%0; " 794 "rdpr %%pstate,%1; " 795 "or %3,%0,%0; " 796 "wrpr %1,8,%%pstate; " 797 "stba %2,[%0]%%asi; " 798 "wrpr %1,0,%%pstate; " 799 "membar #Sync; " 800 "wr %%g0, 0x82, %%asi " 801 : "=&r" (_loc_hi), "=&r" (_pstate) 802 : "r" ((int)(value)), "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 803 } else { 804 __asm volatile( 805 "wr %4,%%g0,%%asi; " 806 "sllx %3,32,%0; " 807 "or %2,%0,%0; " 808 "stba %1,[%0]%%asi; " 809 "wr %%g0, 0x82, %%asi " 810 : "=&r" (_loc_hi) 811 : "r" ((int)(value)), "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi)); 812 } 813 } 814 #endif 815 816 /* store half-word to alternate address space */ 817 #ifdef __arch64__ 818 static __inline void 819 stha(paddr_t loc, int asi, u_short value) 820 { 821 __asm volatile( 822 "wr %2,%%g0,%%asi; " 823 "stha %0,[%1]%%asi " 824 : : "r" ((int)(value)), "r" ((unsigned long)(loc)), 825 "r" (asi) : "memory"); 826 } 827 #else 828 static __inline void 829 stha(paddr_t loc, int asi, u_short value) 830 { 831 register int _loc_hi, _pstate; 832 833 _loc_hi = (((uint64_t)loc)>>32); 834 if (PHYS_ASI(asi)) { 835 __asm volatile( 836 "wr %5,%%g0,%%asi; " 837 "sllx %4,32,%0; " 838 "rdpr %%pstate,%1; " 839 "or %3,%0,%0; " 840 "wrpr %1,8,%%pstate; " 841 "stha %2,[%0]%%asi; " 842 "wrpr %1,0,%%pstate; " 843 "membar #Sync; " 844 "wr %%g0, 0x82, %%asi " 845 : "=&r" (_loc_hi), "=&r" (_pstate) 846 : "r" ((int)(value)), "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi) 847 : "memory"); 848 } else { 849 __asm volatile( 850 "wr %4,%%g0,%%asi; " 851 "sllx %3,32,%0; " 852 "or %2,%0,%0; " 853 "stha %1,[%0]%%asi; " 854 "wr %%g0, 0x82, %%asi " 855 : "=&r" (_loc_hi) 856 : "r" ((int)(value)), "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi) 857 : "memory"); 858 } 859 } 860 #endif 861 862 863 /* store int to alternate address space */ 864 #ifdef __arch64__ 865 static __inline void 866 sta(paddr_t loc, int asi, u_int value) 867 { 868 __asm volatile( 869 "wr %2,%%g0,%%asi; " 870 "sta %0,[%1]%%asi " 871 : : "r" ((int)(value)), "r" ((unsigned long)(loc)), 872 "r" (asi) : "memory"); 873 } 874 #else 875 static __inline void 876 sta(paddr_t loc, int asi, u_int value) 877 { 878 register int _loc_hi, _pstate; 879 880 _loc_hi = (((uint64_t)loc)>>32); 881 if (PHYS_ASI(asi)) { 882 __asm volatile( 883 "wr %5,%%g0,%%asi; " 884 "sllx %4,32,%0; " 885 "rdpr %%pstate,%1; " 886 "or %3,%0,%0; " 887 "wrpr %1,8,%%pstate; " 888 "sta %2,[%0]%%asi; " 889 "wrpr %1,0,%%pstate; " 890 "membar #Sync; " 891 "wr %%g0, 0x82, %%asi " 892 : "=&r" (_loc_hi), "=&r" (_pstate) 893 : "r" ((int)(value)), "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi) 894 : "memory"); 895 } else { 896 __asm volatile( 897 "wr %4,%%g0,%%asi; " 898 "sllx %3,32,%0; " 899 "or %2,%0,%0; " 900 "sta %1,[%0]%%asi; " 901 "wr %%g0, 0x82, %%asi " 902 : "=&r" (_loc_hi) 903 : "r" ((int)(value)), "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi) 904 : "memory"); 905 } 906 } 907 #endif 908 909 /* store 64-bit int to alternate address space */ 910 #ifdef __arch64__ 911 static __inline void 912 stda(paddr_t loc, int asi, uint64_t value) 913 { 914 __asm volatile( 915 "wr %2,%%g0,%%asi; " 916 "stda %0,[%1]%%asi " 917 : : "r" ((long long)(value)), "r" ((unsigned long)(loc)), "r" (asi) 918 : "memory"); 919 } 920 #else 921 static __inline void 922 stda(paddr_t loc, int asi, uint64_t value) 923 { 924 register int _loc_hi, _pstate; 925 926 _loc_hi = (((uint64_t)loc)>>32); 927 if (PHYS_ASI(asi)) { 928 __asm volatile( 929 "wr %5,%%g0,%%asi; " 930 "sllx %4,32,%0; " 931 "rdpr %%pstate,%1; " 932 "or %3,%0,%0; " 933 "wrpr %1,8,%%pstate; " 934 "stda %2,[%0]%%asi; " 935 "wrpr %1,0,%%pstate; " 936 "membar #Sync; " 937 "wr %%g0, 0x82, %%asi " 938 : "=&r" (_loc_hi), "=&r" (_pstate) 939 : "r" ((long long)(value)), "r" ((unsigned long)(loc)), 940 "r" (_loc_hi), "r" (asi) 941 : "memory"); 942 } else { 943 __asm volatile( 944 "wr %4,%%g0,%%asi; " 945 "sllx %3,32,%0; " 946 "or %2,%0,%0; " 947 "stda %1,[%0]%%asi; " 948 "wr %%g0, 0x82, %%asi " 949 : "=&r" (_loc_hi) 950 : "r" ((long long)(value)), "r" ((unsigned long)(loc)), 951 "r" (_loc_hi), "r" (asi) 952 : "memory"); 953 } 954 } 955 #endif 956 957 /* set dmmu secondary context */ 958 static __inline void 959 dmmu_set_secondary_context(uint ctx) 960 { 961 __asm volatile( 962 "stxa %0,[%1]%2; " 963 "membar #Sync " 964 : : "r" (ctx), 965 "r" (CTX_SECONDARY), "n" (ASI_DMMU) 966 : "memory"); 967 } 968 969 #ifdef __arch64__ 970 /* native store 64-bit int to alternate address space w/64-bit compiler*/ 971 static __inline void 972 stxa(paddr_t loc, int asi, uint64_t value) 973 { 974 __asm volatile( 975 "wr %2,%%g0,%%asi; " 976 "stxa %0,[%1]%%asi " 977 : : "r" ((unsigned long)(value)), 978 "r" ((unsigned long)(loc)), "r" (asi) 979 : "memory"); 980 } 981 #else 982 /* native store 64-bit int to alternate address space w/32-bit compiler*/ 983 static __inline void 984 stxa(paddr_t loc, int asi, uint64_t value) 985 { 986 int _stxa_lo, _stxa_hi, _loc_hi; 987 988 _stxa_lo = value; 989 _stxa_hi = ((uint64_t)value)>>32; 990 _loc_hi = (((uint64_t)loc)>>32); 991 992 if (PHYS_ASI(asi)) { 993 __asm volatile( 994 "wr %7,%%g0,%%asi; " 995 "sllx %4,32,%1; " 996 "sllx %6,32,%0; " 997 "or %1,%3,%1; " 998 "rdpr %%pstate,%2; " 999 "or %0,%5,%0; " 1000 "wrpr %2,8,%%pstate; " 1001 "stxa %1,[%0]%%asi; " 1002 "wrpr %2,0,%%pstate; " 1003 "membar #Sync; " 1004 "wr %%g0, 0x82, %%asi " 1005 : "=&r" (_loc_hi), "=&r" (_stxa_hi), "=&r" ((int)(_stxa_lo)) 1006 : "r" ((int)(_stxa_lo)), "r" ((int)(_stxa_hi)), 1007 "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi) 1008 : "memory"); 1009 } else { 1010 __asm volatile( 1011 "wr %6,%%g0,%%asi; " 1012 "sllx %3,32,%1; " 1013 "sllx %5,32,%0; " 1014 "or %1,%2,%1; " 1015 "or %0,%4,%0; " 1016 "stxa %1,[%0]%%asi; " 1017 "wr %%g0, 0x82, %%asi " 1018 : "=&r" (_loc_hi), "=&r" (_stxa_hi) 1019 : "r" ((int)(_stxa_lo)), "r" ((int)(_stxa_hi)), 1020 "r" ((unsigned long)(loc)), "r" (_loc_hi), "r" (asi) 1021 : "memory"); 1022 } 1023 } 1024 #endif 1025 1026 #ifdef __arch64__ 1027 /* native store 32-bit int to alternate address space w/64-bit compiler*/ 1028 static __inline uint32_t 1029 casa(paddr_t loc, int asi, uint32_t value, uint32_t oldvalue) 1030 { 1031 __asm volatile( 1032 "wr %3,%%g0,%%asi; " 1033 "casa [%1]%%asi,%2,%0 " 1034 : "+r" (value) 1035 : "r" ((unsigned long)(loc)), "r" (oldvalue), "r" (asi) 1036 : "memory"); 1037 return (value); 1038 } 1039 /* native store 64-bit int to alternate address space w/64-bit compiler*/ 1040 static __inline uint64_t 1041 casxa(paddr_t loc, int asi, uint64_t value, uint64_t oldvalue) 1042 { 1043 __asm volatile( 1044 "wr %3,%%g0,%%asi; " 1045 "casxa [%1]%%asi,%2,%0 " 1046 : "+r" (value) 1047 : "r" ((unsigned long)(loc)), "r" (oldvalue), "r" (asi) 1048 : "memory"); 1049 return (value); 1050 } 1051 #else 1052 #if 0 1053 /* native store 64-bit int to alternate address space w/32-bit compiler*/ 1054 static __inline uint64_t 1055 casxa(paddr_t loc, int asi, uint64_t value, uint64_t oldvalue) 1056 { 1057 int _casxa_lo, _casxa_hi, _loc_hi, _oval_hi; 1058 1059 _casxa_lo = value; 1060 _casxa_hi = ((uint64_t)value)>>32; 1061 _oval_hi = ((uint64_t)oldvalue)>>32; 1062 _loc_hi = (((uint64_t)loc)>>32); 1063 1064 #ifdef __notyet 1065 /* 1066 * gcc cannot handle this since it thinks it has >10 asm operands. 1067 */ 1068 if (PHYS_ASI(asi)) { 1069 __asm volatile( 1070 "wr %6,%%g0,%%asi; " 1071 "sllx %1,32,%1; " 1072 "rdpr %%pstate,%2; " 1073 "sllx %0,32,%0; " 1074 "or %1,%2,%1; " 1075 "sllx %3,32,%3; " 1076 "or %0,%4,%0; " 1077 "or %3,%5,%3; " 1078 "wrpr %2,8,%%pstate; " 1079 "casxa [%0]%%asi,%3,%1; " 1080 "wrpr %2,0,%%pstate; " 1081 "andn %0,0x1f,%3; " 1082 "membar #Sync; " 1083 "sll %1,0,%2; " 1084 "srax %1,32,%1; " 1085 "wr %%g0, 0x82, %%asi " 1086 : "+r" (_loc_hi), "+r" (_casxa_hi), "+r" (_casxa_lo), "+r" (_oval_hi) 1087 : "r" ((unsigned long)(loc)), "r" ((unsigned int)(oldvalue)), 1088 "r" (asi) 1089 : "memory"); 1090 } else { 1091 __asm volatile( 1092 "wr %7,%%g0,%%asi; " 1093 "sllx %1,32,%1; " 1094 "sllx %5,32,%0; " 1095 "or %1,%2,%1; " 1096 "sllx %3,32,%2; " 1097 "or %0,%4,%0; " 1098 "or %2,%4,%2; " 1099 "casxa [%0]%%asi,%2,%1; " 1100 "sll %1,0,%2; " 1101 "srax %o1,32,%o1; " 1102 "wr %%g0, 0x82, %%asi " 1103 : "=&r" (_loc_hi), "+r" (_casxa_hi), "+r" (_casxa_lo) 1104 : "r" ((int)(_oval_hi)), "r" ((int)(oldvalue)), 1105 "r" ((unsigned long)(loc)), "r" (_loc_hi), 1106 "r" (asi) 1107 : "memory"); 1108 } 1109 #endif 1110 return (((uint64_t)_casxa_hi<<32)|(uint64_t)_casxa_lo); 1111 } 1112 #endif 1113 #endif 1114 1115 /* flush address from data cache */ 1116 #define flush(loc) ({ \ 1117 __asm volatile("flush %0" : : \ 1118 "r" ((unsigned long)(loc))); \ 1119 }) 1120 1121 /* Flush a D$ line */ 1122 #if 0 1123 #define flushline(loc) ({ \ 1124 stxa(((paddr_t)loc)&(~0x1f), (ASI_DCACHE_TAG), 0); \ 1125 membar_sync(); \ 1126 }) 1127 #endif 1128 1129 /* The following two enable or disable the dcache in the LSU control register */ 1130 #define dcenable() ({ \ 1131 int res; \ 1132 __asm volatile("ldxa [%%g0] %1,%0; or %0,%2,%0; stxa %0,[%%g0] %1; membar #Sync" \ 1133 : "r" (res) : "n" (ASI_MCCR), "n" (MCCR_DCACHE_EN)); \ 1134 }) 1135 #define dcdisable() ({ \ 1136 int res; \ 1137 __asm volatile("ldxa [%%g0] %1,%0; andn %0,%2,%0; stxa %0,[%%g0] %1; membar #Sync" \ 1138 : "r" (res) : "n" (ASI_MCCR), "n" (MCCR_DCACHE_EN)); \ 1139 }) 1140 1141 /* 1142 * SPARC V9 memory barrier instructions. 1143 */ 1144 /* Make all stores complete before next store */ 1145 #define membar_storestore() __asm volatile("membar #StoreStore" : :) 1146 /* Make all loads complete before next store */ 1147 #define membar_loadstore() __asm volatile("membar #LoadStore" : :) 1148 /* Make all stores complete before next load */ 1149 #define membar_storeload() __asm volatile("membar #StoreLoad" : :) 1150 /* Make all loads complete before next load */ 1151 #define membar_loadload() __asm volatile("membar #LoadLoad" : :) 1152 /* Complete all outstanding memory operations and exceptions */ 1153 #define membar_sync() __asm volatile("membar #Sync" : :) 1154 /* Complete all outstanding memory operations */ 1155 #define membar_memissue() __asm volatile("membar #MemIssue" : :) 1156 /* Complete all outstanding stores before any new loads */ 1157 #define membar_lookaside() __asm volatile("membar #Lookaside" : :) 1158 1159 #define membar_load() __asm volatile("membar #LoadLoad | #LoadStore" : :) 1160 #define membar_store() __asm volatile("membar #LoadStore | #StoreStore" : :) 1161 1162 #ifdef __arch64__ 1163 /* read 64-bit %tick register */ 1164 #define tick() ({ \ 1165 register u_long _tick_tmp; \ 1166 __asm volatile("rdpr %%tick, %0" : "=r" (_tick_tmp) :); \ 1167 _tick_tmp; \ 1168 }) 1169 #else 1170 /* read 64-bit %tick register on 32-bit system */ 1171 #define tick() ({ \ 1172 register u_int _tick_hi = 0, _tick_lo = 0; \ 1173 __asm volatile("rdpr %%tick, %0; srl %0,0,%1; srlx %0,32,%0 " \ 1174 : "=r" (_tick_hi), "=r" (_tick_lo) : ); \ 1175 (((uint64_t)_tick_hi)<<32)|((uint64_t)_tick_lo); \ 1176 }) 1177 #endif 1178 1179 extern void next_tick(long); 1180 #endif 1181 1182 #endif /* _SPARC_CTLREG_H_ */ 1183