/*	$NetBSD: cpu.h,v 1.100 2018/01/16 08:23:17 mrg Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */

/*
 * Exported definitions unique to SPARC cpu support.
 */

/*
 * Sun-4 and Sun-4c virtual address cache.
 *
 * Sun-4 virtual caches come in two flavors, write-through (Sun-4c)
 * and write-back (Sun-4).  The write-back caches are much faster
 * but require a bit more care.
 *
 * This is exported via sysctl so be careful changing it.
 */
enum vactype { VAC_UNKNOWN, VAC_NONE, VAC_WRITETHROUGH, VAC_WRITEBACK };
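/*
 * A minimal sketch (not part of this header) of how userland might
 * read one of the CTL_MACHDEP values above with the standard
 * sysctl(3) interface.  Error handling is reduced to a bare minimum.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

static int
print_booted_kernel(void)
{
	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
	char buf[128];
	size_t len = sizeof(buf);

	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
		return -1;		/* e.g. not supported */
	printf("booted kernel: %s\n", buf);
	return 0;
}
#endif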
/*
 * Cache control information.
 *
 * This is exported via sysctl so be careful changing it.
 */

struct cacheinfo {
	int	c_totalsize;		/* total size, in bytes */
					/* if split, MAX(icache,dcache) */
	int	c_enabled;		/* true => cache is enabled */
	int	c_hwflush;		/* true => have hardware flush */
	int	c_linesize;		/* line size, in bytes */
					/* if split, MIN(icache,dcache) */
	int	c_l2linesize;		/* log2(linesize) */
	int	c_nlines;		/* precomputed # of lines to flush */
	int	c_physical;		/* true => cache has physical
					   address tags */
	int	c_associativity;	/* # of "buckets" in cache line */
	int	c_split;		/* true => cache is split */

	int	ic_totalsize;		/* instruction cache */
	int	ic_enabled;
	int	ic_linesize;
	int	ic_l2linesize;
	int	ic_nlines;
	int	ic_associativity;

	int	dc_totalsize;		/* data cache */
	int	dc_enabled;
	int	dc_linesize;
	int	dc_l2linesize;
	int	dc_nlines;
	int	dc_associativity;

	int	ec_totalsize;		/* external cache info */
	int	ec_enabled;
	int	ec_linesize;
	int	ec_l2linesize;
	int	ec_nlines;
	int	ec_associativity;

	enum vactype	c_vactype;

	int	c_flags;
#define	CACHE_PAGETABLES	0x1	/* caching pagetables OK (sun4m) */
#define	CACHE_TRAPPAGEBUG	0x2	/* trap page can't be cached (sun4) */
#define	CACHE_MANDATORY		0x4	/* if cache is on, don't use
					   uncached access */
};

/* Things needed by crash or the kernel */
#if defined(_KERNEL) || defined(_KMEMUSER)

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_sparc_arch.h"
#endif

#include <sys/cpu_data.h>
#include <sys/evcnt.h>

#include <machine/intr.h>
#include <machine/psl.h>

#if defined(_KERNEL)
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/intreg.h>
#endif

struct trapframe;

/*
 * Message structure for Inter Processor Communication in MP systems
 */
struct xpmsg {
	volatile int tag;
#define	XPMSG15_PAUSECPU	1
#define	XPMSG_FUNC		4
#define	XPMSG_FTRP		5

	volatile union {
		/*
		 * Cross call: ask to run (*func)(arg0,arg1,arg2)
		 * or (*trap)(arg0,arg1,arg2).  `trap' should be the
		 * address of a `fast trap' handler that executes in
		 * the trap window (see locore.s).
		 */
		struct xpmsg_func {
			void	(*func)(int, int, int);
			void	(*trap)(int, int, int);
			int	arg0;
			int	arg1;
			int	arg2;
		} xpmsg_func;
	} u;
	volatile int	received;
	volatile int	complete;
};
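/*
 * A minimal sketch (not part of this header) of how a sender might
 * compose an XPMSG_FUNC cross-call message.  The real dispatch code
 * (locking, raising the IPI, waiting for `complete') lives in the MP
 * support code; xpmsg_post() and its arguments are hypothetical.
 */
#if 0
static void
xpmsg_post(volatile struct xpmsg *p, void (*f)(int, int, int),
    int a0, int a1, int a2)
{
	p->tag = XPMSG_FUNC;
	p->u.xpmsg_func.func = f;	/* run as a normal C call... */
	p->u.xpmsg_func.trap = NULL;	/* ...not as a fast trap */
	p->u.xpmsg_func.arg0 = a0;
	p->u.xpmsg_func.arg1 = a1;
	p->u.xpmsg_func.arg2 = a2;
	p->received = 0;
	p->complete = 0;
	/* then interrupt the target CPU so it notices the message */
}
#endif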
/*
 * The cpuinfo structure.  This structure maintains information about one
 * currently installed CPU (there may be several of these if the machine
 * supports multiple CPUs, as on some Sun4m architectures).  The information
 * in this structure supersedes the old "cpumod", "mmumod", and similar
 * fields.
 */

struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */

	/*
	 * Primary Inter-processor message area.  Keep this aligned
	 * to a cache line boundary if possible, as the structure
	 * itself occupies at most one (32/64 byte) cache line.
	 */
	struct xpmsg	msg __aligned(64);

	/* Scheduler flags */
	int	ci_want_ast;
	int	ci_want_resched;

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	int	ci_cpuid;		/* CPU index (see cpus[] array) */

	/* Context administration */
	int	*ctx_tbl;		/* [4m] SRMMU-edible context table */
	paddr_t	ctx_tbl_pa;		/* [4m] ctx table physical address */

	/* Cache information */
	struct cacheinfo	cacheinfo;	/* see above */

	/* various flags to work around anomalies in chips */
	volatile int	flags;		/* see CPUFLG_xxx, below */

	/* Per processor counter register (sun4m only) */
	volatile struct counter_4m	*counterreg_4m;

	/* Per processor interrupt mask register (sun4m only) */
	volatile struct icr_pi	*intreg_4m;
	/*
	 * Send an IPI to (cpi).  For Ross CPUs we need to read
	 * the pending register to avoid a hardware bug.
	 */
#define raise_ipi(cpi,lvl)	do {			\
	volatile int x;					\
	(cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(lvl);	\
	x = (cpi)->intreg_4m->pi_pend; __USE(x);	\
} while (0)
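	/*
	 * Illustrative use of raise_ipi() (a sketch; `cpi' would point
	 * at the target CPU's cpu_info and 13 is a hypothetical
	 * interrupt level):
	 *
	 *	raise_ipi(cpi, 13);
	 *
	 * The throwaway read of pi_pend is the Ross workaround noted
	 * above; __USE() keeps the compiler from warning about the
	 * otherwise unused value.
	 */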
	int	sun4_mmu3l;		/* [4]: 3-level MMU present */
#if defined(SUN4_MMU3L)
#define HASSUN4_MMU3L	(cpuinfo.sun4_mmu3l)
#else
#define HASSUN4_MMU3L	(0)
#endif
	int	ci_idepth;		/* Interrupt depth */

	/*
	 * The following pointers point to processes that are somehow
	 * associated with this CPU--running on it, using its FPU,
	 * etc.
	 */
	struct lwp	*ci_curlwp;	/* CPU owner */
	struct lwp	*fplwp;		/* FPU owner */

	int	ci_mtx_count;
	int	ci_mtx_oldspl;

	/*
	 * Idle PCB and interrupt stack
	 */
	void	*eintstack;		/* End of interrupt stack */
#define INT_STACK_SIZE	(128 * 128)	/* 128 128-byte stack frames */
	void	*redzone;		/* DEBUG: stack red zone */
#define REDSIZE	(8*96)			/* some room for bouncing */

	struct pcb	*curpcb;	/* CPU's PCB & kernel stack */

	/* locore defined: */
	void	(*get_syncflt)(void);	/* Not C-callable */
	int	(*get_asyncflt)(u_int *, u_int *);

	/* Synchronous Fault Status; temporary storage */
	struct {
		int	sfsr;
		int	sfva;
	} syncfltdump;

	/*
	 * Cache handling functions.
	 * Most cache flush functions come in two flavours: one that
	 * acts only on the CPU it executes on, and another that
	 * uses inter-processor signals to flush the cache on
	 * all processor modules.
	 * The `ft_' versions are fast trap cache flush handlers.
	 */
	void	(*cache_flush)(void *, u_int);
	void	(*vcache_flush_page)(int, int);
	void	(*sp_vcache_flush_page)(int, int);
	void	(*ft_vcache_flush_page)(int, int);
	void	(*vcache_flush_segment)(int, int, int);
	void	(*sp_vcache_flush_segment)(int, int, int);
	void	(*ft_vcache_flush_segment)(int, int, int);
	void	(*vcache_flush_region)(int, int);
	void	(*sp_vcache_flush_region)(int, int);
	void	(*ft_vcache_flush_region)(int, int);
	void	(*vcache_flush_context)(int);
	void	(*sp_vcache_flush_context)(int);
	void	(*ft_vcache_flush_context)(int);

	/* These are helpers for (*cache_flush)() */
	void	(*sp_vcache_flush_range)(int, int, int);
	void	(*ft_vcache_flush_range)(int, int, int);

	void	(*pcache_flush_page)(paddr_t, int);
	void	(*pure_vcache_flush)(void);
	void	(*cache_flush_all)(void);

	/* Support for hardware-assisted page clear/copy */
	void	(*zero_page)(paddr_t);
	void	(*copy_page)(paddr_t, paddr_t);

	/* Virtual addresses for use in pmap copy_page/zero_page */
	void	*vpage[2];
	int	*vpage_pte[2];		/* pte location of vpage[] */

	void	(*cache_enable)(void);
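	/*
	 * Sketch of the dispatch convention above (illustrative, not
	 * part of this header): code that must flush a page on every
	 * CPU calls the plain entry point, while code that knows the
	 * flush is CPU-local calls the `sp_' (single processor) one:
	 *
	 *	cpuinfo.vcache_flush_page(va, ctx);	// all CPUs
	 *	cpuinfo.sp_vcache_flush_page(va, ctx);	// this CPU only
	 *
	 * On uniprocessor kernels both pointers would typically refer
	 * to the same routine (an assumption; the actual wiring is in
	 * the MP/cache support code).
	 */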
	int	cpu_type;		/* Type: see CPUTYP_xxx below */

	/* Inter-processor message area (high priority but used infrequently) */
	struct xpmsg	msg_lev15;

	/* CPU information */
	int		node;		/* PROM node for this CPU */
	int		mid;		/* Module ID for MP systems */
	int		mbus;		/* 1 if CPU is on MBus */
	int		mxcc;		/* 1 if a MBus-level MXCC is present */
	const char	*cpu_longname;	/* CPU model */
	int		cpu_impl;	/* CPU implementation code */
	int		cpu_vers;	/* CPU version code */
	int		mmu_impl;	/* MMU implementation code */
	int		mmu_vers;	/* MMU version code */
	int		master;		/* 1 if this is bootup CPU */

	vaddr_t		mailbox;	/* VA of CPU's mailbox */

	int		mmu_ncontext;	/* Number of contexts supported */
	int		mmu_nregion;	/* Number of regions supported */
	int		mmu_nsegment;	/* [4/4c] Segments */
	int		mmu_npmeg;	/* [4/4c] Pmegs */

	/* XXX - we currently don't actually use the following */
	int		arch;		/* Architecture: CPU_SUN4x */
	int		class;		/* Class: SuperSPARC, microSPARC... */
	int		classlvl;	/* Iteration in class: 1, 2, etc. */
	int		classsublvl;	/* stepping in class (version) */

	int		hz;		/* Clock speed */

	/* FPU information */
	int		fpupresent;	/* true if FPU is present */
	int		fpuvers;	/* FPU revision */
	const char	*fpu_name;	/* FPU model */
	char		fpu_namebuf[32];/* Buffer for FPU name, if necessary */

	/* XXX */
	volatile void	*ci_ddb_regs;	/* DDB regs */

	/*
	 * The following are function pointers to do interesting CPU-dependent
	 * things without having to do type-tests all the time
	 */

	/* bootup things: access to physical memory */
	u_int	(*read_physmem)(u_int addr, int space);
	void	(*write_physmem)(u_int addr, u_int data);
	void	(*cache_tablewalks)(void);
	void	(*mmu_enable)(void);
	void	(*hotfix)(struct cpu_info *);

#if 0
	/* hardware-assisted block operation routines */
	void	(*hwbcopy)(const void *from, void *to, size_t len);
	void	(*hwbzero)(void *buf, size_t len);

	/* routine to clear mbus-sbus buffers */
	void	(*mbusflush)(void);
#endif

	/*
	 * Memory error handler; parity errors, unhandled NMIs and other
	 * unrecoverable faults end up here.
	 */
	void	(*memerr)(unsigned, u_int, u_int, struct trapframe *);
	void	(*idlespin)(struct cpu_info *);
	/* Module Control Registers */
	/*bus_space_handle_t*/ long	ci_mbusport;
	/*bus_space_handle_t*/ long	ci_mxccregs;

	u_int	ci_tt;			/* Last trap (if tracing) */

	/*
	 * Start/End VAs of this cpu_info region; we upload the other pages
	 * in this region that aren't part of the cpu_info to uvm.
	 */
	vaddr_t	ci_free_sva1, ci_free_eva1, ci_free_sva2, ci_free_eva2;

	struct evcnt ci_savefpstate;
	struct evcnt ci_savefpstate_null;
	struct evcnt ci_xpmsg_mutex_fail;
	struct evcnt ci_xpmsg_mutex_fail_call;
	struct evcnt ci_xpmsg_mutex_not_held;
	struct evcnt ci_xpmsg_bogus;
	struct evcnt ci_intrcnt[16];
	struct evcnt ci_sintrcnt[16];
};

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpuinfo		(*(struct cpu_info *)CPUINFO_VA)
#define	curcpu()	(cpuinfo.ci_self)
#define	curlwp		(cpuinfo.ci_curlwp)
#define	CPU_IS_PRIMARY(ci)	((ci)->master)

#define	cpu_number()	(cpuinfo.ci_cpuid)

#endif /* _KERNEL || _KMEMUSER */

/* Kernel only things. */
#if defined(_KERNEL)
void	cpu_proc_fork(struct proc *, struct proc *);

#if defined(MULTIPROCESSOR)
void	cpu_boot_secondary_processors(void);
#endif

/*
 * Arguments to hardclock, softclock and statclock encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	u_int	psr;		/* psr before interrupt, excluding PSR_ET */
	u_int	pc;		/* pc at interrupt */
	u_int	npc;		/* npc at interrupt */
	u_int	ipl;		/* actual interrupt priority level */
	u_int	fp;		/* %fp at interrupt */
};
typedef struct clockframe clockframe;

extern int eintstack[];

#define	CLKF_USERMODE(framep)	(((framep)->psr & PSR_PS) == 0)
#define	CLKF_LOPRI(framep,n)	(((framep)->psr & PSR_PIL) < (n) << 8)
#define	CLKF_PC(framep)		((framep)->pc)
#if defined(MULTIPROCESSOR)
#define	CLKF_INTR(framep)						\
	((framep)->fp > (u_int)cpuinfo.eintstack - INT_STACK_SIZE &&	\
	(framep)->fp < (u_int)cpuinfo.eintstack)
#else
#define	CLKF_INTR(framep)	((framep)->fp < (u_int)eintstack)
#endif
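/*
 * A minimal sketch (not part of this header) of how clock interrupt
 * code might classify a clockframe with the macros above; the
 * accounting actions are placeholders.
 */
#if 0
static void
clkframe_classify(struct clockframe *cf)
{
	if (CLKF_USERMODE(cf)) {
		/* took the tick in user mode: charge user time */
	} else if (CLKF_INTR(cf)) {
		/* %fp sits inside the interrupt stack: nested interrupt */
	} else {
		/* otherwise we interrupted kernel code: charge system time */
	}
}
#endif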
void	sparc_softintr_init(void);

/*
 * Preempt the current process on the target CPU if in interrupt from
 * user mode, or after the current trap/syscall if in system mode.
 */
#define	cpu_need_resched(ci, flags) do {				\
	__USE(flags);							\
	(ci)->ci_want_resched = 1;					\
	(ci)->ci_want_ast = 1;						\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if (((flags) & RESCHED_IMMED) || (ci)->ci_cpuid != cpu_number()) \
		XCALL0(sparc_noop, 1U << (ci)->ci_cpuid);		\
} while (/*CONSTCOND*/0)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, cpuinfo.ci_want_ast = 1)

/*
 * Notify the LWP (l) that it has a signal pending; process as soon
 * as possible.
 */
#define cpu_signotify(l) do {						\
	(l)->l_cpu->ci_want_ast = 1;					\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if ((l)->l_cpu->ci_cpuid != cpu_number())			\
		XCALL0(sparc_noop, 1U << (l)->l_cpu->ci_cpuid);		\
} while (/*CONSTCOND*/0)

/* CPU architecture version */
extern int cpu_arch;

/* Number of CPUs in the system */
extern int sparc_ncpus;

/* Provide %pc of a lwp */
#define LWP_PC(l)	((l)->l_md.md_tf->tf_pc)

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 *
 * realfun/realarg are used to chain callers, usually with the
 * biglock wrapper.
 */
extern struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	struct	intrhand *ih_next;
	int	ih_classipl;
	int	(*ih_realfun)(void *);
	void	*ih_realarg;
} *intrhand[15];

void	intr_establish(int, int, struct intrhand *, void (*)(void), bool);
void	intr_disestablish(int, struct intrhand *);
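/*
 * A hypothetical sketch (not part of this header) of hooking a driver
 * handler into a level's chain with intr_establish().  The handler
 * returns 1 if it took care of the interrupt and 0 otherwise;
 * `mydev_softc', the chosen level, and the trailing arguments are
 * illustrative assumptions only.
 */
#if 0
struct mydev_softc;

static int
mydev_intr(void *arg)
{
	struct mydev_softc *sc = arg;

	/* check the device; service it and return 1 if it was ours */
	(void)sc;
	return 0;
}

static struct intrhand mydev_ih;

static void
mydev_establish(struct mydev_softc *sc, int ipl)
{
	mydev_ih.ih_fun = mydev_intr;
	mydev_ih.ih_arg = sc;
	intr_establish(ipl, ipl, &mydev_ih, NULL, false);
}
#endif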
void	intr_lock_kernel(void);
void	intr_unlock_kernel(void);

/* disksubr.c */
struct dkbad;
int	isbad(struct dkbad *, int, int, int);

/* machdep.c */
int	ldcontrolb(void *);
void	*reserve_dumppages(void *);
void	wcopy(const void *, void *, u_int);
void	wzero(void *, u_int);

/* clock.c */
struct timeval;
void	lo_microtime(struct timeval *);
void	schedintr(void *);

/* locore.s */
struct fpstate;
void	ipi_savefpstate(struct fpstate *);
void	savefpstate(struct fpstate *);
void	loadfpstate(struct fpstate *);
int	probeget(void *, int);
void	write_all_windows(void);
void	write_user_windows(void);
void	lwp_trampoline(void);
struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
int	xldcontrolb(void *, struct pcb *);
void	copywords(const void *, void *, size_t);
void	qcopy(const void *, void *, size_t);
void	qzero(void *, size_t);

/* trap.c */
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int	rwindow_save(struct lwp *);

/* cons.c */
int	cnrom(void);

/* zs.c */
void	zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
#ifdef KGDB
void	zs_kgdb_init(void);
#endif

/* fb.c */
void	fb_unblank(void);

/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void	kgdb_connect(int);
void	kgdb_panic(void);
#endif

/* emul.c */
struct trapframe;
int	fixalign(struct lwp *, struct trapframe *, void **);
int	emulinstr(int, struct trapframe *);

/* cpu.c */
void	mp_pause_cpus(void);
void	mp_resume_cpus(void);
void	mp_halt_cpus(void);
#ifdef DDB
void	mp_pause_cpus_ddb(void);
void	mp_resume_cpus_ddb(void);
#endif

/* intr.c */
u_int	setitr(u_int);
u_int	getitr(void);

/*
 * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
 * of the trap vector table.  The next eight bits are supplied by the
 * hardware when the trap occurs, and the bottom four bits are always
 * zero (so that we can shove up to 16 bytes of executable code---exactly
 * four instructions---into each trap vector).  For example, trap type
 * 0x25 vectors to the table base plus offset 0x25 << 4 = 0x250.
 *
 * The hardware allocates half the trap vectors to hardware and half to
 * software.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[4];		/* the four instructions */
};

extern struct trapvec *trapbase;	/* the 256 vectors */

#endif /* _KERNEL */
#endif /* _CPU_H_ */