/*	$NetBSD: cpu.h,v 1.124 2018/01/16 08:23:17 mrg Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_VIS			5	/* 0 - no VIS, 1 - VIS 1.0, etc. */
#define	CPU_MAXID		6	/* number of valid machdep ids */

/*
 * This is exported via sysctl for cpuctl(8).
 */
struct cacheinfo {
	int	c_itotalsize;
	int	c_ilinesize;
	int	c_dtotalsize;
	int	c_dlinesize;
	int	c_etotalsize;
	int	c_elinesize;
};
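
/*
 * Illustrative sketch (not part of this header): the nodes above live
 * under CTL_MACHDEP, so userland code such as cpuctl(8) can query them
 * with sysctl(3).  Error handling is elided for brevity, and whether a
 * particular node is implemented is up to machdep.c.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_ARCH };
 *	int arch;
 *	size_t len = sizeof(arch);
 *
 *	if (sysctl(mib, 2, &arch, &len, NULL, 0) == 0)
 *		printf("cpu architecture version: %d\n", arch);
 */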

#if defined(_KERNEL) || defined(_KMEMUSER)
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/pte.h>
#include <machine/intr.h>
#if defined(_KERNEL)
#include <machine/bus_defs.h>
#include <machine/cpuset.h>
#include <sparc64/sparc64/intreg.h>
#endif
#ifdef SUN4V
#include <machine/hypervisor.h>
#endif

#include <sys/cpu_data.h>
#include <sys/evcnt.h>

/*
 * The cpu_info structure is part of a 64KB structure mapped both into the
 * kernel pmap and by a single locked TTE at CPUINFO_VA for that particular
 * processor.  Each processor's cpu_info is accessible at CPUINFO_VA only
 * on that processor; other processors can access it through the additional
 * mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 *	cpu_info
 *	interrupt stack (all remaining space)
 *	idle PCB
 *	idle stack (STACKSPACE - sizeof(PCB))
 *	32KB TSB
 */

struct cpu_info {
	struct cpu_data		ci_data;	/* MI per-cpu data */

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	/* Most important fields first */
	struct lwp		*ci_curlwp;
	struct pcb		*ci_cpcb;
	struct cpu_info		*ci_next;

	struct lwp		*ci_fplwp;

	void			*ci_eintstack;

	int			ci_mtx_count;
	int			ci_mtx_oldspl;

	/* Spinning up the CPU */
	void			(*ci_spinup)(void);
	paddr_t			ci_paddr;

	int			ci_cpuid;

	uint64_t		ci_ver;

	/* CPU PROM information. */
	u_int			ci_node;
	const char		*ci_name;

	/* This is for sysctl. */
	struct cacheinfo	ci_cacheinfo;

	/* %tick and cpu frequency information */
	u_long			ci_tick_increment;
	uint64_t		ci_cpu_clockrate[2];	/* %tick */
	uint64_t		ci_system_clockrate[2];	/* %stick */

	/* Interrupts */
	struct intrhand		*ci_intrpending[16];
	struct intrhand		*ci_tick_ih;

	/* Event counters */
	struct evcnt		ci_tick_evcnt;

	/* This could be under MULTIPROCESSOR, but there's no good reason */
	struct evcnt		ci_ipi_evcnt[IPI_EVCNT_NUM];

	int			ci_flags;
	int			ci_want_ast;
	int			ci_want_resched;
	int			ci_idepth;

	/*
	 * A context is simply a small number that differentiates multiple
	 * mappings of the same address.  Contexts on the Spitfire are
	 * 13 bits, but could be as large as 17 bits.
	 *
	 * Each context is either free or attached to a pmap.
	 *
	 * The context table is an array of pointers to psegs.  Just
	 * dereference the right pointer and you get to the pmap segment
	 * tables.  These are physical addresses, of course.
	 *
	 * ci_ctx_lock protects this CPU's context allocation/free.
	 * These are almost all allocated within the same cacheline.
	 */
	kmutex_t		ci_ctx_lock;
	int			ci_pmap_next_ctx;
	int			ci_numctx;
	paddr_t			*ci_ctxbusy;
	LIST_HEAD(, pmap)	ci_pmap_ctxlist;

	/*
	 * The TSBs are per cpu too (since MMU context differs between
	 * cpus).  These are just caches for the TLBs.
	 */
	pte_t			*ci_tsb_dmmu;
	pte_t			*ci_tsb_immu;

	/* TSB description (sun4v). */
	struct tsb_desc		*ci_tsb_desc;

	/*
	 * MMU Fault Status Area (sun4v).
	 * Will be initialized to the physical address of the bottom of
	 * the interrupt stack.
	 */
	paddr_t			ci_mmufsa;

	/*
	 * sun4v mondo control fields
	 */
	paddr_t			ci_cpumq;	/* cpu mondo queue address */
	paddr_t			ci_devmq;	/* device mondo queue address */
	paddr_t			ci_cpuset;	/* mondo recipient address */
	paddr_t			ci_mondo;	/* mondo message address */

	/* probe fault in PCI config space reads */
	bool			ci_pci_probe;
	bool			ci_pci_fault;

	volatile void		*ci_ddb_regs;	/* DDB regs */
};

#endif /* _KERNEL || _KMEMUSER */

#ifdef _KERNEL

#define	CPUF_PRIMARY	1

/*
 * CPU boot arguments.  Used by secondary CPUs at bootstrap time.
 */
struct cpu_bootargs {
	u_int	cb_node;	/* PROM CPU node */
	volatile int cb_flags;

	vaddr_t	cb_ktext;
	paddr_t	cb_ktextp;
	vaddr_t	cb_ektext;

	vaddr_t	cb_kdata;
	paddr_t	cb_kdatap;
	vaddr_t	cb_ekdata;

	paddr_t	cb_cpuinfo;
	int	cb_cputyp;
};

extern struct cpu_bootargs *cpu_args;

#if defined(MULTIPROCESSOR)
extern int sparc_ncpus;
#else
#define	sparc_ncpus 1
#endif

extern struct cpu_info *cpus;
extern struct pool_cache *fpstate_cache;

#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	cpu_number()	(curcpu()->ci_index)
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)

#define	CPU_INFO_ITERATOR		int __unused
#define	CPU_INFO_FOREACH(cii, ci)	ci = cpus; ci != NULL; ci = ci->ci_next

#define	curlwp		curcpu()->ci_curlwp
#define	fplwp		curcpu()->ci_fplwp
#define	curpcb		curcpu()->ci_cpcb

#define	want_ast	curcpu()->ci_want_ast
#define	want_resched	curcpu()->ci_want_resched
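
/*
 * Illustrative sketch (not part of this header): MI and MD code walk
 * the set of attached CPUs with the iterator macros above, e.g. to
 * report each CPU and note which one is the boot processor:
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		printf("%s: node %x%s\n",
 *		    ci->ci_name ? ci->ci_name : "?", ci->ci_node,
 *		    CPU_IS_PRIMARY(ci) ? " (primary)" : "");
 *	}
 */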

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_wait(p)	/* nothing */
void cpu_proc_fork(struct proc *, struct proc *);

/* run on the cpu itself */
void	cpu_pmap_init(struct cpu_info *);
/* run upfront to prepare the cpu_info */
void	cpu_pmap_prepare(struct cpu_info *, bool);

/* Helper functions to retrieve cache info */
int	cpu_ecache_associativity(int node);
int	cpu_ecache_size(int node);

#if defined(MULTIPROCESSOR)
extern vaddr_t cpu_spinup_trampoline;

extern char *mp_tramp_code;
extern u_long mp_tramp_code_len;
extern u_long mp_tramp_dtlb_slots, mp_tramp_itlb_slots;
extern u_long mp_tramp_func;
extern u_long mp_tramp_ci;

void	cpu_hatch(void);
void	cpu_boot_secondary_processors(void);

/*
 * Call a function on other cpus:
 *	multicast - send to everyone in the sparc64_cpuset_t
 *	broadcast - send to all cpus but ourselves
 *	send - send to just this cpu
 * The called functions do not follow the C ABI, so they need to be
 * coded in assembler.
 */
typedef void (* ipifunc_t)(void *, void *);

void	sparc64_multicast_ipi(sparc64_cpuset_t, ipifunc_t, uint64_t, uint64_t);
void	sparc64_broadcast_ipi(ipifunc_t, uint64_t, uint64_t);
extern void (*sparc64_send_ipi)(int, ipifunc_t, uint64_t, uint64_t);

/*
 * Call an arbitrary C function on another cpu (or all others but ourselves)
 */
typedef void (*ipi_c_call_func_t)(void *);
void	sparc64_generic_xcall(struct cpu_info *, ipi_c_call_func_t, void *);
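
/*
 * Illustrative sketch (not part of this header): running an ordinary C
 * function on one other CPU via sparc64_generic_xcall().  The handler
 * and its argument are made up for the example.
 *
 *	static void
 *	example_handler(void *arg)	// hypothetical; runs on target CPU
 *	{
 *		...
 *	}
 *
 *	if (ci != curcpu())
 *		sparc64_generic_xcall(ci, example_handler, NULL);
 */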

#endif

/* Provide %pc of a lwp */
#define	LWP_PC(l)	((l)->l_md.md_tf->tf_pc)

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep)) &&					\
		(((framep)->t.tf_out[6] & 1) ?				\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK - 0x7ff) &&		\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK - 0x7ff)) :		\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK) &&			\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK))))
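
/*
 * Illustrative sketch (not part of this header): the MI clock code
 * (see kern_clock.c) uses the macros above to decide where to charge
 * a tick, roughly:
 *
 *	if (CLKF_USERMODE(frame)) {
 *		// tick is charged to user time
 *	} else if (CLKF_INTR(frame)) {
 *		// taken from interrupt context: charge to interrupt time
 *	} else {
 *		// charge to system time; CLKF_PC(frame) is the
 *		// interrupted kernel %pc
 *	}
 */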

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, want_ast = 1)

/*
 * Notify an LWP that it has a signal pending, to be processed as soon
 * as possible.
 */
void cpu_signotify(struct lwp *);


/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	/* if we have to take the biglock, we interpose a wrapper
	 * and need to save the original function and arg */
	int	(*ih_realfun)(void *);
	void	*ih_realarg;
	short	ih_number;		/* interrupt number */
					/* the H/W provides */
	char	ih_pil;			/* interrupt priority */
	struct intrhand *ih_next;	/* global list */
	struct intrhand *ih_pending;	/* interrupt queued */
	volatile uint64_t *ih_map;	/* Interrupt map reg */
	volatile uint64_t *ih_clr;	/* clear interrupt reg */
	void	(*ih_ack)(struct intrhand *);	/* ack interrupt function */
	bus_space_tag_t ih_bus;		/* parent bus */
	struct evcnt ih_cnt;		/* counter for vmstat */
	uint32_t ih_ivec;
	char	ih_name[32];		/* name for the above */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish(int level, bool mpsafe, struct intrhand *);
void	*sparc_softintr_establish(int, int (*)(void *), void *);
void	sparc_softintr_schedule(void *);
void	sparc_softintr_disestablish(void *);
struct intrhand *intrhand_alloc(void);

/* cpu.c */
int	cpu_myid(void);

/* disksubr.c */
struct dkbad;
int	isbad(struct dkbad *bt, int, int, int);
/* machdep.c */
void *	reserve_dumppages(void *);
/* clock.c */
struct timeval;
int	tickintr(void *);	/* level 10/14 (tick) interrupt code */
int	stickintr(void *);	/* system tick interrupt code */
int	stick2eintr(void *);	/* system tick interrupt code */
int	clockintr(void *);	/* level 10 (clock) interrupt code */
int	statintr(void *);	/* level 14 (statclock) interrupt code */
int	schedintr(void *);	/* level 10 (schedclock) interrupt code */
void	tickintr_establish(int, int (*)(void *));
void	stickintr_establish(int, int (*)(void *));
void	stick2eintr_establish(int, int (*)(void *));

/* locore.s */
struct fpstate64;
void	savefpstate(struct fpstate64 *);
void	loadfpstate(struct fpstate64 *);
void	clearfpstate(void);
uint64_t	probeget(paddr_t, int, int);
int	probeset(paddr_t, int, int, uint64_t);
void	setcputyp(int);

#define	write_all_windows()	__asm volatile("flushw" : : )
#define	write_user_windows()	__asm volatile("flushw" : : )

struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
void	switchtoctx_us(int);
void	switchtoctx_usiii(int);
void	next_tick(long);
void	next_stick(long);
/* trap.c */
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int	rwindow_save(struct lwp *);
/* cons.c */
int	cnrom(void);
/* zs.c */
void	zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
/* fb.c */
void	fb_unblank(void);
/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void	kgdb_connect(int);
void	kgdb_panic(void);
#endif
/* emul.c */
int	fixalign(struct lwp *, struct trapframe64 *);
int	emulinstr(vaddr_t, struct trapframe64 *);

#else /* _KERNEL */

/*
 * XXX: provide some definitions for crash(8), probably can share
 */
#if defined(_KMEMUSER)
#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	curlwp		curcpu()->ci_curlwp
#endif

#endif /* _KERNEL */
#endif /* _CPU_H_ */