/*	$NetBSD: cpu.h,v 1.84 2008/10/05 01:53:05 nakayama Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */
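/*
 * These ids form the second level of a CTL_MACHDEP sysctl(3) MIB.
 * A minimal userland sketch (illustrative only, error handling trimmed)
 * that fetches the booted kernel name:
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <machine/cpu.h>
#include <stdio.h>

int
main(void)
{
	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
	char buf[MAXPATHLEN];
	size_t len = sizeof(buf);

	/* Ask the kernel for machdep.booted_kernel. */
	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
		return 1;
	printf("booted kernel: %s\n", buf);
	return 0;
}
#endif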
#ifdef _KERNEL
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/pte.h>
#include <machine/intr.h>
#include <machine/cpuset.h>
#include <sparc64/sparc64/intreg.h>

#include <sys/cpu_data.h>
#include <sys/evcnt.h>

/*
 * The cpu_info structure is part of a 64KB structure mapped both into the
 * kernel pmap and by a single locked TTE at CPUINFO_VA for that particular
 * processor.  Each processor's cpu_info is accessible at CPUINFO_VA only
 * on that processor; other processors can access it through the additional
 * mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 *	cpu_info
 *	interrupt stack (all remaining space)
 *	idle PCB
 *	idle stack (STACKSPACE - sizeof(PCB))
 *	32KB TSB
 */

struct cpu_info {

	/*
	 * SPARC cpu_info structures live at two VAs: one global VA (so
	 * each CPU can access any other CPU's cpu_info) and an alias VA,
	 * CPUINFO_VA, which is the same on each CPU and maps to that
	 * CPU's cpu_info.  Since the alias CPUINFO_VA is how we locate
	 * our cpu_info, we have to self-reference the global VA so that
	 * we can return it in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	/* Most important fields first */
	struct lwp		*ci_curlwp;
	struct pcb		*ci_cpcb;
	struct cpu_info		*ci_next;

	struct lwp		*ci_fplwp;

	void			*ci_eintstack;

	int			ci_mtx_count;
	int			ci_mtx_oldspl;

	/* Spinning up the CPU */
	void			(*ci_spinup)(void);
	paddr_t			ci_paddr;

	int			ci_cpuid;

	/* CPU PROM information. */
	u_int			ci_node;

	/* %tick and cpu frequency information */
	u_long			ci_tick_increment;
	uint64_t		ci_cpu_clockrate[2];

	/* Interrupts */
	struct intrhand		*ci_intrpending[16];
	struct intrhand		*ci_tick_ih;

	/* Event counters */
	struct evcnt		ci_tick_evcnt;
#ifdef MULTIPROCESSOR
	struct evcnt		ci_ipi_evcnt[IPI_EVCNT_NUM];
#endif

	int			ci_flags;
	int			ci_want_ast;
	int			ci_want_resched;
	int			ci_idepth;

	/*
	 * A context is simply a small number that differentiates multiple
	 * mappings of the same address.  Contexts on the Spitfire are
	 * 13 bits, but could be as large as 17 bits.
	 *
	 * Each context is either free or attached to a pmap.
	 *
	 * The context table is an array of pointers to psegs.  Just
	 * dereference the right pointer and you get to the pmap segment
	 * tables.  These are physical addresses, of course.  (See the
	 * sketch following this structure.)
	 */
	int			ci_pmap_next_ctx;
	int			ci_numctx;
	paddr_t			*ci_ctxbusy;
	LIST_HEAD(, pmap)	ci_pmap_ctxlist;

	/*
	 * The TSBs are per cpu too (since MMU context differs between
	 * cpus).  These are just caches for the TLBs.
	 */
	pte_t			*ci_tsb_dmmu;
	pte_t			*ci_tsb_immu;

	struct cpu_data		ci_data;	/* MI per-cpu data */

	volatile void		*ci_ddb_regs;	/* DDB regs */
};

#define CPUF_PRIMARY	1
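/*
 * Illustrative sketch of the per-cpu context table described in
 * struct cpu_info above: an MMU context number indexes ci_ctxbusy and
 * yields the physical address of the owning pmap's segment table.
 * The helper below is hypothetical; the real lookups live in the pmap
 * code.
 */
#if 0
static inline paddr_t
example_ctx_to_pseg(struct cpu_info *ci, int ctx)
{
	/* ctx is expected to be in the range [0, ci->ci_numctx). */
	return ci->ci_ctxbusy[ctx];
}
#endif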
/*
 * CPU boot arguments.  Used by secondary CPUs at bootstrap time.
 */
struct cpu_bootargs {
	u_int	cb_node;	/* PROM CPU node */
	volatile int cb_flags;

	vaddr_t	cb_ktext;
	paddr_t	cb_ktextp;
	vaddr_t	cb_ektext;

	vaddr_t	cb_kdata;
	paddr_t	cb_kdatap;
	vaddr_t	cb_ekdata;

	paddr_t	cb_cpuinfo;
};

extern struct cpu_bootargs *cpu_args;

extern int sparc_ncpus;
extern struct cpu_info *cpus;
extern struct pool_cache *fpstate_cache;

#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	cpu_number()	(curcpu()->ci_index)
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)

#define	CPU_INFO_ITERATOR		int
#define	CPU_INFO_FOREACH(cii, ci)	cii = 0, ci = cpus; ci != NULL; \
					ci = ci->ci_next

#define	curlwp		curcpu()->ci_curlwp
#define	fplwp		curcpu()->ci_fplwp
#define	curpcb		curcpu()->ci_cpcb

#define	want_ast	curcpu()->ci_want_ast
#define	want_resched	curcpu()->ci_want_resched

/*
 * Definitions of cpu-dependent requirements
 * referenced in generic code.
 */
#define	cpu_swapin(p)	/* nothing */
#define	cpu_swapout(p)	/* nothing */
#define	cpu_wait(p)	/* nothing */
void cpu_proc_fork(struct proc *, struct proc *);

/* run on the cpu itself */
void cpu_pmap_init(struct cpu_info *);
/* run upfront to prepare the cpu_info */
void cpu_pmap_prepare(struct cpu_info *, bool);

#if defined(MULTIPROCESSOR)
extern vaddr_t cpu_spinup_trampoline;

extern char *mp_tramp_code;
extern u_long mp_tramp_code_len;
extern u_long mp_tramp_tlb_slots;
extern u_long mp_tramp_func;
extern u_long mp_tramp_ci;

void	cpu_hatch(void);
void	cpu_boot_secondary_processors(void);

/*
 * Call a function on other cpus:
 *	multicast - send to everyone in the sparc64_cpuset_t
 *	broadcast - send to all cpus but ourselves
 *	send - send to just this cpu
 * (See the example following this block.)
 */
typedef void (* ipifunc_t)(void *);

void	sparc64_multicast_ipi(sparc64_cpuset_t, ipifunc_t, uint64_t, uint64_t);
void	sparc64_broadcast_ipi(ipifunc_t, uint64_t, uint64_t);
void	sparc64_send_ipi(int, ipifunc_t, uint64_t, uint64_t);
#endif
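/*
 * Example of the IPI interface above: broadcast a handler to the other
 * cpus.  The handler and function names are hypothetical; the two
 * trailing uint64_t values are per-call data passed along with the IPI
 * and are unused here.
 */
#if 0
static void
example_ipi_handler(void *arg)
{
	/* Runs on each remote cpu. */
}

static void
example_kick_other_cpus(void)
{
	sparc64_broadcast_ipi(example_ipi_handler, 0, 0);
}
#endif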
/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep)) &&					\
	    (((framep)->t.tf_out[6] & 1) ?				\
		(((vaddr_t)(framep)->t.tf_out[6] <			\
		    (vaddr_t)EINTSTACK - 0x7ff) &&			\
		 ((vaddr_t)(framep)->t.tf_out[6] >			\
		    (vaddr_t)INTSTACK - 0x7ff)) :			\
		(((vaddr_t)(framep)->t.tf_out[6] <			\
		    (vaddr_t)EINTSTACK) &&				\
		 ((vaddr_t)(framep)->t.tf_out[6] >			\
		    (vaddr_t)INTSTACK))))

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, want_ast = 1)

/*
 * Notify an LWP that it has a signal pending, process as soon as possible.
 */
void cpu_signotify(struct lwp *);

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 * (An example of establishing a handler appears at the end of this file.)
 */
struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	/*
	 * If we have to take the biglock, we interpose a wrapper
	 * and need to save the original function and arg.
	 */
	int	(*ih_realfun)(void *);
	void	*ih_realarg;
	short	ih_number;		/* interrupt number the H/W provides */
	char	ih_pil;			/* interrupt priority */
	struct	intrhand *ih_next;	/* global list */
	struct	intrhand *ih_pending;	/* interrupt queued */
	volatile uint64_t *ih_map;	/* Interrupt map reg */
	volatile uint64_t *ih_clr;	/* clear interrupt reg */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish(int level, bool mpsafe, struct intrhand *);
void	*sparc_softintr_establish(int, int (*)(void *), void *);
void	sparc_softintr_schedule(void *);
void	sparc_softintr_disestablish(void *);

/* disksubr.c */
struct dkbad;
int	isbad(struct dkbad *bt, int, int, int);
/* machdep.c */
void *	reserve_dumppages(void *);
/* clock.c */
struct timeval;
int	tickintr(void *);	/* level 10/14 (tick) interrupt code */
int	clockintr(void *);	/* level 10 (clock) interrupt code */
int	statintr(void *);	/* level 14 (statclock) interrupt code */
int	schedintr(void *);	/* level 10 (schedclock) interrupt code */
void	tickintr_establish(int, int (*)(void *));
/* locore.s */
struct fpstate64;
void	savefpstate(struct fpstate64 *);
void	loadfpstate(struct fpstate64 *);
void	clearfpstate(void);
uint64_t	probeget(paddr_t, int, int);
int	probeset(paddr_t, int, int, uint64_t);

#define	write_all_windows()	__asm volatile("flushw" : : )
#define	write_user_windows()	__asm volatile("flushw" : : )

void	lwp_trampoline(void);
struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
void	switchtoctx(int);
/* trap.c */
void	kill_user_windows(struct lwp *);
int	rwindow_save(struct lwp *);
/* cons.c */
int	cnrom(void);
/* zs.c */
void	zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
/* fb.c */
void	fb_unblank(void);
/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void	kgdb_connect(int);
void	kgdb_panic(void);
#endif
/* emul.c */
int	fixalign(struct lwp *, struct trapframe64 *);
int	emulinstr(vaddr_t, struct trapframe64 *);

#endif /* _KERNEL */
#endif /* _CPU_H_ */
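/*
 * Example of establishing a hardware interrupt handler with the
 * intrhand/intr_establish() interface declared above.  The softc type
 * and function names are hypothetical; real drivers also fill in the
 * map/clear registers as appropriate for their bus.
 */
#if 0
#include <sys/kmem.h>

struct example_softc {
	int	sc_dummy;
};

static int
example_hardintr(void *arg)
{
	struct example_softc *sc = arg;

	(void)sc;
	/* Return 0 for ``not me'', 1 for ``I took care of it''. */
	return 1;
}

static void
example_establish(struct example_softc *sc, int number, int pil)
{
	struct intrhand *ih;

	ih = kmem_zalloc(sizeof(*ih), KM_SLEEP);
	ih->ih_fun = example_hardintr;
	ih->ih_arg = sc;
	ih->ih_number = number;
	ih->ih_pil = pil;
	intr_establish(pil, false, ih);
}
#endif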