/*	$NetBSD: cpu.h,v 1.54 2006/06/07 22:39:38 kardel Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES { \
	{ 0, 0 }, \
	{ "booted_kernel", CTLTYPE_STRING }, \
	{ "booted_device", CTLTYPE_STRING }, \
	{ "boot_args", CTLTYPE_STRING }, \
	{ "cpu_arch", CTLTYPE_INT }, \
}

#ifdef _KERNEL
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if defined(_KERNEL_OPT)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/intr.h>
#include <machine/cpuset.h>
#include <sparc64/sparc64/intreg.h>

#include <sys/cpu_data.h>

/*
 * The cpu_info structure is part of a 64KB structure that is mapped both
 * into the kernel pmap and, by a single locked TTE, at CPUINFO_VA for that
 * particular processor.  Each processor's cpu_info is accessible at
 * CPUINFO_VA only on that processor; other processors can access it
 * through the additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 *	cpu_info
 *	interrupt stack	(all remaining space)
 *	idle PCB
 *	idle stack	(STACKSPACE - sizeof(PCB))
 *	32KB TSB
 */
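
/*
 * Illustrative sketch (not compiled): how the double mapping is used.
 * Dereferencing the fixed alias CPUINFO_VA always reaches the local CPU's
 * own cpu_info, and its ci_self member holds the globally valid kernel-pmap
 * address of that same structure; the curcpu() macro further down does
 * exactly this.  The function name here is hypothetical.
 */
#if 0
static inline struct cpu_info *
example_curcpu(void)
{
	/* Every CPU sees its own cpu_info at the per-CPU alias address... */
	struct cpu_info *alias = (struct cpu_info *)CPUINFO_VA;

	/* ...and ci_self points back at the global mapping of that page. */
	return alias->ci_self;
}
#endif
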
struct cpu_info {

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	/* Most important fields first */
	struct lwp		*ci_curlwp;
	struct pcb		*ci_cpcb;
	struct cpu_info		*ci_next;

	struct lwp		*ci_fplwp;

	void			*ci_eintstack;
	struct pcb		*ci_idle_u;

	/* Spinning up the CPU */
	void			(*ci_spinup)(void);
	void			*ci_initstack;
	paddr_t			ci_paddr;

	int			ci_number;
	int			ci_upaid;
	int			ci_cpuid;

	/* CPU PROM information. */
	u_int			ci_node;

	int			ci_flags;
	int			ci_want_ast;
	int			ci_want_resched;

	struct cpu_data		ci_data;	/* MI per-cpu data */
};

#define	CPUF_PRIMARY	1

/*
 * CPU boot arguments.  Used by secondary CPUs at bootstrap time.
 */
struct cpu_bootargs {
	u_int	cb_node;	/* PROM CPU node */
	volatile int cb_flags;

	vaddr_t	cb_ktext;
	paddr_t	cb_ktextp;
	vaddr_t	cb_ektext;

	vaddr_t	cb_kdata;
	paddr_t	cb_kdatap;
	vaddr_t	cb_ekdata;

	paddr_t	cb_cpuinfo;

	void	*cb_initstack;
};

extern struct cpu_bootargs *cpu_args;

extern int sparc_ncpus;
extern struct cpu_info *cpus;

#define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
#define	cpu_number()	(curcpu()->ci_number)
#define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)

#define	CPU_INFO_ITERATOR		int
#define	CPU_INFO_FOREACH(cii, ci)	cii = 0, ci = cpus; ci != NULL; \
					ci = ci->ci_next

#define	curlwp		curcpu()->ci_curlwp
#define	fplwp		curcpu()->ci_fplwp
#define	curpcb		curcpu()->ci_cpcb

#define	want_ast	curcpu()->ci_want_ast
#define	want_resched	curcpu()->ci_want_resched

/*
 * Definitions of cpu-dependent requirements
 * referenced in generic code.
 */
#define	cpu_swapin(p)	/* nothing */
#define	cpu_swapout(p)	/* nothing */
#define	cpu_wait(p)	/* nothing */
void cpu_proc_fork(struct proc *, struct proc *);

#if defined(MULTIPROCESSOR)
extern vaddr_t cpu_spinup_trampoline;

extern char   *mp_tramp_code;
extern u_long  mp_tramp_code_len;
extern u_long  mp_tramp_tlb_slots;
extern u_long  mp_tramp_func;
extern u_long  mp_tramp_ci;

void	cpu_hatch(void);
void	cpu_boot_secondary_processors(void);
#endif

extern uint64_t cpu_clockrate[];

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
extern int intstack[];
extern int eintstack[];
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
/*
 * XXX Disable CLKF_BASEPRI() for now.  If we use a counter-timer for
 * the clock, the interrupt remains blocked until the interrupt handler
 * returns and we write to the clear interrupt register.  If we use
 * %tick for the clock, we could get multiple interrupts, but the
 * currently enabled INTR_INTERLOCK will prevent the interrupt from being
 * posted twice anyway.
 *
 * Switching to %tick for all machines and disabling INTR_INTERLOCK
 * in locore.s would allow us to take advantage of CLKF_BASEPRI().
 */
#if 0
#define	CLKF_BASEPRI(framep)	(((framep)->t.tf_oldpil) == 0)
#else
#define	CLKF_BASEPRI(framep)	(0)
#endif
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep)) &&					\
		(((framep)->t.tf_out[6] & 1) ?				\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK - 0x7ff) &&		\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK - 0x7ff)) :		\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK) &&			\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK))))
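
/*
 * Illustrative sketch (not compiled): how a clock interrupt path might
 * classify a tick with the CLKF_* macros above.  The account_*() helpers
 * are hypothetical; the real accounting is done by the MI hardclock() and
 * statclock().
 */
#if 0
static void
example_clock_tick(struct clockframe *framep)
{
	if (CLKF_USERMODE(framep)) {
		/* Trapped from user mode; CLKF_PC() is the user pc. */
		account_user_tick(CLKF_PC(framep));
	} else if (CLKF_INTR(framep)) {
		/*
		 * %sp was within the interrupt stack, so we interrupted
		 * another interrupt handler.
		 */
		account_interrupt_tick();
	} else {
		/* Otherwise the tick is charged as system time. */
		account_system_tick();
	}
}
#endif
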

extern struct intrhand soft01intr, soft01net, soft01clock;

void	setsoftint(void);
void	setsoftnet(void);

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
#define	need_resched(ci)	(want_resched = 1, want_ast = 1)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	((p)->p_flag |= P_OWEUPC, want_ast = 1)

/*
 * Notify the current process (p) that it has a signal pending,
 * to be processed as soon as possible.
 */
#define	signotify(p)	(want_ast = 1)

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 for ``I took care of it''.  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	short	ih_number;		/* interrupt number the H/W provides */
	char	ih_pil;			/* interrupt priority */
	struct intrhand	*ih_next;	/* global list */
	struct intrhand	*ih_pending;	/* interrupt queued */
	volatile uint64_t *ih_map;	/* interrupt map reg */
	volatile uint64_t *ih_clr;	/* clear interrupt reg */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish(int level, struct intrhand *);
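
/*
 * Illustrative sketch (not compiled): hooking a handler into an interrupt
 * chain with intr_establish().  The softc and the example_*() helpers are
 * hypothetical; real drivers normally fill in struct intrhand from their
 * bus attachment code.
 */
#if 0
static int
example_hardintr(void *arg)
{
	struct example_softc *sc = arg;

	if (!example_device_interrupted(sc))
		return (0);		/* not me */
	example_service_device(sc);
	return (1);			/* I took care of it */
}

static void
example_establish(struct example_softc *sc, struct intrhand *ih, int pil)
{
	ih->ih_fun = example_hardintr;
	ih->ih_arg = sc;	/* NULL would pass a clockframe instead */
	ih->ih_pil = pil;
	intr_establish(pil, ih);
}
#endif
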

#define	mp_pause_cpus()		sparc64_ipi_pause_cpus()
#define	mp_resume_cpus()	sparc64_ipi_resume_cpus()

/* disksubr.c */
struct dkbad;
int	isbad(struct dkbad *bt, int, int, int);
/* machdep.c */
caddr_t	reserve_dumppages(caddr_t);
/* clock.c */
struct timeval;
int	tickintr(void *);	/* level 10 (tick) interrupt code */
int	clockintr(void *);	/* level 10 (clock) interrupt code */
int	statintr(void *);	/* level 14 (statclock) interrupt code */
/* locore.s */
struct fpstate64;
void	savefpstate(struct fpstate64 *);
void	loadfpstate(struct fpstate64 *);
uint64_t probeget(paddr_t, int, int);
int	probeset(paddr_t, int, int, uint64_t);

#define	write_all_windows()	__asm volatile("flushw" : : )
#define	write_user_windows()	__asm volatile("flushw" : : )

void	proc_trampoline(void);
struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
void	switchtoctx(int);
/* trap.c */
void	kill_user_windows(struct lwp *);
int	rwindow_save(struct lwp *);
/* cons.c */
int	cnrom(void);
/* zs.c */
void	zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
#ifdef KGDB
void	zs_kgdb_init(void);
#endif
/* fb.c */
void	fb_unblank(void);
/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void	kgdb_connect(int);
void	kgdb_panic(void);
#endif
/* emul.c */
int	fixalign(struct lwp *, struct trapframe64 *);
int	emulinstr(vaddr_t, struct trapframe64 *);

/*
 * The SPARC v9 CPU has a Trap Base Address (TBA) register which holds the
 * upper bits of the trap vector table address.  The trap level and trap
 * type bits are supplied by the hardware when the trap occurs, and the
 * bottom five bits are always zero (so that we can shove up to 32 bytes of
 * executable code---exactly eight instructions---into each trap vector).
 *
 * The hardware allocates half the trap vectors to hardware and half to
 * software.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[8];		/* the eight instructions */
};
extern struct trapvec *trapbase;	/* the 256 vectors */

#endif /* _KERNEL */
#endif /* _CPU_H_ */