/*	$NetBSD: cpu.h,v 1.35 2003/02/05 12:06:52 nakayama Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES {			\
	{ 0, 0 },				\
	{ "booted_kernel", CTLTYPE_STRING },	\
	{ "booted_device", CTLTYPE_STRING },	\
	{ "boot_args", CTLTYPE_STRING },	\
	{ "cpu_arch", CTLTYPE_INT },		\
}
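
/*
 * Example (illustrative sketch, not part of this header): a userland
 * program could read the machdep.booted_kernel node with sysctl(3).
 * This header supplies the CPU_* node numbers; the buffer size below
 * is an arbitrary choice for the example.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <machine/cpu.h>
 *	#include <stdio.h>
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *	char buf[512];
 *	size_t len = sizeof(buf);
 *
 *	if (sysctl(mib, 2, buf, &len, NULL, 0) == 0)
 *		printf("booted kernel: %s\n", buf);
 */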

#ifdef _KERNEL
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if !defined(_LKM)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/intr.h>
#include <sparc64/sparc64/intreg.h>

#include <sys/sched.h>
/*
 * The cpu_info structure is part of a 64KB structure mapped both into the
 * kernel pmap and by a single locked TTE at CPUINFO_VA for that particular
 * processor.  Each processor's cpu_info is accessible at CPUINFO_VA only
 * for that processor.  Other processors can access it through an additional
 * mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 *	cpu_info
 *	interrupt stack	(all remaining space)
 *	idle PCB
 *	idle stack	(STACKSPACE - sizeof(PCB))
 *	32KB TSB
 */

struct cpu_info {
	/* Most important fields first */
	struct lwp		*ci_curlwp;
	struct pcb		*ci_cpcb;
	struct cpu_info		*ci_next;

	struct lwp		*ci_fplwp;
	int			ci_number;
	int			ci_upaid;
	struct schedstate_percpu ci_schedstate;

	/*
	 * Variables used by cc_microtime().
	 */
	struct timeval		ci_cc_time;
	int64_t			ci_cc_cc;
	int64_t			ci_cc_ms_delta;
	int64_t			ci_cc_denom;

	/* DEBUG/DIAGNOSTIC stuff */
	u_long			ci_spin_locks;
	u_long			ci_simple_locks;

	/* Spinning up the CPU */
	void			(*ci_spinup) __P((void));
	void			*ci_initstack;
	paddr_t			ci_paddr;
};

extern struct cpu_info *cpus;
extern struct cpu_info cpu_info_store;

#if 1
#define	curcpu()	(&cpu_info_store)
#else
#define	curcpu()	((struct cpu_info *)CPUINFO_VA)
#endif

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_swapin(p)		/* nothing */
#define	cpu_swapout(p)		/* nothing */
#define	cpu_wait(p)		/* nothing */
#if 1
#define	cpu_number()		0
#else
#define	cpu_number()		(curcpu()->ci_number)
#endif

/* This really should be somewhere else. */
#define	cpu_proc_fork(p1, p2)	/* nothing */

/*
 * definitions for MI microtime().
 */
extern struct timeval cc_microset_time;
#define	microtime(tv)	cc_microtime(tv)
void	cc_microtime __P((struct timeval *));
void	cc_microset __P((struct cpu_info *));

extern uint64_t cpu_clockrate[];

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
extern int intstack[];
extern int eintstack[];
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
/*
 * XXX Disable CLKF_BASEPRI() for now.  If we use a counter-timer for
 * the clock, the interrupt remains blocked until the interrupt handler
 * returns and we write to the clear interrupt register.  If we use
 * %tick for the clock, we could get multiple interrupts, but the
 * currently enabled INTR_INTERLOCK will prevent the interrupt from being
 * posted twice anyway.
 *
 * Switching to %tick for all machines and disabling INTR_INTERLOCK
 * in locore.s would allow us to take advantage of CLKF_BASEPRI().
 */
#if 0
#define	CLKF_BASEPRI(framep)	(((framep)->t.tf_oldpil) == 0)
#else
#define	CLKF_BASEPRI(framep)	(0)
#endif
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep)) &&					\
	    (((framep)->t.tf_out[6] & 1) ?				\
		(((vaddr_t)(framep)->t.tf_out[6] <			\
		    (vaddr_t)EINTSTACK - 0x7ff) &&			\
		 ((vaddr_t)(framep)->t.tf_out[6] >			\
		    (vaddr_t)INTSTACK - 0x7ff)) :			\
		(((vaddr_t)(framep)->t.tf_out[6] <			\
		    (vaddr_t)EINTSTACK) &&				\
		 ((vaddr_t)(framep)->t.tf_out[6] >			\
		    (vaddr_t)INTSTACK))))
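
/*
 * Sketch (illustrative only, loosely modelled on MI statclock()-style
 * code, not copied from sys/kern): the CLKF_* macros are typically used
 * to attribute a clock tick.  "p" names the interrupted process and the
 * p_*ticks counters are assumed to be the usual struct proc statistics.
 *
 *	if (CLKF_USERMODE(framep))
 *		p->p_uticks++;		tick taken in user mode
 *	else if (CLKF_INTR(framep))
 *		p->p_iticks++;		tick taken on the interrupt stack
 *	else
 *		p->p_sticks++;		tick taken in system mode
 */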

/*
 * Software interrupt request `register'.
 */
#ifdef DEPRECATED
union sir {
	int	sir_any;
	char	sir_which[4];
} sir;

#define SIR_NET		0
#define SIR_CLOCK	1
#endif

extern struct intrhand soft01intr, soft01net, soft01clock;

#if 0
#define	setsoftint()	send_softint(-1, IPL_SOFTINT, &soft01intr)
#define	setsoftnet()	send_softint(-1, IPL_SOFTNET, &soft01net)
#else
void setsoftint __P((void));
void setsoftnet __P((void));
#endif
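
/*
 * Sketch (illustrative only, no such driver in this tree): a hardware
 * interrupt handler typically defers protocol processing by queueing its
 * work and requesting the soft interrupt.  "xx_softc", "xx_rxeof" and
 * "xx_hardintr" are hypothetical names.
 *
 *	int
 *	xx_hardintr(void *arg)
 *	{
 *		struct xx_softc *sc = arg;
 *
 *		xx_rxeof(sc);		pull frames off the receive ring
 *		setsoftnet();		schedule IPL_SOFTNET processing
 *		return (1);		claimed the interrupt
 *	}
 */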

int	want_ast;

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
int	want_resched;		/* resched() was called */
#define	need_resched(ci)	(want_resched = 1, want_ast = 1)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	((p)->p_flag |= P_OWEUPC, want_ast = 1)

/*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
#define	signotify(p)		(want_ast = 1)

/*
 * Only one process may own the FPU state.
 *
 * XXX this must be per-cpu (eventually)
 */
struct	lwp *fplwp;		/* FPU owner */

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int			(*ih_fun) __P((void *));
	void			*ih_arg;
	short			ih_number;	/* interrupt number */
						/* the H/W provides */
	char			ih_pil;		/* interrupt priority */
	struct intrhand		*ih_next;	/* global list */
	struct intrhand		*ih_pending;	/* interrupt queued */
	volatile u_int64_t	*ih_map;	/* Interrupt map reg */
	volatile u_int64_t	*ih_clr;	/* clear interrupt reg */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish __P((int level, struct intrhand *));
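
/*
 * Sketch (illustrative only): a bus attachment would typically fill in an
 * intrhand and register it with intr_establish().  "xx_intr", "sc", "ino"
 * and "pil" are hypothetical names for the example.
 *
 *	static struct intrhand xx_ih;
 *
 *	xx_ih.ih_fun = xx_intr;		returns 1 if it handled the interrupt
 *	xx_ih.ih_arg = sc;		passed back as the single argument
 *	xx_ih.ih_number = ino;		interrupt number from the bus
 *	xx_ih.ih_pil = pil;		priority level to dispatch at
 *	intr_establish(pil, &xx_ih);
 */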

/* cpu.c */
paddr_t	cpu_alloc __P((void));
u_int64_t cpu_init __P((paddr_t, int));
/* disksubr.c */
struct dkbad;
int	isbad __P((struct dkbad *bt, int, int, int));
/* machdep.c */
int	ldcontrolb __P((caddr_t));
void	dumpconf __P((void));
caddr_t	reserve_dumppages __P((caddr_t));
/* clock.c */
struct timeval;
int	tickintr __P((void *));		/* level 10 (tick) interrupt code */
int	clockintr __P((void *));	/* level 10 (clock) interrupt code */
int	statintr __P((void *));		/* level 14 (statclock) interrupt code */
/* locore.s */
struct fpstate64;
void	savefpstate __P((struct fpstate64 *));
void	loadfpstate __P((struct fpstate64 *));
u_int64_t probeget __P((paddr_t, int, int));
int	probeset __P((paddr_t, int, int, u_int64_t));
#if 0
void	write_all_windows __P((void));
void	write_user_windows __P((void));
#else
#define	write_all_windows()	__asm __volatile("flushw" : : )
#define	write_user_windows()	__asm __volatile("flushw" : : )
#endif
void	proc_trampoline __P((void));
struct pcb;
void	snapshot __P((struct pcb *));
struct frame *getfp __P((void));
int	xldcontrolb __P((caddr_t, struct pcb *));
void	copywords __P((const void *, void *, size_t));
void	qcopy __P((const void *, void *, size_t));
void	qzero __P((void *, size_t));
void	switchtoctx __P((int));
/* locore2.c */
void	remrq __P((struct proc *));
/* trap.c */
void	kill_user_windows __P((struct lwp *));
int	rwindow_save __P((struct lwp *));
/* amd7930intr.s */
void	amd7930_trap __P((void));
/* cons.c */
int	cnrom __P((void));
/* zs.c */
void	zsconsole __P((struct tty *, int, int, void (**)(struct tty *, int)));
#ifdef KGDB
void	zs_kgdb_init __P((void));
#endif
/* fb.c */
void	fb_unblank __P((void));
/* kgdb_stub.c */
#ifdef KGDB
void	kgdb_attach __P((int (*)(void *), void (*)(void *, int), void *));
void	kgdb_connect __P((int));
void	kgdb_panic __P((void));
#endif
/* emul.c */
int	fixalign __P((struct lwp *, struct trapframe64 *));
int	emulinstr __P((vaddr_t, struct trapframe64 *));

/*
 * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
 * of the trap vector table.  The next eight bits are supplied by the
 * hardware when the trap occurs, and the bottom four bits are always
 * zero (so that we can shove up to 16 bytes of executable code---exactly
 * four instructions---into each trap vector).
 *
 * The hardware allocates half the trap vectors to hardware and half to
 * software.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[8];		/* the eight instructions */
};
extern struct trapvec *trapbase;	/* the 256 vectors */

extern void wzero __P((void *, u_int));
extern void wcopy __P((const void *, void *, u_int));

#endif /* _KERNEL */
#endif /* _CPU_H_ */