/*	$NetBSD: cpu.h,v 1.110 2021/08/14 17:51:19 ryo Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */

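/*
 * Illustrative sketch (not part of this header): reading the
 * machdep.cpu_arch node above from userland with sysctl(3).  A real
 * program would get CPU_ARCH from <machine/cpu.h>; error handling is
 * abbreviated and the function itself is hypothetical.
 */
#if 0	/* example only */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
print_cpu_arch(void)
{
	int mib[2] = { CTL_MACHDEP, CPU_ARCH };
	int arch;
	size_t len = sizeof(arch);

	if (sysctl(mib, 2, &arch, &len, NULL, 0) == -1)
		return -1;
	printf("cpu architecture version: %d\n", arch);
	return 0;
}
#endif
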
/*
 * Exported definitions unique to SPARC cpu support.
 */

/*
 * Sun-4 and Sun-4c virtual address cache.
 *
 * Sun-4 virtual caches come in two flavors, write-through (Sun-4c)
 * and write-back (Sun-4).  The write-back caches are much faster
 * but require a bit more care.
 *
 * This is exported via sysctl so be careful changing it.
 */
enum vactype { VAC_UNKNOWN, VAC_NONE, VAC_WRITETHROUGH, VAC_WRITEBACK };

/*
 * Cache control information.
 *
 * This is exported via sysctl so be careful changing it.
 */

struct cacheinfo {
	int	c_totalsize;		/* total size, in bytes */
					/* if split, MAX(icache,dcache) */
	int	c_enabled;		/* true => cache is enabled */
	int	c_hwflush;		/* true => have hardware flush */
	int	c_linesize;		/* line size, in bytes */
					/* if split, MIN(icache,dcache) */
	int	c_l2linesize;		/* log2(linesize) */
	int	c_nlines;		/* precomputed # of lines to flush */
	int	c_physical;		/* true => cache has physical
					   address tags */
	int	c_associativity;	/* # of "buckets" in cache line */
	int	c_split;		/* true => cache is split */

	int	ic_totalsize;		/* instruction cache */
	int	ic_enabled;
	int	ic_linesize;
	int	ic_l2linesize;
	int	ic_nlines;
	int	ic_associativity;

	int	dc_totalsize;		/* data cache */
	int	dc_enabled;
	int	dc_linesize;
	int	dc_l2linesize;
	int	dc_nlines;
	int	dc_associativity;

	int	ec_totalsize;		/* external cache info */
	int	ec_enabled;
	int	ec_linesize;
	int	ec_l2linesize;
	int	ec_nlines;
	int	ec_associativity;

	enum vactype	c_vactype;

	int	c_flags;
#define CACHE_PAGETABLES	0x1	/* caching pagetables OK (sun4m) */
#define CACHE_TRAPPAGEBUG	0x2	/* trap page can't be cached (sun4) */
#define CACHE_MANDATORY		0x4	/* if cache is on, don't use
					   uncached access */
};

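/*
 * Illustrative sketch: the derived fields above are meant to stay
 * consistent with one another.  A hypothetical initializer might
 * compute them from the raw sizes like this (the helper name and the
 * exact nlines formula are assumptions, not the pmap's actual code).
 */
#if 0	/* example only */
static void
cacheinfo_derive(struct cacheinfo *ci)
{
	int l2;

	/* c_l2linesize holds log2(c_linesize) */
	for (l2 = 0; (1 << l2) < ci->c_linesize; l2++)
		continue;
	ci->c_l2linesize = l2;

	/* c_nlines is the precomputed number of lines to flush */
	ci->c_nlines = ci->c_totalsize >> l2;
}
#endif
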
/* Things needed by crash or the kernel */
#if defined(_KERNEL) || defined(_KMEMUSER)

#if defined(_KERNEL_OPT)
#include "opt_gprof.h"
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#include "opt_sparc_arch.h"
#endif

#include <sys/cpu_data.h>
#include <sys/evcnt.h>

#include <machine/intr.h>
#include <machine/psl.h>

#if defined(_KERNEL)
#include <sparc/sparc/cpuvar.h>
#include <sparc/sparc/intreg.h>
#endif

struct trapframe;

/*
 * Message structure for inter-processor communication in MP systems.
 */
struct xpmsg {
	volatile int tag;
#define	XPMSG15_PAUSECPU	1
#define	XPMSG_FUNC		4
#define	XPMSG_FTRP		5

	volatile union {
		/*
		 * Cross call: ask to run (*func)(arg0,arg1,arg2)
		 * or (*trap)(arg0,arg1,arg2). `trap' should be the
		 * address of a `fast trap' handler that executes in
		 * the trap window (see locore.s).
		 */
		struct xpmsg_func {
			void	(*func)(int, int, int);
			void	(*trap)(int, int, int);
			int	arg0;
			int	arg1;
			int	arg2;
		} xpmsg_func;
	} u;
	volatile int	received;
	volatile int	complete;
};
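
/*
 * Illustrative sketch (the real senders live in sparc/sparc/cpu.c):
 * formatting an XPMSG_FUNC cross-call request.  Locking on
 * xpmsg_mutex and the received/complete handshake are omitted, and
 * the helper name is hypothetical.
 */
#if 0	/* example only */
static void
xpmsg_format_func(struct xpmsg *m, void (*func)(int, int, int),
    int arg0, int arg1, int arg2)
{
	m->tag = XPMSG_FUNC;
	m->u.xpmsg_func.func = func;
	m->u.xpmsg_func.trap = NULL;
	m->u.xpmsg_func.arg0 = arg0;
	m->u.xpmsg_func.arg1 = arg1;
	m->u.xpmsg_func.arg2 = arg2;
	m->received = 0;
	m->complete = 0;
}
#endif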

/*
 * The cpuinfo structure. This structure maintains information about one
 * currently installed CPU (there may be several of these if the machine
 * supports multiple CPUs, as on some Sun4m architectures). The information
 * in this structure supersedes the old "cpumod", "mmumod", and similar
 * fields.
 */

struct cpu_info {
	/*
	 * Primary inter-processor message area.  Keep this aligned
	 * to a cache line boundary if possible, as the structure
	 * itself occupies at most one (32- or 64-byte) cache line.
	 */
	struct xpmsg	msg __aligned(64);

	/* Scheduler flags */
	int	ci_want_ast;
	int	ci_want_resched;

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	int		ci_cpuid;	/* CPU index (see cpus[] array) */

	/* Context administration */
	int		*ctx_tbl;	/* [4m] SRMMU-edible context table */
	paddr_t		ctx_tbl_pa;	/* [4m] ctx table physical address */

	/* Cache information */
	struct cacheinfo	cacheinfo;	/* see above */

	/* various flags to work around anomalies in chips */
	volatile int	flags;		/* see CPUFLG_xxx, below */

	/* Per processor counter register (sun4m only) */
	volatile struct counter_4m	*counterreg_4m;

	/* Per processor interrupt mask register (sun4m only) */
	volatile struct icr_pi	*intreg_4m;
	/*
	 * Send an IPI to (cpi).  For Ross cpus we need to read
	 * the pending register to avoid a hardware bug.
	 */
#define raise_ipi(cpi,lvl)	do {				\
	volatile int x;						\
	(cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(lvl);		\
	x = (cpi)->intreg_4m->pi_pend; __USE(x);		\
} while (0)

	int		sun4_mmu3l;	/* [4]: 3-level MMU present */
#if defined(SUN4_MMU3L)
#define HASSUN4_MMU3L	(cpuinfo.sun4_mmu3l)
#else
#define HASSUN4_MMU3L	(0)
#endif
	int		ci_idepth;		/* Interrupt depth */

	/*
	 * The following pointers point to processes that are somehow
	 * associated with this CPU--running on it, using its FPU,
	 * etc.
	 */
	struct	lwp	*ci_curlwp;		/* CPU owner */
	struct	lwp	*ci_onproc;		/* current user LWP / kthread */
	struct	lwp	*fplwp;			/* FPU owner */

	int		ci_mtx_count;
	int		ci_mtx_oldspl;

	/*
	 * Idle PCB and interrupt stack.
	 */
	void		*eintstack;		/* End of interrupt stack */
#define INT_STACK_SIZE	(128 * 128)		/* 128 128-byte stack frames */
	void		*redzone;		/* DEBUG: stack red zone */
#define REDSIZE		(8*96)			/* some room for bouncing */

	struct	pcb	*curpcb;		/* CPU's PCB & kernel stack */

	/* locore defined: */
	void	(*get_syncflt)(void);		/* Not C-callable */
	int	(*get_asyncflt)(u_int *, u_int *);

	/* Synchronous Fault Status; temporary storage */
	struct {
		int	sfsr;
		int	sfva;
	} syncfltdump;

	/*
	 * Cache handling functions.
	 * Most cache flush functions come in two flavours: one that
	 * acts only on the CPU it executes on, and another that
	 * uses inter-processor signals to flush the cache on
	 * all processor modules.
	 * The `ft_' versions are fast trap cache flush handlers.
	 */
	void	(*cache_flush)(void *, u_int);
	void	(*vcache_flush_page)(int, int);
	void	(*sp_vcache_flush_page)(int, int);
	void	(*ft_vcache_flush_page)(int, int);
	void	(*vcache_flush_segment)(int, int, int);
	void	(*sp_vcache_flush_segment)(int, int, int);
	void	(*ft_vcache_flush_segment)(int, int, int);
	void	(*vcache_flush_region)(int, int);
	void	(*sp_vcache_flush_region)(int, int);
	void	(*ft_vcache_flush_region)(int, int);
	void	(*vcache_flush_context)(int);
	void	(*sp_vcache_flush_context)(int);
	void	(*ft_vcache_flush_context)(int);

	/* These are helpers for (*cache_flush)() */
	void	(*sp_vcache_flush_range)(int, int, int);
	void	(*ft_vcache_flush_range)(int, int, int);

	void	(*pcache_flush_page)(paddr_t, int);
	void	(*pure_vcache_flush)(void);
	void	(*cache_flush_all)(void);

	/* Support for hardware-assisted page clear/copy */
	void	(*zero_page)(paddr_t);
	void	(*copy_page)(paddr_t, paddr_t);

	/* Virtual addresses for use in pmap copy_page/zero_page */
	void *	vpage[2];
	int	*vpage_pte[2];		/* pte location of vpage[] */

	void	(*cache_enable)(void);

	int	cpu_type;	/* Type: see CPUTYP_xxx below */

	/* Inter-processor message area (high priority but used infrequently) */
	struct xpmsg	msg_lev15;

	/* CPU information */
	int		node;		/* PROM node for this CPU */
	int		mid;		/* Module ID for MP systems */
	int		mbus;		/* 1 if CPU is on MBus */
	int		mxcc;		/* 1 if a MBus-level MXCC is present */
	const char	*cpu_longname;	/* CPU model */
	int		cpu_impl;	/* CPU implementation code */
	int		cpu_vers;	/* CPU version code */
	int		mmu_impl;	/* MMU implementation code */
	int		mmu_vers;	/* MMU version code */
	int		master;		/* 1 if this is the boot CPU */

	vaddr_t		mailbox;	/* VA of CPU's mailbox */

	int		mmu_ncontext;	/* Number of contexts supported */
	int		mmu_nregion;	/* Number of regions supported */
	int		mmu_nsegment;	/* [4/4c] Segments */
	int		mmu_npmeg;	/* [4/4c] Pmegs */

/* XXX - we currently don't actually use the following */
	int		arch;		/* Architecture: CPU_SUN4x */
	int		class;		/* Class: SuperSPARC, microSPARC... */
	int		classlvl;	/* Iteration in class: 1, 2, etc. */
	int		classsublvl;	/* stepping in class (version) */

	int		hz;		/* Clock speed */

	/* FPU information */
	int		fpupresent;	/* true if FPU is present */
	int		fpuvers;	/* FPU revision */
	const char	*fpu_name;	/* FPU model */
	char		fpu_namebuf[32];/* Buffer for FPU name, if necessary */

	/* XXX */
	volatile void	*ci_ddb_regs;		/* DDB regs */

	/*
	 * The following are function pointers to do interesting CPU-dependent
	 * things without having to do type-tests all the time.
	 */

	/* bootup things: access to physical memory */
	u_int	(*read_physmem)(u_int addr, int space);
	void	(*write_physmem)(u_int addr, u_int data);
	void	(*cache_tablewalks)(void);
	void	(*mmu_enable)(void);
	void	(*hotfix)(struct cpu_info *);

#if 0
	/* hardware-assisted block operation routines */
	void		(*hwbcopy)(const void *from, void *to, size_t len);
	void		(*hwbzero)(void *buf, size_t len);

	/* routine to clear mbus-sbus buffers */
	void		(*mbusflush)(void);
#endif

	/*
	 * Memory error handler; parity errors, unhandled NMIs and other
	 * unrecoverable faults end up here.
	 */
	void		(*memerr)(unsigned, u_int, u_int, struct trapframe *);
	void		(*idlespin)(void);

	/* Module Control Registers */
	/*bus_space_handle_t*/ long ci_mbusport;
	/*bus_space_handle_t*/ long ci_mxccregs;

	u_int	ci_tt;			/* Last trap (if tracing) */

	/*
	 * Start/End VAs of this cpu_info region; we upload the other pages
	 * in this region that aren't part of the cpu_info to uvm.
	 */
	vaddr_t	ci_free_sva1, ci_free_eva1, ci_free_sva2, ci_free_eva2;

	struct evcnt ci_savefpstate;
	struct evcnt ci_savefpstate_null;
	struct evcnt ci_xpmsg_mutex_fail;
	struct evcnt ci_xpmsg_mutex_fail_call;
	struct evcnt ci_xpmsg_mutex_not_held;
	struct evcnt ci_xpmsg_bogus;
	struct evcnt ci_intrcnt[16];
	struct evcnt ci_sintrcnt[16];

	struct cpu_data ci_data;	/* MI per-cpu data */

#if defined(GPROF) && defined(MULTIPROCESSOR)
	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
#endif
};

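/*
 * Illustrative sketch: poking another sun4m module with the
 * raise_ipi() macro defined inside struct cpu_info above.  Real
 * senders first fill in the target's msg area under xpmsg_mutex;
 * this helper and its level argument are hypothetical.
 */
#if 0	/* example only */
static void
example_poke(struct cpu_info *cpi)
{
	raise_ipi(cpi, 13);	/* set, then read back the pending register */
}
#endif
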
#endif /* _KERNEL || _KMEMUSER */

/* Kernel only things. */
#if defined(_KERNEL)

/*
 * Definitions of cpu-dependent requirements
 * referenced in generic code.
 */
#define	cpuinfo			(*(struct cpu_info *)CPUINFO_VA)
#define	curcpu()		(cpuinfo.ci_self)
#define	curlwp			(cpuinfo.ci_curlwp)
#define	CPU_IS_PRIMARY(ci)	((ci)->master)

#define	cpu_number()		(cpuinfo.ci_cpuid)

void	cpu_proc_fork(struct proc *, struct proc *);

#if defined(MULTIPROCESSOR)
void	cpu_boot_secondary_processors(void);
#endif

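/*
 * Illustrative sketch: MI-style code can ask about the executing CPU
 * through the macros above without knowing about CPUINFO_VA.  The
 * function below is hypothetical.
 */
#if 0	/* example only */
static int
running_on_boot_cpu(void)
{
	/* curcpu() resolves via cpuinfo.ci_self; master is set on the boot CPU */
	return CPU_IS_PRIMARY(curcpu());
}
#endif
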
/*
 * Arguments to hardclock, softclock and statclock encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
struct clockframe {
	u_int	psr;		/* psr before interrupt, excluding PSR_ET */
	u_int	pc;		/* pc at interrupt */
	u_int	npc;		/* npc at interrupt */
	u_int	ipl;		/* actual interrupt priority level */
	u_int	fp;		/* %fp at interrupt */
};
typedef struct clockframe clockframe;

extern int eintstack[];

#define	CLKF_USERMODE(framep)	(((framep)->psr & PSR_PS) == 0)
#define	CLKF_LOPRI(framep,n)	(((framep)->psr & PSR_PIL) < (n) << 8)
#define	CLKF_PC(framep)		((framep)->pc)
#if defined(MULTIPROCESSOR)
#define	CLKF_INTR(framep)						\
	((framep)->fp > (u_int)cpuinfo.eintstack - INT_STACK_SIZE &&	\
	 (framep)->fp < (u_int)cpuinfo.eintstack)
#else
#define	CLKF_INTR(framep)	((framep)->fp < (u_int)eintstack)
#endif

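/*
 * Illustrative sketch: how a clock handler might classify a tick with
 * the CLKF_* macros above (the handler itself is hypothetical).  Per
 * the comment above, CLKF_INTR is consulted only when CLKF_USERMODE
 * is false.
 */
#if 0	/* example only */
static void
example_clocktick(struct clockframe *cf)
{
	if (CLKF_USERMODE(cf)) {
		/* tick arrived in user mode: charge the current LWP */
	} else if (CLKF_INTR(cf)) {
		/* %fp lies within the interrupt stack: nested interrupt */
	} else {
		/* kernel mode, process context */
	}
}
#endif
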
void	sparc_softintr_init(void);

/*
 * Preempt the current process on the target CPU if in interrupt from
 * user mode, or after the current trap/syscall if in system mode.
 */
#define cpu_need_resched(ci, l, flags) do {				\
	__USE(flags);							\
	(ci)->ci_want_ast = 1;						\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if ((flags & RESCHED_REMOTE) != 0)				\
		XCALL0(sparc_noop, 1U << (ci)->ci_cpuid);		\
} while (/*CONSTCOND*/0)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, cpuinfo.ci_want_ast = 1)

/*
 * Notify the given LWP (l) that it has a signal pending, to be
 * processed as soon as possible.
 */
#define cpu_signotify(l) do {						\
	(l)->l_cpu->ci_want_ast = 1;					\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if ((l)->l_cpu->ci_cpuid != cpu_number())			\
		XCALL0(sparc_noop, 1U << (l)->l_cpu->ci_cpuid);		\
} while (/*CONSTCOND*/0)

/* CPU architecture version */
extern int cpu_arch;

/* Number of CPUs in the system */
extern int sparc_ncpus;

/* Provide %pc of a lwp */
#define LWP_PC(l)	((l)->l_md.md_tf->tf_pc)

/* Hardware cross-call mutex */
extern kmutex_t xpmsg_mutex;

/*
 * Interrupt handler chains.  Interrupt handlers should return 0
 * (``not me'') or 1 (``I took care of it'').  intr_establish()
 * inserts a handler into the list.  The handler is called with its
 * (single) argument, or with a pointer to a clockframe if ih_arg is
 * NULL.
 *
 * realfun/realarg are used to chain callers, usually with the
 * biglock wrapper.
 */
extern struct intrhand {
	int	(*ih_fun)(void *);
	void	*ih_arg;
	struct	intrhand *ih_next;
	int	ih_classipl;
	int	(*ih_realfun)(void *);
	void	*ih_realarg;
} *intrhand[15];

void	intr_establish(int, int, struct intrhand *, void (*)(void), bool);
void	intr_disestablish(int, struct intrhand *);

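/*
 * Illustrative sketch: hanging a handler off an interrupt level with
 * intr_establish().  The device logic, the parameter values, and the
 * call shown in the trailing comment are hypothetical; see intr.c for
 * the authoritative meaning of each argument.
 */
#if 0	/* example only */
static int
example_hardintr(void *arg)
{
	/* return 1 if this was our device's interrupt, else 0 */
	return 0;
}

static struct intrhand example_ih = {
	.ih_fun = example_hardintr,
	.ih_arg = NULL,
};

/* e.g.: intr_establish(ipl, classipl, &example_ih, NULL, false); */
#endif
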
void	intr_lock_kernel(void);
void	intr_unlock_kernel(void);

/* disksubr.c */
struct dkbad;
int isbad(struct dkbad *, int, int, int);

/* machdep.c */
int	ldcontrolb(void *);
void *	reserve_dumppages(void *);
void	wcopy(const void *, void *, u_int);
void	wzero(void *, u_int);

/* clock.c */
struct timeval;
void	lo_microtime(struct timeval *);
void	schedintr(void *);

/* locore.s */
struct fpstate;
void	ipi_savefpstate(struct fpstate *);
void	savefpstate(struct fpstate *);
void	loadfpstate(struct fpstate *);
int	probeget(void *, int);
void	write_all_windows(void);
void	write_user_windows(void);
void	lwp_trampoline(void);
struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
int	xldcontrolb(void *, struct pcb *);
void	copywords(const void *, void *, size_t);
void	qcopy(const void *, void *, size_t);
void	qzero(void *, size_t);

/* trap.c */
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int	rwindow_save(struct lwp *);

/* cons.c */
int	cnrom(void);

/* zs.c */
void zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
#ifdef KGDB
void zs_kgdb_init(void);
#endif

/* fb.c */
void	fb_unblank(void);

/* kgdb_stub.c */
#ifdef KGDB
void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void kgdb_connect(int);
void kgdb_panic(void);
#endif

/* emul.c */
struct trapframe;
int fixalign(struct lwp *, struct trapframe *, void **);
int emulinstr(int, struct trapframe *);

/* cpu.c */
void mp_pause_cpus(void);
void mp_resume_cpus(void);
void mp_halt_cpus(void);
#ifdef DDB
void mp_pause_cpus_ddb(void);
void mp_resume_cpus_ddb(void);
#endif

/* intr.c */
u_int setitr(u_int);
u_int getitr(void);

/*
 * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
 * of the trap vector table.  The next eight bits are supplied by the
 * hardware when the trap occurs, and the bottom four bits are always
 * zero (so that we can shove up to 16 bytes of executable code---exactly
 * four instructions---into each trap vector).
 *
 * The hardware allocates half the trap vectors to hardware and half to
 * software.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[4];		/* the four instructions */
};

extern struct trapvec *trapbase;	/* the 256 vectors */
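
/*
 * Illustrative sketch of the TBR layout described above: the vector
 * for 8-bit trap type tt lives at trapbase[tt], i.e. at TBR | (tt << 4).
 * The helper name is hypothetical.
 */
#if 0	/* example only */
static struct trapvec *
example_tvec(int tt)
{
	return &trapbase[tt];	/* 16 bytes == 4 instructions per vector */
}
#endif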

#endif /* _KERNEL */
#endif /* _CPU_H_ */