/*	$NetBSD: cpu.h,v 1.31 2002/05/14 21:21:45 eeh Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This software was developed by the Computer Systems Engineering group
 * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
 * contributed to Berkeley.
 *
 * All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Lawrence Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
 */

#ifndef _CPU_H_
#define _CPU_H_

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_MAXID		2	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES {			\
	{ 0, 0 },				\
	{ "booted_kernel", CTLTYPE_STRING },	\
}
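
/*
 * Illustrative sketch only (not part of the original header): a userland
 * program could fetch the machdep.booted_kernel string through the
 * standard sysctl(3) interface using the ids above.  The buffer size and
 * error handling below are arbitrary choices for the example.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *
 *	void
 *	print_booted_kernel(void)
 *	{
 *		int mib[2] = { CTL_MACHDEP, CPU_BOOTED_KERNEL };
 *		char buf[MAXPATHLEN];
 *		size_t len = sizeof(buf);
 *
 *		if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
 *			err(1, "sysctl machdep.booted_kernel");
 *		printf("booted kernel: %s\n", buf);
 *	}
 */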

#ifdef _KERNEL
/*
 * Exported definitions unique to SPARC cpu support.
 */

#if !defined(_LKM)
#include "opt_multiprocessor.h"
#include "opt_lockdebug.h"
#endif

#include <machine/psl.h>
#include <machine/reg.h>
#include <machine/intr.h>
#include <sparc64/sparc64/intreg.h>

#include <sys/sched.h>
/*
 * The cpu_info structure is part of a 64KB structure that is mapped both
 * into the kernel pmap and, on each particular processor, by a single
 * locked TTE at CPUINFO_VA.  A processor's own cpu_info is therefore
 * reachable at CPUINFO_VA only on that processor; other processors get at
 * it through the additional mapping in the kernel pmap.
 *
 * The 64KB page contains:
 *
 * cpu_info
 * interrupt stack (all remaining space)
 * idle PCB
 * idle stack (STACKSPACE - sizeof(PCB))
 * 32KB TSB
 */

struct cpu_info {
	/* Most important fields first */
	struct proc		*ci_curproc;
	struct pcb		*ci_cpcb;	/* also initial stack */
	struct cpu_info		*ci_next;

	struct proc		*ci_fpproc;
	int			ci_number;
	int			ci_upaid;
	struct schedstate_percpu ci_schedstate; /* scheduler state */

	/* DEBUG/DIAGNOSTIC stuff */
	u_long			ci_spin_locks;	/* # of spin locks held */
	u_long			ci_simple_locks;/* # of simple locks held */

	/* Spinning up the CPU */
	void			(*ci_spinup) __P((void)); /* spinup routine */
	void			*ci_initstack;
	paddr_t			ci_paddr;	/* Phys addr of this structure. */
};

extern struct cpu_info *cpus;
extern struct cpu_info cpu_info_store;

#if 1
#define	curcpu()	(&cpu_info_store)
#else
#define	curcpu()	((struct cpu_info *)CPUINFO_VA)
#endif
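
/*
 * Illustrative sketch only (not part of the original header, never
 * compiled): walking the chain of cpu_info structures that the MD
 * startup code links through ci_next, starting at `cpus', and a
 * typical use of curcpu().  The function names are made up.
 */
#if 0
static int
cpu_count_example(void)
{
	struct cpu_info *ci;
	int n = 0;

	for (ci = cpus; ci != NULL; ci = ci->ci_next)
		n++;
	return (n);
}

static struct proc *
cpu_curproc_example(void)
{
	/* curcpu() currently always resolves to &cpu_info_store. */
	return (curcpu()->ci_curproc);
}
#endif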

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_swapin(p)	/* nothing */
#define	cpu_swapout(p)	/* nothing */
#define	cpu_wait(p)	/* nothing */
#if 1
#define cpu_number()	0
#else
#define	cpu_number()	(curcpu()->ci_number)
#endif

/*
 * Arguments to hardclock, softclock and gatherstats encapsulate the
 * previous machine state in an opaque clockframe.  The ipl is here
 * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
 * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
 */
extern int intstack[];
extern int eintstack[];
struct clockframe {
	struct trapframe64 t;
};

#define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
/*
 * XXX Disable CLKF_BASEPRI() for now.  If we use a counter-timer for
 * the clock, the interrupt remains blocked until the interrupt handler
 * returns and we write to the clear interrupt register.  If we use
 * %tick for the clock, we could get multiple interrupts, but the
 * currently enabled INTR_INTERLOCK will prevent the interrupt from being
 * posted twice anyway.
 *
 * Switching to %tick for all machines and disabling INTR_INTERLOCK
 * in locore.s would allow us to take advantage of CLKF_BASEPRI().
 */
#if 0
#define	CLKF_BASEPRI(framep)	(((framep)->t.tf_oldpil) == 0)
#else
#define	CLKF_BASEPRI(framep)	(0)
#endif
#define	CLKF_PC(framep)		((framep)->t.tf_pc)
/* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
#define	CLKF_INTR(framep)						\
	((!CLKF_USERMODE(framep))&&					\
		(((framep)->t.tf_out[6] & 1 ) ?				\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK-0x7ff) &&		\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK-0x7ff)) :		\
			(((vaddr_t)(framep)->t.tf_out[6] <		\
				(vaddr_t)EINTSTACK) &&			\
			((vaddr_t)(framep)->t.tf_out[6] >		\
				(vaddr_t)INTSTACK))))
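
/*
 * Illustrative sketch only (not part of the original header, never
 * compiled): the way a clock interrupt handler typically consults the
 * macros above, in the style of the MI hardclock()/statclock() code.
 * The counters and the function name are made up for the example.
 */
#if 0
static u_long example_user_ticks, example_intr_ticks, example_sys_ticks;

static void
clockframe_example(struct clockframe *frame)
{
	if (CLKF_USERMODE(frame)) {
		/* Trap came from user mode; CLKF_PC() is a user pc. */
		example_user_ticks++;
	} else if (CLKF_INTR(frame)) {
		/* We interrupted some other interrupt handler. */
		example_intr_ticks++;
	} else {
		/* Kernel mode, not on the interrupt stack. */
		example_sys_ticks++;
	}
}
#endif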

/*
 * Software interrupt request `register'.
 */
#ifdef DEPRECATED
union sir {
	int	sir_any;
	char	sir_which[4];
} sir;

#define SIR_NET		0
#define SIR_CLOCK	1
#endif

extern struct intrhand soft01intr, soft01net, soft01clock;

#if 0
#define setsoftint()	send_softint(-1, IPL_SOFTINT, &soft01intr)
#define setsoftnet()	send_softint(-1, IPL_SOFTNET, &soft01net)
#else
void setsoftint __P((void));
void setsoftnet __P((void));
#endif

int	want_ast;

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
int	want_resched;		/* resched() was called */
#define	need_resched(ci)	(want_resched = 1, want_ast = 1)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the sparc, request an ast to send us
 * through trap(), marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	((p)->p_flag |= P_OWEUPC, want_ast = 1)

/*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
#define	signotify(p)		(want_ast = 1)

/*
 * Only one process may own the FPU state.
 *
 * XXX this must be per-cpu (eventually)
 */
struct	proc *fpproc;		/* FPU owner */
int	foundfpu;		/* true => we have an FPU */

/*
 * Interrupt handler chains.  Interrupt handlers should return 0 for
 * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
 * handler into the list.  The handler is called with its (single)
 * argument, or with a pointer to a clockframe if ih_arg is NULL.
 */
struct intrhand {
	int			(*ih_fun) __P((void *));
	void			*ih_arg;
	short			ih_number;	/* interrupt number */
						/* the H/W provides */
	char			ih_pil;		/* interrupt priority */
	struct intrhand		*ih_next;	/* global list */
	struct intrhand		*ih_pending;	/* interrupt queued */
	volatile u_int64_t	*ih_map;	/* Interrupt map reg */
	volatile u_int64_t	*ih_clr;	/* clear interrupt reg */
};
extern struct intrhand *intrhand[];
extern struct intrhand *intrlev[MAXINTNUM];

void	intr_establish __P((int level, struct intrhand *));
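
/*
 * Illustrative sketch only (not part of the original header, never
 * compiled): how a driver hooks a handler into the chain.  It fills in
 * an intrhand with its handler function, its argument and its PIL, then
 * hands the structure to intr_establish().  The handler returns 1 if
 * the interrupt belonged to the device and 0 otherwise.  All the
 * example_* names are made up.
 */
#if 0
struct example_softc {
	struct intrhand	sc_ih;
	int		sc_pending;
};

static int
example_intr(void *arg)
{
	struct example_softc *sc = arg;

	if (!sc->sc_pending)
		return (0);		/* not me */
	sc->sc_pending = 0;
	return (1);			/* I took care of it */
}

static void
example_intr_attach(struct example_softc *sc, int pil)
{
	sc->sc_ih.ih_fun = example_intr;
	sc->sc_ih.ih_arg = sc;
	sc->sc_ih.ih_pil = pil;
	intr_establish(pil, &sc->sc_ih);
}
#endif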

/* cpu.c */
paddr_t cpu_alloc __P((void));
u_int64_t cpu_init __P((paddr_t, int));
/* disksubr.c */
struct dkbad;
int isbad __P((struct dkbad *bt, int, int, int));
/* machdep.c */
int	ldcontrolb __P((caddr_t));
void	dumpconf __P((void));
caddr_t	reserve_dumppages __P((caddr_t));
/* clock.c */
struct timeval;
int	tickintr __P((void *)); /* level 10 (tick) interrupt code */
int	clockintr __P((void *));/* level 10 (clock) interrupt code */
int	statintr __P((void *));	/* level 14 (statclock) interrupt code */
/* locore.s */
struct fpstate64;
void	savefpstate __P((struct fpstate64 *));
void	loadfpstate __P((struct fpstate64 *));
u_int64_t	probeget __P((paddr_t, int, int));
int	probeset __P((paddr_t, int, int, u_int64_t));
#if 0
void	write_all_windows __P((void));
void	write_user_windows __P((void));
#else
#define	 write_all_windows() __asm __volatile("flushw" : : )
#define	 write_user_windows() __asm __volatile("flushw" : : )
#endif
void 	proc_trampoline __P((void));
struct pcb;
void	snapshot __P((struct pcb *));
struct frame *getfp __P((void));
int	xldcontrolb __P((caddr_t, struct pcb *));
void	copywords __P((const void *, void *, size_t));
void	qcopy __P((const void *, void *, size_t));
void	qzero __P((void *, size_t));
void	switchtoctx __P((int));
/* locore2.c */
void	remrq __P((struct proc *));
/* trap.c */
void	kill_user_windows __P((struct proc *));
int	rwindow_save __P((struct proc *));
/* amd7930intr.s */
void	amd7930_trap __P((void));
/* cons.c */
int	cnrom __P((void));
/* zs.c */
void zsconsole __P((struct tty *, int, int, void (**)(struct tty *, int)));
#ifdef KGDB
void zs_kgdb_init __P((void));
#endif
/* fb.c */
void	fb_unblank __P((void));
/* kgdb_stub.c */
#ifdef KGDB
void kgdb_attach __P((int (*)(void *), void (*)(void *, int), void *));
void kgdb_connect __P((int));
void kgdb_panic __P((void));
#endif
/* emul.c */
int	fixalign __P((struct proc *, struct trapframe64 *));
int	emulinstr __P((vaddr_t, struct trapframe64 *));

/*
 *
 * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
 * of the trap vector table.  The next eight bits are supplied by the
 * hardware when the trap occurs, and the bottom four bits are always
 * zero (so that we can shove up to 16 bytes of executable code---exactly
 * four instructions---into each trap vector).
 *
 * The hardware allocates half the trap vectors to hardware and half to
 * software.
 *
 * Traps have priorities assigned (lower number => higher priority).
 */

struct trapvec {
	int	tv_instr[8];		/* the eight instructions */
};
extern struct trapvec *trapbase;	/* the 256 vectors */
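
/*
 * Illustrative sketch only (not part of the original header, never
 * compiled): because the trap table is a contiguous array of fixed-size
 * entries, the vector for trap type `tt' is simply trapbase[tt], i.e.
 * the table base plus tt * sizeof(struct trapvec) bytes.  The function
 * name is made up.
 */
#if 0
static struct trapvec *
trapvec_example(int tt)
{
	return (&trapbase[tt]);
}
#endif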

extern void wzero __P((void *, u_int));
extern void wcopy __P((const void *, void *, u_int));

#endif /* _KERNEL */
#endif /* _CPU_H_ */