xref: /netbsd-src/sys/arch/sparc64/include/cpu.h (revision 1b9578b8c2c1f848eeb16dabbfd7d1f0d9fdefbd)
1 /*	$NetBSD: cpu.h,v 1.96 2011/04/13 03:40:00 mrg Exp $ */
2 
3 /*
4  * Copyright (c) 1992, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This software was developed by the Computer Systems Engineering group
8  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9  * contributed to Berkeley.
10  *
11  * All advertising materials mentioning features or use of this software
12  * must display the following acknowledgement:
13  *	This product includes software developed by the University of
14  *	California, Lawrence Berkeley Laboratory.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
41  */
42 
43 #ifndef _CPU_H_
44 #define _CPU_H_
45 
46 /*
47  * CTL_MACHDEP definitions.
48  */
49 #define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
50 #define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
51 #define	CPU_BOOT_ARGS		3	/* string: args booted with */
52 #define	CPU_ARCH		4	/* integer: cpu architecture version */
53 #define	CPU_MAXID		5	/* number of valid machdep ids */
54 
55 #if defined(_KERNEL) || defined(_KMEMUSER)
56 /*
57  * Exported definitions unique to SPARC cpu support.
58  */
59 
60 #if defined(_KERNEL_OPT)
61 #include "opt_multiprocessor.h"
62 #include "opt_lockdebug.h"
63 #endif
64 
65 #include <machine/psl.h>
66 #include <machine/reg.h>
67 #include <machine/pte.h>
68 #include <machine/intr.h>
69 #if defined(_KERNEL)
70 #include <machine/cpuset.h>
71 #include <sparc64/sparc64/intreg.h>
72 #endif
73 
74 #include <sys/cpu_data.h>
75 #include <sys/evcnt.h>
76 
77 /*
78  * The cpu_info structure is part of a 64KB structure mapped both by the
79  * kernel pmap and by a single locked TTE at CPUINFO_VA for that processor.
80  * Each processor's cpu_info is accessible at CPUINFO_VA only for that
81  * processor.  Other processors can access that through an additional mapping
82  * in the kernel pmap.
83  *
84  * The 64KB page contains:
85  *
86  * cpu_info
87  * interrupt stack (all remaining space)
88  * idle PCB
89  * idle stack (STACKSPACE - sizeof(PCB))
90  * 32KB TSB
91  */
92 
93 struct cpu_info {
94 	struct cpu_data		ci_data;	/* MI per-cpu data */
95 
96 
97 	/*
98 	 * SPARC cpu_info structures live at two VAs: one global
99 	 * VA (so each CPU can access any other CPU's cpu_info)
100 	 * and an alias VA CPUINFO_VA which is the same on each
101 	 * CPU and maps to that CPU's cpu_info.  Since the alias
102 	 * CPUINFO_VA is how we locate our cpu_info, we have to
103 	 * self-reference the global VA so that we can return it
104 	 * in the curcpu() macro.
105 	 */
106 	struct cpu_info * volatile ci_self;
107 
108 	/* Most important fields first */
109 	struct lwp		*ci_curlwp;	/* LWP on this CPU (see curlwp) */
110 	struct pcb		*ci_cpcb;	/* current PCB (see curpcb) */
111 	struct cpu_info		*ci_next;	/* link; see CPU_INFO_FOREACH */
112 
113 	struct lwp		*ci_fplwp;	/* FPU-owning LWP (see fplwp) */
114 
115 	void			*ci_eintstack;	/* interrupt stack end — TODO confirm */
116 
117 	int			ci_mtx_count;	/* NOTE(review): presumably MI mutex spin count */
118 	int			ci_mtx_oldspl;	/* NOTE(review): presumably saved SPL for mutexes */
119 
120 	/* Spinning up the CPU */
121 	void			(*ci_spinup)(void);	/* spin-up hook for this CPU */
122 	paddr_t			ci_paddr;	/* PA of this cpu_info? — TODO confirm */
123 
124 	int			ci_cpuid;
125 
126 	/* CPU PROM information. */
127 	u_int			ci_node;	/* PROM node handle for this CPU */
128 
129 	/* %tick and cpu frequency information */
130 	u_long			ci_tick_increment;
131 	uint64_t		ci_cpu_clockrate[2];
132 
133 	/* Interrupts */
134 	struct intrhand		*ci_intrpending[16];	/* pending handlers, indexed by PIL? — verify */
135 	struct intrhand		*ci_tick_ih;	/* %tick interrupt handler */
136 
137 	/* Event counters */
138 	struct evcnt		ci_tick_evcnt;
139 
140 	/* This could be under MULTIPROCESSOR, but there's no good reason */
141 	struct evcnt		ci_ipi_evcnt[IPI_EVCNT_NUM];
142 
143 	int			ci_flags;	/* CPUF_* flags (see CPU_IS_PRIMARY) */
144 	int			ci_want_ast;	/* AST requested (see want_ast) */
145 	int			ci_want_resched;	/* reschedule requested (see want_resched) */
146 	int			ci_idepth;	/* interrupt nesting depth (presumed) */
147 
148 /*
149  * A context is simply a small number that differentiates multiple mappings
150  * of the same address.  Contexts on the spitfire are 13 bits, but could
151  * be as large as 17 bits.
152  *
153  * Each context is either free or attached to a pmap.
154  *
155  * The context table is an array of pointers to psegs.  Just dereference
156  * the right pointer and you get to the pmap segment tables.  These are
157  * physical addresses, of course.
158  *
159  * ci_ctx_lock protects this CPU's context allocation/free.
160  * These are all allocated almost within the same cacheline.
161  */
162 	kmutex_t		ci_ctx_lock;	/* protects the context fields below */
163 	int			ci_pmap_next_ctx;	/* next context to hand out (presumed) */
164 	int			ci_numctx;	/* number of contexts available (presumed) */
165 	paddr_t 		*ci_ctxbusy;	/* context table: PA of pseg per context */
166 	LIST_HEAD(, pmap) 	ci_pmap_ctxlist;	/* pmaps holding a context here */
167 
168 	/*
169 	 * The TSBs are per cpu too (since MMU context differs between
170 	 * cpus). These are just caches for the TLBs.
171 	 */
172 	pte_t			*ci_tsb_dmmu;	/* data-MMU TSB */
173 	pte_t			*ci_tsb_immu;	/* instruction-MMU TSB */
174 
175 	volatile void		*ci_ddb_regs;	/* DDB regs */
176 };
177 
178 #endif /* _KERNEL || _KMEMUSER */
179 
180 #ifdef _KERNEL
181 
182 #define CPUF_PRIMARY	1
183 
184 /*
185  * CPU boot arguments. Used by secondary CPUs at the bootstrap time.
186  */
187 struct cpu_bootargs {
188 	u_int	cb_node;	/* PROM CPU node */
189 	volatile int cb_flags;	/* bootstrap handshake; written by another CPU (presumed) */
190 
191 	vaddr_t cb_ktext;	/* kernel text: start VA */
192 	paddr_t cb_ktextp;	/* kernel text: start PA */
193 	vaddr_t cb_ektext;	/* kernel text: end VA */
194 
195 	vaddr_t cb_kdata;	/* kernel data: start VA */
196 	paddr_t cb_kdatap;	/* kernel data: start PA */
197 	vaddr_t cb_ekdata;	/* kernel data: end VA */
198 
199 	paddr_t	cb_cpuinfo;	/* PA of the new CPU's cpu_info page (presumed) */
200 };
201 
202 extern struct cpu_bootargs *cpu_args;
203 
204 #if defined(MULTIPROCESSOR)
205 extern int sparc_ncpus;
206 #else
207 #define sparc_ncpus 1
208 #endif
209 
210 extern struct cpu_info *cpus;
211 extern struct pool_cache *fpstate_cache;
212 
/*
 * curcpu() exploits the per-CPU alias mapping: every CPU sees its own
 * cpu_info at CPUINFO_VA and returns the globally-visible ci_self from it.
 */
213 #define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
/*
 * NOTE(review): ci_index is not a member of the struct cpu_info declared
 * above — presumably supplied via the MI ci_data (sys/cpu_data.h) or a
 * macro elsewhere; verify.
 */
214 #define	cpu_number()	(curcpu()->ci_index)
215 #define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)
216 
/* Iterate over all CPUs by chasing ci_next from the global `cpus` list head. */
217 #define CPU_INFO_ITERATOR		int
218 #define CPU_INFO_FOREACH(cii, ci)	cii = 0, ci = cpus; ci != NULL; \
219 					ci = ci->ci_next
220 
/* Per-CPU shorthands, all routed through curcpu(). */
221 #define curlwp		curcpu()->ci_curlwp
222 #define fplwp		curcpu()->ci_fplwp
223 #define curpcb		curcpu()->ci_cpcb
224 
225 #define want_ast	curcpu()->ci_want_ast
226 #define want_resched	curcpu()->ci_want_resched
227 
228 /*
229  * definitions of cpu-dependent requirements
230  * referenced in generic code
231  */
232 #define	cpu_wait(p)	/* nothing */
233 void cpu_proc_fork(struct proc *, struct proc *);
234 
235 /* run on the cpu itself */
236 void	cpu_pmap_init(struct cpu_info *);
237 /* run upfront to prepare the cpu_info */
238 void	cpu_pmap_prepare(struct cpu_info *, bool);
239 
240 #if defined(MULTIPROCESSOR)
241 extern vaddr_t cpu_spinup_trampoline;
242 
243 extern  char   *mp_tramp_code;
244 extern  u_long  mp_tramp_code_len;
245 extern  u_long  mp_tramp_tlb_slots;
246 extern  u_long  mp_tramp_func;
247 extern  u_long  mp_tramp_ci;
248 
249 void	cpu_hatch(void);
250 void	cpu_boot_secondary_processors(void);
251 
252 /*
253  * Call a function on other cpus:
254  *	multicast - send to everyone in the sparc64_cpuset_t
255  *	broadcast - send to all cpus but ourselves
256  *	send - send to just this cpu
257  * The called function do not follow the C ABI, so need to be coded in
258  * assembler.
259  */
260 typedef void (* ipifunc_t)(void *, void *);
261 
262 void	sparc64_multicast_ipi(sparc64_cpuset_t, ipifunc_t, uint64_t, uint64_t);
263 void	sparc64_broadcast_ipi(ipifunc_t, uint64_t, uint64_t);
264 void	sparc64_send_ipi(int, ipifunc_t, uint64_t, uint64_t);
265 
266 /*
267  * Call an arbitrary C function on another cpu (or all others but ourself)
268  */
269 typedef void (*ipi_c_call_func_t)(void*);
270 void	sparc64_generic_xcall(struct cpu_info*, ipi_c_call_func_t, void*);
271 
272 #endif
273 
274 /* Provide %pc of a lwp */
275 #define	LWP_PC(l)	((l)->l_md.md_tf->tf_pc)
276 
277 /*
278  * Arguments to hardclock, softclock and gatherstats encapsulate the
279  * previous machine state in an opaque clockframe.  The ipl is here
280  * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
281  * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
282  */
283 struct clockframe {
284 	struct trapframe64 t;	/* machine state captured at interrupt time */
285 };
286 
287 #define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
288 #define	CLKF_PC(framep)		((framep)->t.tf_pc)
/*
 * tf_out[6] is %o6, i.e. the stack pointer.  Per the SPARC V9 ABI a set
 * low bit marks a 64-bit frame whose %sp carries the stack BIAS (2047),
 * so the interrupt-stack bounds must be biased too before comparing.
 * Since some files in sys/kern do not know BIAS, 0x7ff is used here.
 */
290 #define	CLKF_INTR(framep)						\
291 	((!CLKF_USERMODE(framep))&&					\
292 		(((framep)->t.tf_out[6] & 1 ) ?				\
293 			(((vaddr_t)(framep)->t.tf_out[6] <		\
294 				(vaddr_t)EINTSTACK-0x7ff) &&		\
295 			((vaddr_t)(framep)->t.tf_out[6] >		\
296 				(vaddr_t)INTSTACK-0x7ff)) :		\
297 			(((vaddr_t)(framep)->t.tf_out[6] <		\
298 				(vaddr_t)EINTSTACK) &&			\
299 			((vaddr_t)(framep)->t.tf_out[6] >		\
300 				(vaddr_t)INTSTACK))))
301 
302 /*
303  * Give a profiling tick to the current process when the user profiling
304  * buffer pages are invalid.  On the sparc, request an ast to send us
305  * through trap(), marking the proc as needing a profiling tick.
306  */
307 #define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, want_ast = 1)
308 
309 /*
310  * Notify an LWP that it has a signal pending, process as soon as possible.
311  */
312 void cpu_signotify(struct lwp *);
313 
314 /*
315  * Interrupt handler chains.  Interrupt handlers should return 0 for
316  * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
317  * handler into the list.  The handler is called with its (single)
318  * argument, or with a pointer to a clockframe if ih_arg is NULL.
319  */
320 struct intrhand {
321 	int			(*ih_fun)(void *);	/* handler: returns 0 ("not me") or 1 */
322 	void			*ih_arg;	/* handler arg; NULL means pass a clockframe */
323 	/* if we have to take the biglock, we interpose a wrapper
324 	 * and need to save the original function and arg */
325 	int			(*ih_realfun)(void *);
326 	void			*ih_realarg;
327 	short			ih_number;	/* interrupt number */
328 						/* the H/W provides */
329 	char			ih_pil;		/* interrupt priority */
330 	struct intrhand		*ih_next;	/* global list */
331 	struct intrhand		*ih_pending;	/* interrupt queued */
332 	volatile uint64_t	*ih_map;	/* Interrupt map reg */
333 	volatile uint64_t	*ih_clr;	/* clear interrupt reg */
334 };
335 extern struct intrhand *intrhand[];
336 extern struct intrhand *intrlev[MAXINTNUM];
337 
338 void	intr_establish(int level, bool mpsafe, struct intrhand *);
339 void	*sparc_softintr_establish(int, int (*)(void *), void *);
340 void	sparc_softintr_schedule(void *);
341 void	sparc_softintr_disestablish(void *);
342 
343 /* disksubr.c */
344 struct dkbad;
345 int isbad(struct dkbad *bt, int, int, int);
346 /* machdep.c */
347 void *	reserve_dumppages(void *);
348 /* clock.c */
349 struct timeval;
350 int	tickintr(void *);	/* level 10/14 (tick) interrupt code */
351 int	clockintr(void *);	/* level 10 (clock) interrupt code */
352 int	statintr(void *);	/* level 14 (statclock) interrupt code */
353 int	schedintr(void *);	/* level 10 (schedclock) interrupt code */
354 void	tickintr_establish(int, int (*)(void *));
355 /* locore.s */
356 struct fpstate64;
357 void	savefpstate(struct fpstate64 *);
358 void	loadfpstate(struct fpstate64 *);
359 void	clearfpstate(void);
360 uint64_t	probeget(paddr_t, int, int);
361 int	probeset(paddr_t, int, int, uint64_t);
362 
/*
 * On sparc64 the FLUSHW instruction spills all active register windows;
 * there is no cheaper user-only variant, so both macros expand to it.
 */
363 #define	 write_all_windows() __asm volatile("flushw" : : )
364 #define	 write_user_windows() __asm volatile("flushw" : : )
365 
366 struct pcb;
367 void	snapshot(struct pcb *);
368 struct frame *getfp(void);
369 void	switchtoctx_us(int);
370 void	switchtoctx_usiii(int);
371 void	next_tick(long);
372 /* trap.c */
373 void	kill_user_windows(struct lwp *);
374 int	rwindow_save(struct lwp *);
375 /* cons.c */
376 int	cnrom(void);
377 /* zs.c */
378 void zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
379 /* fb.c */
380 void	fb_unblank(void);
381 /* kgdb_stub.c */
382 #ifdef KGDB
383 void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
384 void kgdb_connect(int);
385 void kgdb_panic(void);
386 #endif
387 /* emul.c */
388 int	fixalign(struct lwp *, struct trapframe64 *);
389 int	emulinstr(vaddr_t, struct trapframe64 *);
390 
391 #else /* _KERNEL */
392 
393 /*
394  * XXX: provide some definitions for crash(8), probably can share
395  */
396 #if defined(_KMEMUSER)
397 #define	curcpu()	(((struct cpu_info *)CPUINFO_VA)->ci_self)
398 #define curlwp		curcpu()->ci_curlwp
399 #endif
400 
401 #endif /* _KERNEL */
402 #endif /* _CPU_H_ */
403