xref: /netbsd-src/sys/arch/sparc/include/cpu.h (revision 87d689fb734c654d2486f87f7be32f1b53ecdbec)
1 /*	$NetBSD: cpu.h,v 1.99 2017/12/02 00:48:04 macallan Exp $ */
2 
3 /*
4  * Copyright (c) 1992, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This software was developed by the Computer Systems Engineering group
8  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9  * contributed to Berkeley.
10  *
11  * All advertising materials mentioning features or use of this software
12  * must display the following acknowledgement:
13  *	This product includes software developed by the University of
14  *	California, Lawrence Berkeley Laboratory.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
41  */
42 
43 #ifndef _CPU_H_
44 #define _CPU_H_
45 
/*
 * CTL_MACHDEP definitions.
 * These are the sysctl(3) node identifiers under the machdep subtree;
 * keep CPU_MAXID equal to (highest valid id + 1).
 */
#define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
#define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
#define	CPU_BOOT_ARGS		3	/* string: args booted with */
#define	CPU_ARCH		4	/* integer: cpu architecture version */
#define	CPU_MAXID		5	/* number of valid machdep ids */
54 
55 /*
56  * Exported definitions unique to SPARC cpu support.
57  */
58 
59 /* Things needed by crash or the kernel */
60 #if defined(_KERNEL) || defined(_KMEMUSER)
61 
62 #if defined(_KERNEL_OPT)
63 #include "opt_multiprocessor.h"
64 #include "opt_lockdebug.h"
65 #include "opt_sparc_arch.h"
66 #endif
67 
68 #include <sys/cpu_data.h>
69 #include <sys/evcnt.h>
70 
71 #include <machine/intr.h>
72 #include <machine/psl.h>
73 
74 #if defined(_KERNEL)
75 #include <sparc/sparc/cpuvar.h>
76 #include <sparc/sparc/intreg.h>
77 #else
78 #include <arch/sparc/sparc/vaddrs.h>
79 #include <arch/sparc/sparc/cache.h>
80 #endif
81 
struct trapframe;

/*
 * Message structure for Inter Processor Communication in MP systems.
 * One of these lives in each CPU's cpu_info (see `msg' and `msg_lev15'
 * below); the sender fills it in and the target CPU acts on it.
 */
struct xpmsg {
	volatile int tag;		/* message type: one of XPMSG* below */
#define	XPMSG15_PAUSECPU	1	/* level-15 request: pause this CPU */
#define	XPMSG_FUNC		4	/* cross call: run u.xpmsg_func.func */
#define	XPMSG_FTRP		5	/* cross call: run u.xpmsg_func.trap */

	volatile union {
		/*
		 * Cross call: ask to run (*func)(arg0,arg1,arg2)
		 * or (*trap)(arg0,arg1,arg2). `trap' should be the
		 * address of a `fast trap' handler that executes in
		 * the trap window (see locore.s).
		 */
		struct xpmsg_func {
			void	(*func)(int, int, int);
			void	(*trap)(int, int, int);
			int	arg0;
			int	arg1;
			int	arg2;
		} xpmsg_func;
	} u;
	/*
	 * NOTE(review): these look like the sender/target handshake flags
	 * for the cross-call protocol — confirm exact ordering semantics
	 * in sparc/sparc/cpu.c before relying on them.
	 */
	volatile int	received;
	volatile int	complete;
};
111 
112 /*
113  * The cpuinfo structure. This structure maintains information about one
114  * currently installed CPU (there may be several of these if the machine
115  * supports multiple CPUs, as on some Sun4m architectures). The information
116  * in this structure supersedes the old "cpumod", "mmumod", and similar
117  * fields.
118  */
119 
/*
 * NOTE(review): several members here (interrupt stack, curpcb, the
 * `get_syncflt' hook marked "Not C-callable") appear to be referenced
 * from locore assembly by fixed offset — treat the field layout as ABI
 * and verify against genassym before reordering anything.
 */
struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */

	/*
	 * Primary Inter-processor message area.  Keep this aligned
	 * to a cache line boundary if possible, as the structure
	 * itself is one (normal 32 byte) cache-line.
	 */
	struct xpmsg	msg __aligned(32);

	/* Scheduler flags */
	int	ci_want_ast;		/* AST pending for this CPU */
	int	ci_want_resched;	/* reschedule requested */

	/*
	 * SPARC cpu_info structures live at two VAs: one global
	 * VA (so each CPU can access any other CPU's cpu_info)
	 * and an alias VA CPUINFO_VA which is the same on each
	 * CPU and maps to that CPU's cpu_info.  Since the alias
	 * CPUINFO_VA is how we locate our cpu_info, we have to
	 * self-reference the global VA so that we can return it
	 * in the curcpu() macro.
	 */
	struct cpu_info * volatile ci_self;

	int		ci_cpuid;	/* CPU index (see cpus[] array) */

	/* Context administration */
	int		*ctx_tbl;	/* [4m] SRMMU-edible context table */
	paddr_t		ctx_tbl_pa;	/* [4m] ctx table physical address */

	/* Cache information */
	struct cacheinfo	cacheinfo;	/* see cache.h */

	/* various flags to workaround anomalies in chips */
	volatile int	flags;		/* see CPUFLG_xxx, below */

	/* Per processor counter register (sun4m only) */
	volatile struct counter_4m	*counterreg_4m;

	/* Per processor interrupt mask register (sun4m only) */
	volatile struct icr_pi	*intreg_4m;
	/*
	 * Send an IPI to (cpi).  For Ross cpus we need to read
	 * the pending register to avoid a hardware bug.
	 * (The __USE() keeps the otherwise-dead read-back from
	 * triggering a set-but-unused warning.)
	 */
#define raise_ipi(cpi,lvl)	do {			\
	volatile int x;						\
	(cpi)->intreg_4m->pi_set = PINTR_SINTRLEV(lvl);	\
	x = (cpi)->intreg_4m->pi_pend; __USE(x);	\
} while (0)

	int		sun4_mmu3l;	/* [4]: 3-level MMU present */
#if defined(SUN4_MMU3L)
#define HASSUN4_MMU3L	(cpuinfo.sun4_mmu3l)
#else
#define HASSUN4_MMU3L	(0)
#endif
	int		ci_idepth;		/* Interrupt depth */

	/*
	 * The following pointers point to processes that are somehow
	 * associated with this CPU--running on it, using its FPU,
	 * etc.
	 */
	struct	lwp	*ci_curlwp;		/* CPU owner */
	struct	lwp 	*fplwp;			/* FPU owner */

	int		ci_mtx_count;		/* MI mutex spin-count bookkeeping */
	int		ci_mtx_oldspl;		/* spl to restore on mutex release */

	/*
	 * Idle PCB and Interrupt stack;
	 */
	void		*eintstack;		/* End of interrupt stack */
#define INT_STACK_SIZE	(128 * 128)		/* 128 128-byte stack frames */
	void		*redzone;		/* DEBUG: stack red zone */
#define REDSIZE		(8*96)			/* some room for bouncing */

	struct	pcb	*curpcb;		/* CPU's PCB & kernel stack */

	/* locore defined: */
	void	(*get_syncflt)(void);		/* Not C-callable */
	int	(*get_asyncflt)(u_int *, u_int *);

	/* Synchronous Fault Status; temporary storage */
	struct {
		int	sfsr;			/* sync fault status register */
		int	sfva;			/* sync fault virtual address */
	} syncfltdump;

	/*
	 * Cache handling functions.
	 * Most cache flush functions come in two flavours: one that
	 * acts only on the CPU it executes on (`sp_' prefix), and
	 * another that uses inter-processor signals to flush the
	 * cache on all processor modules.
	 * The `ft_' versions are fast trap cache flush handlers.
	 */
	void	(*cache_flush)(void *, u_int);
	void	(*vcache_flush_page)(int, int);
	void	(*sp_vcache_flush_page)(int, int);
	void	(*ft_vcache_flush_page)(int, int);
	void	(*vcache_flush_segment)(int, int, int);
	void	(*sp_vcache_flush_segment)(int, int, int);
	void	(*ft_vcache_flush_segment)(int, int, int);
	void	(*vcache_flush_region)(int, int);
	void	(*sp_vcache_flush_region)(int, int);
	void	(*ft_vcache_flush_region)(int, int);
	void	(*vcache_flush_context)(int);
	void	(*sp_vcache_flush_context)(int);
	void	(*ft_vcache_flush_context)(int);

	/* These are helpers for (*cache_flush)() */
	void	(*sp_vcache_flush_range)(int, int, int);
	void	(*ft_vcache_flush_range)(int, int, int);

	void	(*pcache_flush_page)(paddr_t, int);
	void	(*pure_vcache_flush)(void);
	void	(*cache_flush_all)(void);

	/* Support for hardware-assisted page clear/copy */
	void	(*zero_page)(paddr_t);
	void	(*copy_page)(paddr_t, paddr_t);

	/* Virtual addresses for use in pmap copy_page/zero_page */
	void *	vpage[2];
	int	*vpage_pte[2];		/* pte location of vpage[] */

	void	(*cache_enable)(void);

	int	cpu_type;	/* Type: see CPUTYP_xxx below */

	/* Inter-processor message area (high priority but used infrequently) */
	struct xpmsg	msg_lev15;

	/* CPU information */
	int		node;		/* PROM node for this CPU */
	int		mid;		/* Module ID for MP systems */
	int		mbus;		/* 1 if CPU is on MBus */
	int		mxcc;		/* 1 if a MBus-level MXCC is present */
	const char	*cpu_longname;	/* CPU model */
	int		cpu_impl;	/* CPU implementation code */
	int		cpu_vers;	/* CPU version code */
	int		mmu_impl;	/* MMU implementation code */
	int		mmu_vers;	/* MMU version code */
	int		master;		/* 1 if this is bootup CPU */

	vaddr_t		mailbox;	/* VA of CPU's mailbox */

	int		mmu_ncontext;	/* Number of contexts supported */
	int		mmu_nregion; 	/* Number of regions supported */
	int		mmu_nsegment;	/* [4/4c] Segments */
	int		mmu_npmeg;	/* [4/4c] Pmegs */

/* XXX - we currently don't actually use the following */
	int		arch;		/* Architecture: CPU_SUN4x */
	int		class;		/* Class: SuperSPARC, microSPARC... */
	int		classlvl;	/* Iteration in class: 1, 2, etc. */
	int		classsublvl;	/* stepping in class (version) */

	int		hz;		/* Clock speed */

	/* FPU information */
	int		fpupresent;	/* true if FPU is present */
	int		fpuvers;	/* FPU revision */
	const char	*fpu_name;	/* FPU model */
	char		fpu_namebuf[32];/* Buffer for FPU name, if necessary */

	/* XXX */
	volatile void	*ci_ddb_regs;		/* DDB regs */

	/*
	 * The following are function pointers to do interesting CPU-dependent
	 * things without having to do type-tests all the time
	 */

	/* bootup things: access to physical memory */
	u_int	(*read_physmem)(u_int addr, int space);
	void	(*write_physmem)(u_int addr, u_int data);
	void	(*cache_tablewalks)(void);
	void	(*mmu_enable)(void);
	void	(*hotfix)(struct cpu_info *);


#if 0
	/* hardware-assisted block operation routines */
	void		(*hwbcopy)(const void *from, void *to, size_t len);
	void		(*hwbzero)(void *buf, size_t len);

	/* routine to clear mbus-sbus buffers */
	void		(*mbusflush)(void);
#endif

	/*
	 * Memory error handler; parity errors, unhandled NMIs and other
	 * unrecoverable faults end up here.
	 */
	void		(*memerr)(unsigned, u_int, u_int, struct trapframe *);
	void		(*idlespin)(struct cpu_info *);
	/* Module Control Registers */
	/*bus_space_handle_t*/ long ci_mbusport;
	/*bus_space_handle_t*/ long ci_mxccregs;

	u_int	ci_tt;			/* Last trap (if tracing) */

	/*
	 * Start/End VA's of this cpu_info region; we upload the other pages
	 * in this region that aren't part of the cpu_info to uvm.
	 */
	vaddr_t	ci_free_sva1, ci_free_eva1, ci_free_sva2, ci_free_eva2;

	/* Event counters (evcnt(9)) for FPU state saves and cross calls */
	struct evcnt ci_savefpstate;
	struct evcnt ci_savefpstate_null;
	struct evcnt ci_xpmsg_mutex_fail;
	struct evcnt ci_xpmsg_mutex_fail_call;
	struct evcnt ci_xpmsg_mutex_not_held;
	struct evcnt ci_xpmsg_bogus;
	struct evcnt ci_intrcnt[16];	/* per-hardware-level interrupt counts */
	struct evcnt ci_sintrcnt[16];	/* per-soft-level interrupt counts */
};
341 
/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
/*
 * `cpuinfo' is this CPU's own cpu_info, reached through the per-CPU
 * alias mapping at CPUINFO_VA (the same VA on every CPU).  curcpu()
 * must yield the globally-valid address, hence the ci_self hop
 * (see the comment on ci_self in struct cpu_info).
 */
#define	cpuinfo			(*(struct cpu_info *)CPUINFO_VA)
#define	curcpu()		(cpuinfo.ci_self)
#define	curlwp			(cpuinfo.ci_curlwp)
#define	CPU_IS_PRIMARY(ci)	((ci)->master)

#define	cpu_number()		(cpuinfo.ci_cpuid)
352 
353 #endif /* _KERNEL || _KMEMUSER */
354 
355 /* Kernel only things. */
356 #if defined(_KERNEL)
/* MD hook run at fork time to copy machine-dependent proc state. */
void	cpu_proc_fork(struct proc *, struct proc *);

#if defined(MULTIPROCESSOR)
/* Called late in boot to spin up the non-boot CPUs. */
void	cpu_boot_secondary_processors(void);
#endif
362 
363 /*
364  * Arguments to hardclock, softclock and statclock encapsulate the
365  * previous machine state in an opaque clockframe.  The ipl is here
366  * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
367  * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
368  */
struct clockframe {
	u_int	psr;		/* psr before interrupt, excluding PSR_ET */
	u_int	pc;		/* pc at interrupt */
	u_int	npc;		/* npc at interrupt */
	u_int	ipl;		/* actual interrupt priority level */
	u_int	fp;		/* %fp at interrupt */
};
typedef struct clockframe clockframe;
377 
extern int eintstack[];

/* True if the frame was taken while in user mode (PSR_PS clear). */
#define	CLKF_USERMODE(framep)	(((framep)->psr & PSR_PS) == 0)
/* True if the saved PIL is below (n); PSR_PIL sits at bits 8-11. */
#define	CLKF_LOPRI(framep,n)	(((framep)->psr & PSR_PIL) < (n) << 8)
#define	CLKF_PC(framep)		((framep)->pc)
/*
 * CLKF_INTR: was the frame taken while running on the interrupt stack?
 * On MP each CPU has its own stack ending at cpuinfo.eintstack; on UP
 * the interrupt stack sits directly below the global eintstack symbol.
 */
#if defined(MULTIPROCESSOR)
#define	CLKF_INTR(framep)						\
	((framep)->fp > (u_int)cpuinfo.eintstack - INT_STACK_SIZE &&	\
	 (framep)->fp < (u_int)cpuinfo.eintstack)
#else
#define	CLKF_INTR(framep)	((framep)->fp < (u_int)eintstack)
#endif

void	sparc_softintr_init(void);
392 
393 /*
394  * Preempt the current process on the target CPU if in interrupt from
395  * user mode, or after the current trap/syscall if in system mode.
396  */
/*
 * Request a reschedule on (ci): raise its resched + AST flags, then
 * poke it with a no-op cross call so the AST is noticed promptly.
 * __USE(flags) silences the unused warning when the expansion that
 * reads (flags) is compiled out.
 */
#define cpu_need_resched(ci, flags) do {				\
	__USE(flags);							\
	(ci)->ci_want_resched = 1;					\
	(ci)->ci_want_ast = 1;						\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if (((flags) & RESCHED_IMMED) || (ci)->ci_cpuid != cpu_number()) \
		XCALL0(sparc_noop, 1U << (ci)->ci_cpuid);		\
} while (/*CONSTCOND*/0)
406 
407 /*
408  * Give a profiling tick to the current process when the user profiling
409  * buffer pages are invalid.  On the sparc, request an ast to send us
410  * through trap(), marking the proc as needing a profiling tick.
411  */
#define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, cpuinfo.ci_want_ast = 1)
413 
414 /*
415  * Notify the current process (p) that it has a signal pending,
416  * process as soon as possible.
417  */
/*
 * Post an AST on the lwp's CPU; if that is not the calling CPU,
 * nudge it with a no-op cross call so the AST is taken promptly.
 */
#define cpu_signotify(l) do {						\
	(l)->l_cpu->ci_want_ast = 1;					\
									\
	/* Just interrupt the target CPU, so it can notice its AST */	\
	if ((l)->l_cpu->ci_cpuid != cpu_number())			\
		XCALL0(sparc_noop, 1U << (l)->l_cpu->ci_cpuid);		\
} while (/*CONSTCOND*/0)
425 
426 /* CPU architecture version */
/* CPU architecture version (sysctl CPU_ARCH reports this) */
extern int cpu_arch;

/* Number of CPUs in the system */
extern int sparc_ncpus;

/* Provide %pc of a lwp, from its saved trapframe */
#define LWP_PC(l)       ((l)->l_md.md_tf->tf_pc)
434 
435 /*
436  * Interrupt handler chains.  Interrupt handlers should return 0 for
437  * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
438  * handler into the list.  The handler is called with its (single)
439  * argument, or with a pointer to a clockframe if ih_arg is NULL.
440  *
441  * realfun/realarg are used to chain callers, usually with the
442  * biglock wrapper.
443  */
extern struct intrhand {
	int	(*ih_fun)(void *);	/* handler; returns 0 = not mine, 1 = handled */
	void	*ih_arg;		/* handler arg; NULL => pass clockframe */
	struct	intrhand *ih_next;	/* next handler on this level's chain */
	int	ih_classipl;
	int	(*ih_realfun)(void *);	/* original fn when wrapped (biglock) */
	void	*ih_realarg;		/* original arg when wrapped */
} *intrhand[15];			/* one chain per hardware IPL 1..15 */

void	intr_establish(int, int, struct intrhand *, void (*)(void), bool);
void	intr_disestablish(int, struct intrhand *);

void	intr_lock_kernel(void);
void	intr_unlock_kernel(void);
458 
/* disksubr.c */
struct dkbad;
int isbad(struct dkbad *, int, int, int);

/* machdep.c */
int	ldcontrolb(void *);
void *	reserve_dumppages(void *);
void	wcopy(const void *, void *, u_int);
void	wzero(void *, u_int);

/* clock.c */
struct timeval;
void	lo_microtime(struct timeval *);
void	schedintr(void *);

/* locore.s */
struct fpstate;
void	ipi_savefpstate(struct fpstate *);
void	savefpstate(struct fpstate *);
void	loadfpstate(struct fpstate *);
int	probeget(void *, int);
void	write_all_windows(void);
void	write_user_windows(void);
void 	lwp_trampoline(void);
struct pcb;
void	snapshot(struct pcb *);
struct frame *getfp(void);
int	xldcontrolb(void *, struct pcb *);
void	copywords(const void *, void *, size_t);
void	qcopy(const void *, void *, size_t);
void	qzero(void *, size_t);

/* trap.c */
void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
int	rwindow_save(struct lwp *);

/* cons.c */
int	cnrom(void);

/* zs.c */
void zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
#ifdef KGDB
void zs_kgdb_init(void);
#endif

/* fb.c */
void	fb_unblank(void);

/* kgdb_stub.c */
#ifdef KGDB
void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
void kgdb_connect(int);
void kgdb_panic(void);
#endif

/* emul.c */
struct trapframe;
int fixalign(struct lwp *, struct trapframe *, void **);
int emulinstr(int, struct trapframe *);

/* cpu.c */
void mp_pause_cpus(void);
void mp_resume_cpus(void);
void mp_halt_cpus(void);
#ifdef DDB
void mp_pause_cpus_ddb(void);
void mp_resume_cpus_ddb(void);
#endif

/* intr.c */
/* set/get the processor interrupt level (PIL) */
u_int setitr(u_int);
u_int getitr(void);
531 
532 
533 /*
534  *
535  * The SPARC has a Trap Base Register (TBR) which holds the upper 20 bits
536  * of the trap vector table.  The next eight bits are supplied by the
537  * hardware when the trap occurs, and the bottom four bits are always
538  * zero (so that we can shove up to 16 bytes of executable code---exactly
539  * four instructions---into each trap vector).
540  *
541  * The hardware allocates half the trap vectors to hardware and half to
542  * software.
543  *
544  * Traps have priorities assigned (lower number => higher priority).
545  */
546 
/*
 * One trap vector: exactly four instructions (16 bytes), matching the
 * four always-zero low bits of the TBR described above.
 */
struct trapvec {
	int	tv_instr[4];		/* the four instructions */
};

extern struct trapvec *trapbase;	/* the 256 vectors */
552 
553 #endif /* _KERNEL */
554 #endif /* _CPU_H_ */
555