xref: /netbsd-src/sys/arch/sparc64/include/cpu.h (revision 867d70fc718005c0918b8b8b2f9d7f2d52d0a0db)
1 /*	$NetBSD: cpu.h,v 1.133 2021/08/14 17:51:19 ryo Exp $ */
2 
3 /*
4  * Copyright (c) 1992, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This software was developed by the Computer Systems Engineering group
8  * at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
9  * contributed to Berkeley.
10  *
11  * All advertising materials mentioning features or use of this software
12  * must display the following acknowledgement:
13  *	This product includes software developed by the University of
14  *	California, Lawrence Berkeley Laboratory.
15  *
16  * Redistribution and use in source and binary forms, with or without
17  * modification, are permitted provided that the following conditions
18  * are met:
19  * 1. Redistributions of source code must retain the above copyright
20  *    notice, this list of conditions and the following disclaimer.
21  * 2. Redistributions in binary form must reproduce the above copyright
22  *    notice, this list of conditions and the following disclaimer in the
23  *    documentation and/or other materials provided with the distribution.
24  * 3. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *	@(#)cpu.h	8.4 (Berkeley) 1/5/94
41  */
42 
43 #ifndef _CPU_H_
44 #define _CPU_H_
45 
46 /*
47  * CTL_MACHDEP definitions.
48  */
49 #define	CPU_BOOTED_KERNEL	1	/* string: booted kernel name */
50 #define	CPU_BOOTED_DEVICE	2	/* string: device booted from */
51 #define	CPU_BOOT_ARGS		3	/* string: args booted with */
52 #define	CPU_ARCH		4	/* integer: cpu architecture version */
53 #define CPU_VIS			5	/* 0 - no VIS, 1 - VIS 1.0, etc. */
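/*
 * Example (sketch): these nodes live under the "machdep" sysctl subtree
 * and can be read from userland, e.g. with sysctlbyname(3).  The
 * variable name "machdep.booted_kernel" is the conventional mapping for
 * CPU_BOOTED_KERNEL and is shown for illustration only.
 *
 *	#include <sys/sysctl.h>
 *
 *	char kernel[512];
 *	size_t len = sizeof(kernel);
 *	if (sysctlbyname("machdep.booted_kernel", kernel, &len, NULL, 0) == 0)
 *		printf("booted kernel: %s\n", kernel);
 */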
54 
55 /*
56  * This is exported via sysctl for cpuctl(8).
57  */
58 struct cacheinfo {
59 	int 	c_itotalsize;
60 	int 	c_ilinesize;
61 	int 	c_dtotalsize;
62 	int 	c_dlinesize;
63 	int 	c_etotalsize;
64 	int 	c_elinesize;
65 };
66 
67 #if defined(_KERNEL) || defined(_KMEMUSER)
68 /*
69  * Exported definitions unique to SPARC cpu support.
70  */
71 
72 #if defined(_KERNEL_OPT)
73 #include "opt_gprof.h"
74 #include "opt_multiprocessor.h"
75 #include "opt_lockdebug.h"
76 #endif
77 
78 #include <machine/psl.h>
79 #include <machine/reg.h>
80 #include <machine/pte.h>
81 #include <machine/intr.h>
82 #if defined(_KERNEL)
83 #include <machine/bus_defs.h>
84 #include <machine/cpuset.h>
85 #include <sparc64/sparc64/intreg.h>
86 #endif
87 #ifdef SUN4V
88 #include <machine/hypervisor.h>
89 #endif
90 
91 #include <sys/cpu_data.h>
92 #include <sys/evcnt.h>
93 
94 /*
95  * The cpu_info structure is part of a 64KB structure mapped into both the
96  * kernel pmap and, by a single locked TTE, at CPUINFO_VA for that particular
97  * processor.  Each processor's cpu_info is accessible at CPUINFO_VA only on
98  * that processor itself; other processors can access it through the
99  * additional mapping in the kernel pmap.
100  *
101  * The 64KB page contains:
102  *
103  * cpu_info
104  * interrupt stack (all remaining space)
105  * idle PCB
106  * idle stack (STACKSPACE - sizeof(PCB))
107  * 32KB TSB
108  */
109 
110 struct cpu_info {
111 	struct cpu_data		ci_data;	/* MI per-cpu data */
112 
113 
114 	/*
115 	 * SPARC cpu_info structures live at two VAs: one global
116 	 * VA (so each CPU can access any other CPU's cpu_info)
117 	 * and an alias VA CPUINFO_VA which is the same on each
118 	 * CPU and maps to that CPU's cpu_info.  Since the alias
119 	 * CPUINFO_VA is how we locate our cpu_info, we have to
120 	 * self-reference the global VA so that we can return it
121 	 * in the curcpu() macro.
122 	 */
123 	struct cpu_info * volatile ci_self;
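	/*
	 * Example: the curcpu() macro defined later in this file resolves to
	 *	((struct cpu_info *)CPUINFO_VA)->ci_self
	 * i.e. it reads this field through the per-CPU alias mapping to
	 * recover the globally visible address of this cpu_info.
	 */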
124 
125 	/* Most important fields first */
126 	struct lwp		*ci_curlwp;
127 	struct lwp		*ci_onproc;	/* current user LWP / kthread */
128 	struct pcb		*ci_cpcb;
129 	struct cpu_info		*ci_next;
130 
131 	struct lwp		*ci_fplwp;
132 
133 	void			*ci_eintstack;
134 
135 	int			ci_mtx_count;
136 	int			ci_mtx_oldspl;
137 
138 	/* Spinning up the CPU */
139 	void			(*ci_spinup)(void);
140 	paddr_t			ci_paddr;
141 
142 	int			ci_cpuid;
143 
144 	uint64_t		ci_ver;
145 
146 	/* CPU PROM information. */
147 	u_int			ci_node;
148 	const char		*ci_name;
149 
150 	/* This is for sysctl. */
151 	struct cacheinfo	ci_cacheinfo;
152 
153 	/* %tick and cpu frequency information */
154 	u_long			ci_tick_increment;
155 	uint64_t		ci_cpu_clockrate[2];	/* %tick */
156 	uint64_t		ci_system_clockrate[2];	/* %stick */
157 
158 	/* Interrupts */
159 	struct intrhand		*ci_intrpending[16];
160 	struct intrhand		*ci_tick_ih;
161 
162 	/* Event counters */
163 	struct evcnt		ci_tick_evcnt;
164 
165 	/* Could be under MULTIPROCESSOR, but there is no good reason to hide it */
166 	struct evcnt		ci_ipi_evcnt[IPI_EVCNT_NUM];
167 
168 	int			ci_flags;
169 	int			ci_want_ast;
170 	int			ci_want_resched;
171 	int			ci_idepth;
172 
173 /*
174  * A context is simply a small number that differentiates multiple mappings
175  * of the same address.  Contexts on the spitfire are 13 bits, but could
176  * be as large as 17 bits.
177  *
178  * Each context is either free or attached to a pmap.
179  *
180  * The context table is an array of pointers to psegs.  Just dereference
181  * the right pointer and you get to the pmap segment tables.  These are
182  * physical addresses, of course.
183  *
184  * ci_ctx_lock protects this CPU's context allocation and freeing.
185  * These fields are all allocated almost within the same cacheline.
186  */
187 	kmutex_t		ci_ctx_lock;
188 	int			ci_pmap_next_ctx;
189 	int			ci_numctx;
190 	paddr_t 		*ci_ctxbusy;
191 	LIST_HEAD(, pmap) 	ci_pmap_ctxlist;
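	/*
	 * Example (sketch): as described above, finding the pmap segment
	 * table for a context is a single dereference of this CPU's context
	 * table; "ctxnum" is a hypothetical context number.
	 *
	 *	paddr_t pseg_pa = curcpu()->ci_ctxbusy[ctxnum];
	 */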
192 
193 	/*
194 	 * The TSBs are per cpu too (since MMU context differs between
195 	 * cpus). These are just caches for the TLBs.
196 	 */
197 	pte_t			*ci_tsb_dmmu;
198 	pte_t			*ci_tsb_immu;
199 
200 	/* TSB description (sun4v). */
201 	struct tsb_desc         *ci_tsb_desc;
202 
203 	/* MMU Fault Status Area (sun4v).
204 	 * Will be initialized to the physical address of the bottom of
205 	 * the interrupt stack.
206 	 */
207 	paddr_t			ci_mmufsa;
208 
209 	/*
210 	 * sun4v mondo control fields
211 	 */
212 	paddr_t			ci_cpumq;  /* cpu mondo queue address */
213 	paddr_t			ci_devmq;  /* device mondo queue address */
214 	paddr_t			ci_cpuset; /* mondo recipient address */
215 	paddr_t			ci_mondo;  /* mondo message address */
216 
217 	/* probe fault in PCI config space reads */
218 	bool			ci_pci_probe;
219 	bool			ci_pci_fault;
220 
221 	volatile void		*ci_ddb_regs;	/* DDB regs */
222 
223 	void (*ci_idlespin)(void);
224 
225 #if defined(GPROF) && defined(MULTIPROCESSOR)
226 	struct gmonparam *ci_gmon;	/* MI per-cpu GPROF */
227 #endif
228 };
229 
230 #endif /* _KERNEL || _KMEMUSER */
231 
232 #ifdef _KERNEL
233 
234 #define CPUF_PRIMARY	1
235 
236 /*
237  * CPU boot arguments.  Used by secondary CPUs at bootstrap time.
238  */
239 struct cpu_bootargs {
240 	u_int	cb_node;	/* PROM CPU node */
241 	volatile int cb_flags;
242 
243 	vaddr_t cb_ktext;
244 	paddr_t cb_ktextp;
245 	vaddr_t cb_ektext;
246 
247 	vaddr_t cb_kdata;
248 	paddr_t cb_kdatap;
249 	vaddr_t cb_ekdata;
250 
251 	paddr_t	cb_cpuinfo;
252 	int cb_cputyp;
253 };
254 
255 extern struct cpu_bootargs *cpu_args;
256 
257 #if defined(MULTIPROCESSOR)
258 extern int sparc_ncpus;
259 #else
260 #define sparc_ncpus 1
261 #endif
262 
263 extern struct cpu_info *cpus;
264 extern struct pool_cache *fpstate_cache;
265 
266 /* CURCPU_INT(): the local (per-CPU) view of our own cpu_info */
267 #define	CURCPU_INT()	((struct cpu_info *)CPUINFO_VA)
268 /* in general we prefer the globally visible pointer */
269 #define	curcpu()	(CURCPU_INT()->ci_self)
270 #define	cpu_number()	(curcpu()->ci_index)
271 #define	CPU_IS_PRIMARY(ci)	((ci)->ci_flags & CPUF_PRIMARY)
272 
273 #define CPU_INFO_ITERATOR		int __unused
274 #define CPU_INFO_FOREACH(cii, ci)	ci = cpus; ci != NULL; ci = ci->ci_next
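/*
 * Example (sketch): the usual MI idiom for walking every CPU with the
 * iterator macros above.
 *
 *	struct cpu_info *ci;
 *	CPU_INFO_ITERATOR cii;
 *
 *	for (CPU_INFO_FOREACH(cii, ci)) {
 *		if (CPU_IS_PRIMARY(ci))
 *			continue;
 *		... per-cpu work on the secondary CPUs ...
 *	}
 */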
275 
276 /* these are only valid on the local cpu */
277 #define curlwp		CURCPU_INT()->ci_curlwp
278 #define fplwp		CURCPU_INT()->ci_fplwp
279 #define curpcb		CURCPU_INT()->ci_cpcb
280 #define want_ast	CURCPU_INT()->ci_want_ast
281 
282 /*
283  * definitions of cpu-dependent requirements
284  * referenced in generic code
285  */
286 #define	cpu_wait(p)	/* nothing */
287 void cpu_proc_fork(struct proc *, struct proc *);
288 
289 /* run on the cpu itself */
290 void	cpu_pmap_init(struct cpu_info *);
291 /* run upfront to prepare the cpu_info */
292 void	cpu_pmap_prepare(struct cpu_info *, bool);
293 
294 /* Helper functions to retrieve cache info */
295 int	cpu_ecache_associativity(int node);
296 int	cpu_ecache_size(int node);
297 
298 #if defined(MULTIPROCESSOR)
299 extern vaddr_t cpu_spinup_trampoline;
300 
301 extern  char   *mp_tramp_code;
302 extern  u_long  mp_tramp_code_len;
303 extern  u_long  mp_tramp_dtlb_slots, mp_tramp_itlb_slots;
304 extern  u_long  mp_tramp_func;
305 extern  u_long  mp_tramp_ci;
306 
307 void	cpu_hatch(void);
308 void	cpu_boot_secondary_processors(void);
309 
310 /*
311  * Call a function on other cpus:
312  *	multicast - send to everyone in the sparc64_cpuset_t
313  *	broadcast - send to all cpus but ourselves
314  *	send - send to just this cpu
315  * The called functions do not follow the C ABI, so they need to be coded
316  * in assembler.
317  */
318 typedef void (* ipifunc_t)(void *, void *);
319 
320 void	sparc64_multicast_ipi(sparc64_cpuset_t, ipifunc_t, uint64_t, uint64_t);
321 void	sparc64_broadcast_ipi(ipifunc_t, uint64_t, uint64_t);
322 extern void (*sparc64_send_ipi)(int, ipifunc_t, uint64_t, uint64_t);
323 
324 /*
325  * Call an arbitrary C function on another cpu (or on all others but ourselves)
326  */
327 typedef void (*ipi_c_call_func_t)(void*);
328 void	sparc64_generic_xcall(struct cpu_info*, ipi_c_call_func_t, void*);
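/*
 * Example (sketch): running an ordinary C function on another cpu via the
 * generic cross-call above.  "do_cache_flush" and "sc" are hypothetical
 * names used only for illustration.
 *
 *	static void do_cache_flush(void *arg) { ... }
 *	...
 *	sparc64_generic_xcall(ci, do_cache_flush, sc);
 */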
329 
330 #endif
331 
332 /* Provide %pc of a lwp */
333 #define	LWP_PC(l)	((l)->l_md.md_tf->tf_pc)
334 
335 /*
336  * Arguments to hardclock, softclock and gatherstats encapsulate the
337  * previous machine state in an opaque clockframe.  The ipl is here
338  * as well for strayintr (see locore.s:interrupt and intr.c:strayintr).
339  * Note that CLKF_INTR is valid only if CLKF_USERMODE is false.
340  */
341 struct clockframe {
342 	struct trapframe64 t;
343 };
344 
345 #define	CLKF_USERMODE(framep)	(((framep)->t.tf_tstate & TSTATE_PRIV) == 0)
346 #define	CLKF_PC(framep)		((framep)->t.tf_pc)
347 /* Since some files in sys/kern do not know BIAS, I'm using 0x7ff here */
348 #define	CLKF_INTR(framep)						\
349 	((!CLKF_USERMODE(framep))&&					\
350 		(((framep)->t.tf_out[6] & 1 ) ?				\
351 			(((vaddr_t)(framep)->t.tf_out[6] <		\
352 				(vaddr_t)EINTSTACK-0x7ff) &&		\
353 			((vaddr_t)(framep)->t.tf_out[6] >		\
354 				(vaddr_t)INTSTACK-0x7ff)) :		\
355 			(((vaddr_t)(framep)->t.tf_out[6] <		\
356 				(vaddr_t)EINTSTACK) &&			\
357 			((vaddr_t)(framep)->t.tf_out[6] >		\
358 				(vaddr_t)INTSTACK))))
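/*
 * Worked example of the bias handling above: a 64-bit frame saves %sp
 * biased down by 2047 (0x7ff), which also sets the low bit.  A saved
 * value of (INTSTACK + 0x400 - 0x7ff) therefore corresponds to a real
 * stack pointer of INTSTACK + 0x400, and comparing it against the
 * similarly biased bounds classifies it, correctly, as lying on the
 * interrupt stack.
 */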
359 
360 /*
361  * Give a profiling tick to the current process when the user profiling
362  * buffer pages are invalid.  On the sparc, request an ast to send us
363  * through trap(), marking the proc as needing a profiling tick.
364  */
365 #define	cpu_need_proftick(l)	((l)->l_pflag |= LP_OWEUPC, want_ast = 1)
366 
367 /*
368  * Notify an LWP that it has a signal pending, to be processed as soon as possible.
369  */
370 void cpu_signotify(struct lwp *);
371 
372 
373 /*
374  * Interrupt handler chains.  Interrupt handlers should return 0 for
375  * ``not me'' or 1 (``I took care of it'').  intr_establish() inserts a
376  * handler into the list.  The handler is called with its (single)
377  * argument, or with a pointer to a clockframe if ih_arg is NULL.
378  */
379 struct intrhand {
380 	int			(*ih_fun)(void *);
381 	void			*ih_arg;
382 	/* if we have to take the biglock, we interpose a wrapper
383 	 * and need to save the original function and arg */
384 	int			(*ih_realfun)(void *);
385 	void			*ih_realarg;
386 	short			ih_number;	/* interrupt number */
387 						/* the H/W provides */
388 	char			ih_pil;		/* interrupt priority */
389 	struct intrhand		*ih_next;	/* global list */
390 	struct intrhand		*ih_pending;	/* interrupt queued */
391 	volatile uint64_t	*ih_map;	/* Interrupt map reg */
392 	volatile uint64_t	*ih_clr;	/* clear interrupt reg */
393 	void			(*ih_ack)(struct intrhand *); /* ack interrupt function */
394 	bus_space_tag_t		ih_bus;		/* parent bus */
395 	struct evcnt		ih_cnt;		/* counter for vmstat */
396 	uint32_t		ih_ivec;
397 	char			ih_name[32];	/* name for the above */
398 };
399 extern struct intrhand *intrhand[];
400 extern struct intrhand *intrlev[MAXINTNUM];
401 
402 void	intr_establish(int level, bool mpsafe, struct intrhand *);
403 void	*sparc_softintr_establish(int, int (*)(void *), void *);
404 void	sparc_softintr_schedule(void *);
405 void	sparc_softintr_disestablish(void *);
406 struct intrhand *intrhand_alloc(void);
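/*
 * Example (sketch): attaching a hardware interrupt handler through the
 * interface above.  "mydev_intr", "sc", "ino" and "pil" are hypothetical
 * names used only for illustration.
 *
 *	struct intrhand *ih = intrhand_alloc();
 *	ih->ih_fun = mydev_intr;
 *	ih->ih_arg = sc;
 *	ih->ih_pil = pil;
 *	ih->ih_number = ino;
 *	intr_establish(ih->ih_pil, true, ih);
 */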
407 
408 /* cpu.c */
409 int	cpu_myid(void);
410 
411 /* disksubr.c */
412 struct dkbad;
413 int isbad(struct dkbad *bt, int, int, int);
414 /* machdep.c */
415 void *	reserve_dumppages(void *);
416 /* clock.c */
417 struct timeval;
418 int	tickintr(void *);	/* level 10/14 (tick) interrupt code */
419 int	stickintr(void *);	/* system tick interrupt code */
420 int	stick2eintr(void *);	/* system tick interrupt code */
421 int	clockintr(void *);	/* level 10 (clock) interrupt code */
422 int	statintr(void *);	/* level 14 (statclock) interrupt code */
423 int	schedintr(void *);	/* level 10 (schedclock) interrupt code */
424 void	tickintr_establish(int, int (*)(void *));
425 void	stickintr_establish(int, int (*)(void *));
426 void	stick2eintr_establish(int, int (*)(void *));
427 
428 /* locore.s */
429 struct fpstate64;
430 void	savefpstate(struct fpstate64 *);
431 void	loadfpstate(struct fpstate64 *);
432 void	clearfpstate(void);
433 uint64_t	probeget(paddr_t, int, int);
434 int	probeset(paddr_t, int, int, uint64_t);
435 void	setcputyp(int);
436 
437 #define	 write_all_windows() __asm volatile("flushw" : : )
438 #define	 write_user_windows() __asm volatile("flushw" : : )
439 
440 struct pcb;
441 void	snapshot(struct pcb *);
442 struct frame *getfp(void);
443 void	switchtoctx_us(int);
444 void	switchtoctx_usiii(int);
445 void	next_tick(long);
446 void	next_stick(long);
447 void	next_stick_init(void);
448 /* trap.c */
449 void	cpu_vmspace_exec(struct lwp *, vaddr_t, vaddr_t);
450 int	rwindow_save(struct lwp *);
451 /* cons.c */
452 int	cnrom(void);
453 /* zs.c */
454 void zsconsole(struct tty *, int, int, void (**)(struct tty *, int));
455 /* fb.c */
456 void	fb_unblank(void);
457 /* kgdb_stub.c */
458 #ifdef KGDB
459 void kgdb_attach(int (*)(void *), void (*)(void *, int), void *);
460 void kgdb_connect(int);
461 void kgdb_panic(void);
462 #endif
463 /* emul.c */
464 int	fixalign(struct lwp *, struct trapframe64 *);
465 int	emulinstr(vaddr_t, struct trapframe64 *);
466 
467 #endif /* _KERNEL */
468 #endif /* _CPU_H_ */
469