xref: /openbsd-src/sys/arch/sh/include/cpu.h (revision fc405d53b73a2d73393cb97f684863d17b583e38)
1 /*	$OpenBSD: cpu.h,v 1.34 2022/12/06 01:19:35 cheloha Exp $	*/
2 /*	$NetBSD: cpu.h,v 1.41 2006/01/21 04:24:12 uwe Exp $	*/
3 
4 /*-
5  * Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
6  * Copyright (c) 1990 The Regents of the University of California.
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * William Jolitz.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)cpu.h	5.4 (Berkeley) 5/9/91
37  */
38 
39 /*
40  * SH3/SH4 support.
41  *
42  *  T.Horiuchi    Brains Corp.   5/22/98
43  */
44 
45 #ifndef _SH_CPU_H_
46 #define	_SH_CPU_H_
47 
48 #include <sh/psl.h>
49 #include <sh/frame.h>
50 
51 #ifdef _KERNEL
52 
53 /*
54  * Per-CPU information.
55  */
56 
57 #include <machine/intr.h>
58 #include <sys/clockintr.h>
59 #include <sys/sched.h>
60 
struct cpu_info {
	struct proc *ci_curproc;	/* proc currently running on this CPU */

	struct schedstate_percpu ci_schedstate; /* scheduler state */
	u_int32_t ci_randseed;		/* per-CPU random seed */
#ifdef DIAGNOSTIC
	int	ci_mutex_level;		/* mutex depth, for DIAGNOSTIC checks */
#endif
#ifdef GPROF
	struct gmonparam *ci_gmon;	/* kernel profiling state */
#endif

	int	ci_want_resched;	/* nonzero: reschedule requested */

	struct clockintr_queue ci_queue; /* pending clock interrupts */

	char	ci_panicbuf[512];	/* scratch buffer used during panic */
};
79 
extern struct cpu_info cpu_info_store;

/*
 * Uniprocessor glue: there is exactly one statically allocated
 * cpu_info, so the MI per-CPU accessors collapse to constants.
 */
#define	curcpu()	(&cpu_info_store)
#define cpu_number()	0
#define CPU_IS_PRIMARY(ci)	1
#define CPU_IS_RUNNING(ci)	1
#define CPU_INFO_ITERATOR	int
/* Iterate over all CPUs: body runs exactly once, on the only CPU. */
#define CPU_INFO_FOREACH(cii, ci) \
	for (cii = 0, ci = curcpu(); ci != NULL; ci = NULL)
#define CPU_INFO_UNIT(ci)	0
#define MAXCPUS	1
#define cpu_unidle(ci)

/* Nothing useful to do while spin-waiting. */
#define CPU_BUSY_CYCLE()	do {} while (0)
93 
94 
/*
 * Arguments to hardclock and gatherstats encapsulate the previous
 * machine state in an opaque clockframe.
 */
struct clockframe {
	int	spc;	/* program counter at time of interrupt */
	int	ssr;	/* status register at time of interrupt */
	int	ssp;	/* stack pointer at time of interrupt */
};

/* True if the clock interrupt was taken while running in user mode. */
#define	CLKF_USERMODE(cf)	(!KERNELMODE((cf)->ssr))
#define	CLKF_PC(cf)		((cf)->spc)
#define	CLKF_INTR(cf)		0	/* XXX */

/*
 * This is used during profiling to integrate system time.  It can safely
 * assume that the process is resident.
 */
#define	PROC_PC(p)	((p)->p_md.md_regs->tf_spc)
#define	PROC_STACK(p)	((p)->p_md.md_regs->tf_r15)	/* r15 is the SH stack pointer */

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
void need_resched(struct cpu_info *);
#define clear_resched(ci) 	(ci)->ci_want_resched = 0

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the MIPS, request an ast to send us
 * through trap, marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	aston(p)

/*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
#define	signotify(p)	aston(p)

/* Request an AST; the flag is checked on the way back to user mode. */
#define	aston(p)	((p)->p_md.md_astpending = 1)

/*
 * We need a machine-independent name for this.
 */
#define	DELAY(x)		delay(x)

/* Idle loop hooks: the SH "sleep" instruction halts until an interrupt. */
#define	cpu_idle_enter()	do { /* nothing */ } while (0)
#define	cpu_idle_cycle()	__asm volatile("sleep")
#define	cpu_idle_leave()	do { /* nothing */ } while (0)
146 
147 #endif /* _KERNEL */
148 
/*
 * Logical address space of SH3/SH4 CPU.
 */
#define	SH3_PHYS_MASK	0x1fffffff	/* low 29 bits carry the physical address */

#define	SH3_P0SEG_BASE	0x00000000	/* TLB mapped, also U0SEG */
#define	SH3_P0SEG_END	0x7fffffff
#define	SH3_P1SEG_BASE	0x80000000	/* pa == va */
#define	SH3_P1SEG_END	0x9fffffff
#define	SH3_P2SEG_BASE	0xa0000000	/* pa == va, non-cacheable */
#define	SH3_P2SEG_END	0xbfffffff
#define	SH3_P3SEG_BASE	0xc0000000	/* TLB mapped, kernel mode */
#define	SH3_P3SEG_END	0xdfffffff
#define	SH3_P4SEG_BASE	0xe0000000	/* peripheral space */
#define	SH3_P4SEG_END	0xffffffff

#define	SH3_P1SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
#define	SH3_P2SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
#define	SH3_PHYS_TO_P1SEG(x)	((uint32_t)(x) | SH3_P1SEG_BASE)
#define	SH3_PHYS_TO_P2SEG(x)	((uint32_t)(x) | SH3_P2SEG_BASE)
/* P1 and P2 map the same physical memory; they differ only in bit 29. */
#define	SH3_P1SEG_TO_P2SEG(x)	((uint32_t)(x) | 0x20000000)
#define	SH3_P2SEG_TO_P1SEG(x)	((uint32_t)(x) & ~0x20000000)
171 
172 #ifdef _KERNEL
173 #ifndef __lint__
174 
/*
 * Switch from P1 (cached) to P2 (uncached).  This used to be written
 * using gcc's assigned goto extension, but gcc4 aggressive optimizations
 * tend to optimize that away under certain circumstances.
 */
#define RUN_P2						\
	do {						\
		register uint32_t r0 asm("r0");		\
		uint32_t pc;				\
		__asm volatile(				\
			"	mov.l	1f, %1	;"	/* %1 = 0x20000000 (the P2 bit) */ \
			"	mova	2f, %0	;"	/* %0 = address of label 2 (mova targets r0) */ \
			"	or	%0, %1	;"	/* set bit 29: P1 address -> P2 alias */ \
			"	jmp	@%1	;"	/* continue execution at the P2 alias */ \
			"	 nop		;"	/* branch delay slot */ \
			"	.align 2	;"	\
			"1:	.long	0x20000000;"	\
			"2:;"				\
			: "=r"(r0), "=r"(pc));		\
	} while (0)
195 
/*
 * Switch from P2 (uncached) back to P1 (cached).  We need to be
 * running on P2 to access cache control, memory-mapped cache and TLB
 * arrays, etc. and after touching them at least 8 instructions are
 * necessary before jumping to P1, so provide that padding here.
 */
#define RUN_P1						\
	do {						\
		register uint32_t r0 asm("r0");		\
		uint32_t pc;				\
		__asm volatile(				\
		/*1*/	"	mov.l	1f, %1	;"	/* %1 = ~0x20000000 mask */ \
		/*2*/	"	mova	2f, %0	;"	/* %0 = address of label 2 (mova targets r0) */ \
		/*3*/	"	nop		;"	\
		/*4*/	"	and	%0, %1	;"	/* clear bit 29: P2 alias -> P1 address */ \
		/*5*/	"	nop		;"	\
		/*6*/	"	nop		;"	\
		/*7*/	"	nop		;"	\
		/*8*/	"	nop		;"	/* numbered ops pad out the required 8 insns */ \
			"	jmp	@%1	;"	\
			"	 nop		;"	/* branch delay slot */ \
			"	.align 2	;"	\
			"1:	.long	~0x20000000;"	\
			"2:;"				\
			: "=r"(r0), "=r"(pc));		\
	} while (0)
222 
/*
 * If RUN_P1 is the last thing we do in a function we can omit it, b/c
 * we are going to return to a P1 caller anyway, but we still need to
 * ensure there's at least 8 instructions before jump to P1.
 */
#define PAD_P1_SWITCH	__asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop;")

#else  /* __lint__ */
/* lint cannot parse the inline assembly above; stub these out. */
#define	RUN_P2		do {} while (/* CONSTCOND */ 0)
#define	RUN_P1		do {} while (/* CONSTCOND */ 0)
#define	PAD_P1_SWITCH	do {} while (/* CONSTCOND */ 0)
#endif
235 #endif
236 
#if defined(SH4)
/* SH4 Processor Version Register */
#define	SH4_PVR_ADDR	0xff000030	/* P4  address */
#define	SH4_PVR		(*(volatile uint32_t *) SH4_PVR_ADDR)
#define	SH4_PRR_ADDR	0xff000044	/* P4  address */
#define	SH4_PRR		(*(volatile uint32_t *) SH4_PRR_ADDR)

/* PVR values identifying the CPU model (compare after masking). */
#define	SH4_PVR_MASK	0xffffff00
#define	SH4_PVR_SH7750	0x04020500	/* SH7750  */
#define	SH4_PVR_SH7750S	0x04020600	/* SH7750S */
#define	SH4_PVR_SH775xR	0x04050000	/* SH775xR */
#define	SH4_PVR_SH7751	0x04110000	/* SH7751  */

/* PRR values distinguishing the "R" revisions of the above. */
#define	SH4_PRR_MASK	0xfffffff0
#define SH4_PRR_7750R	0x00000100	/* SH7750R */
#define SH4_PRR_7751R	0x00000110	/* SH7751R */
#endif
254 
255 /*
256  * pull in #defines for kinds of processors
257  */
258 #include <machine/cputypes.h>
259 
#ifdef _KERNEL
void sh_cpu_init(int, int);
void sh_startup(void);
__dead void cpu_reset(void);	/* soft reset */
void _cpu_spin(uint32_t);	/* for delay loop. */
void delay(int);
struct pcb;
void savectx(struct pcb *);	/* save current context into a pcb */
struct fpreg;
void fpu_save(struct fpreg *);
void fpu_restore(struct fpreg *);
/* Kernel crash dump support. */
u_int cpu_dump(int (*)(dev_t, daddr_t, caddr_t, size_t), daddr_t *);
u_int cpu_dumpsize(void);
void dumpconf(void);
void dumpsys(void);
unsigned int cpu_rnd_messybits(void);	/* entropy source for random(9) input -- verify */
276 
277 static inline u_long
278 intr_disable(void)
279 {
280 	return (u_long)_cpu_intr_suspend();
281 }
282 
283 static inline void
284 intr_restore(u_long s)
285 {
286 	_cpu_intr_resume((int)s);
287 }
288 #endif /* _KERNEL */
289 #endif /* !_SH_CPU_H_ */
290