xref: /netbsd-src/sys/arch/sh3/include/cpu.h (revision 404fbe5fb94ca1e054339640cabb2801ce52dd30)
1 /*	$NetBSD: cpu.h,v 1.53 2008/03/22 03:23:27 uwe Exp $	*/
2 
3 /*-
4  * Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
5  * Copyright (c) 1990 The Regents of the University of California.
6  * All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * William Jolitz.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)cpu.h	5.4 (Berkeley) 5/9/91
36  */
37 
38 /*
39  * SH3/SH4 support.
40  *
41  *  T.Horiuchi    Brains Corp.   5/22/98
42  */
43 
44 #ifndef _SH3_CPU_H_
45 #define	_SH3_CPU_H_
46 
47 #if defined(_KERNEL_OPT)
48 #include "opt_lockdebug.h"
49 #endif
50 
51 #include <sh3/psl.h>
52 #include <sh3/frame.h>
53 
54 #ifdef _KERNEL
55 #include <sys/cpu_data.h>
struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	cpuid_t	ci_cpuid;		/* our CPU id (sh3 is uniprocessor) */
	int	ci_mtx_count;		/* spin-mutex bookkeeping for MI mutex code */
	int	ci_mtx_oldspl;		/* saved spl for MI mutex code */
	int	ci_want_resched;	/* preemption pending; see cpu_need_resched() */
	int	ci_idepth;		/* interrupt nesting depth; >0 => in interrupt */
};
64 
/* Single statically-allocated cpu_info; curcpu() is therefore constant. */
extern struct cpu_info cpu_info_store;
#define	curcpu()			(&cpu_info_store)

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_number()			0	/* uniprocessor: always CPU 0 */
/*
 * Can't swapout u-area, (__SWAP_BROKEN)
 * since we use P1 converted address for trapframe.
 */
#define	cpu_swapin(p)			/* nothing */
/*
 * NOTE(review): the expansion below already ends in ';'.  A call site
 * that appends its own ';' yields an empty statement, which breaks an
 * unbraced if/else.  Confirm all call sites before changing.
 */
#define	cpu_swapout(p)			panic("cpu_swapout: can't get here");
#define	cpu_proc_fork(p1, p2)		/* nothing */
80 
81 /*
82  * Arguments to hardclock and gatherstats encapsulate the previous
83  * machine state in an opaque clockframe.
84  */
85 struct clockframe {
86 	int	spc;	/* program counter at time of interrupt */
87 	int	ssr;	/* status register at time of interrupt */
88 	int	ssp;	/* stack pointer at time of interrupt */
89 };
90 
91 
92 #define	CLKF_USERMODE(cf)	(!KERNELMODE((cf)->ssr))
93 #define	CLKF_PC(cf)		((cf)->spc)
94 #define	CLKF_INTR(cf)		(curcpu()->ci_idepth > 0)
95 
96 /*
97  * This is used during profiling to integrate system time.  It can safely
98  * assume that the process is resident.
99  */
100 #define	PROC_PC(p)							\
101 	(((struct trapframe *)(p)->p_md.md_regs)->tf_spc)
102 
103 /*
104  * Preempt the current process if in interrupt from user mode,
105  * or after the current trap/syscall if in system mode.
106  */
107 #define	cpu_need_resched(ci, flags)					\
108 do {									\
109 	ci->ci_want_resched = 1;					\
110 	if (curlwp != ci->ci_data.cpu_idlelwp)				\
111 		aston(curlwp);						\
112 } while (/*CONSTCOND*/0)
113 
114 /*
115  * Give a profiling tick to the current process when the user profiling
116  * buffer pages are invalid.  On the MIPS, request an ast to send us
117  * through trap, marking the proc as needing a profiling tick.
118  */
119 #define	cpu_need_proftick(l)						\
120 do {									\
121 	(l)->l_pflag |= LP_OWEUPC;					\
122 	aston(l);							\
123 } while (/*CONSTCOND*/0)
124 
125 /*
126  * Notify the current process (p) that it has a signal pending,
127  * process as soon as possible.
128  */
129 #define	cpu_signotify(l)	aston(l)
130 
131 #define	aston(l)		((l)->l_md.md_astpending = 1)
132 
133 /*
134  * We need a machine-independent name for this.
135  */
136 #define	DELAY(x)		delay(x)
137 #endif /* _KERNEL */
138 
139 /*
140  * Logical address space of SH3/SH4 CPU.
141  */
142 #define	SH3_PHYS_MASK	0x1fffffff
143 
144 #define	SH3_P0SEG_BASE	0x00000000	/* TLB mapped, also U0SEG */
145 #define	SH3_P0SEG_END	0x7fffffff
146 #define	SH3_P1SEG_BASE	0x80000000	/* pa == va */
147 #define	SH3_P1SEG_END	0x9fffffff
148 #define	SH3_P2SEG_BASE	0xa0000000	/* pa == va, non-cacheable */
149 #define	SH3_P2SEG_END	0xbfffffff
150 #define	SH3_P3SEG_BASE	0xc0000000	/* TLB mapped, kernel mode */
151 #define	SH3_P3SEG_END	0xdfffffff
152 #define	SH3_P4SEG_BASE	0xe0000000	/* peripheral space */
153 #define	SH3_P4SEG_END	0xffffffff
154 
155 #define	SH3_P1SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
156 #define	SH3_P2SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
157 #define	SH3_PHYS_TO_P1SEG(x)	((uint32_t)(x) | SH3_P1SEG_BASE)
158 #define	SH3_PHYS_TO_P2SEG(x)	((uint32_t)(x) | SH3_P2SEG_BASE)
159 #define	SH3_P1SEG_TO_P2SEG(x)	((uint32_t)(x) | 0x20000000)
160 #define	SH3_P2SEG_TO_P1SEG(x)	((uint32_t)(x) & ~0x20000000)
161 
162 #ifndef __lint__
163 
164 /*
165  * Switch from P1 (cached) to P2 (uncached).  This used to be written
166  * using gcc's assigned goto extension, but gcc4 aggressive optimizations
167  * tend to optimize that away under certain circumstances.
168  */
169 #define RUN_P2						\
170 	do {						\
171 		register uint32_t r0 asm("r0");		\
172 		uint32_t pc;				\
173 		__asm volatile(				\
174 			"	mov.l	1f, %1	;"	\
175 			"	mova	2f, %0	;"	\
176 			"	or	%0, %1	;"	\
177 			"	jmp	@%1	;"	\
178 			"	 nop		;"	\
179 			"	.align 2	;"	\
180 			"1:	.long	0x20000000;"	\
181 			"2:;"				\
182 			: "=r"(r0), "=r"(pc));		\
183 	} while (0)
184 
185 /*
186  * Switch from P2 (uncached) back to P1 (cached).  We need to be
187  * running on P2 to access cache control, memory-mapped cache and TLB
188  * arrays, etc. and after touching them at least 8 instructinos are
189  * necessary before jumping to P1, so provide that padding here.
190  */
191 #define RUN_P1						\
192 	do {						\
193 		register uint32_t r0 asm("r0");		\
194 		uint32_t pc;				\
195 		__asm volatile(				\
196 		/*1*/	"	mov.l	1f, %1	;"	\
197 		/*2*/	"	mova	2f, %0	;"	\
198 		/*3*/	"	nop		;"	\
199 		/*4*/	"	and	%0, %1	;"	\
200 		/*5*/	"	nop		;"	\
201 		/*6*/	"	nop		;"	\
202 		/*7*/	"	nop		;"	\
203 		/*8*/	"	nop		;"	\
204 			"	jmp	@%1	;"	\
205 			"	 nop		;"	\
206 			"	.align 2	;"	\
207 			"1:	.long	~0x20000000;"	\
208 			"2:;"				\
209 			: "=r"(r0), "=r"(pc));		\
210 	} while (0)
211 
212 /*
213  * If RUN_P1 is the last thing we do in a function we can omit it, b/c
214  * we are going to return to a P1 caller anyway, but we still need to
215  * ensure there's at least 8 instructions before jump to P1.
216  */
217 #define PAD_P1_SWITCH	__asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop;")
218 
219 #else  /* __lint__ */
220 #define	RUN_P2		do {} while (/* CONSTCOND */ 0)
221 #define	RUN_P1		do {} while (/* CONSTCOND */ 0)
222 #define	PAD_P1_SWITCH	do {} while (/* CONSTCOND */ 0)
223 #endif
224 
#if defined(SH4)
/* SH4 Processor Version Register (P4 address, read to identify the CPU). */
#define	SH4_PVR_ADDR	0xff000030	/* P4  address */
#define	SH4_PVR		(*(volatile uint32_t *) SH4_PVR_ADDR)
/* SH4 Product Register. */
#define	SH4_PRR_ADDR	0xff000044	/* P4  address */
#define	SH4_PRR		(*(volatile uint32_t *) SH4_PRR_ADDR)

/* PVR values, compared after masking off the low revision byte. */
#define	SH4_PVR_MASK	0xffffff00
#define	SH4_PVR_SH7750	0x04020500	/* SH7750  */
#define	SH4_PVR_SH7750S	0x04020600	/* SH7750S */
#define	SH4_PVR_SH775xR	0x04050000	/* SH775xR */
#define	SH4_PVR_SH7751	0x04110000	/* SH7751  */

/* PRR values, compared after masking off the low revision nibble. */
#define	SH4_PRR_MASK	0xfffffff0
#define	SH4_PRR_7750R	0x00000100	/* SH7750R */
#define	SH4_PRR_7751R	0x00000110	/* SH7751R */
#endif
242 
243 /*
244  * pull in #defines for kinds of processors
245  */
246 #include <machine/cputypes.h>
247 
248 /*
249  * CTL_MACHDEP definitions.
250  */
251 #define	CPU_CONSDEV		1	/* dev_t: console terminal device */
252 #define	CPU_LOADANDRESET	2	/* load kernel image and reset */
253 #define	CPU_MAXID		3	/* number of valid machdep ids */
254 
#ifdef _KERNEL
/* MD routines implemented in sh3 machdep code. */
void sh_cpu_init(int, int);
void sh_startup(void);
void cpu_reset(void) __attribute__((__noreturn__)); /* soft reset */
void _cpu_spin(uint32_t);	/* for delay loop. */
void delay(int);
struct pcb;
void savectx(struct pcb *);	/* save current context into a pcb */
void dumpsys(void);
#endif /* _KERNEL */
#endif /* !_SH3_CPU_H_ */
266