1 /* $NetBSD: cpu.h,v 1.54 2009/10/21 21:12:02 rmind Exp $ */ 2 3 /*- 4 * Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved. 5 * Copyright (c) 1990 The Regents of the University of California. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to Berkeley by 9 * William Jolitz. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 3. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * @(#)cpu.h 5.4 (Berkeley) 5/9/91 36 */ 37 38 /* 39 * SH3/SH4 support. 40 * 41 * T.Horiuchi Brains Corp. 
 *		5/22/98
 */

#ifndef _SH3_CPU_H_
#define	_SH3_CPU_H_

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

#include <sh3/psl.h>
#include <sh3/frame.h>

#ifdef _KERNEL
#include <sys/cpu_data.h>

/*
 * Per-CPU state.  This port is uniprocessor: there is exactly one
 * statically allocated cpu_info (cpu_info_store below) and cpu_number()
 * is constant 0.
 */
struct cpu_info {
	struct cpu_data ci_data;	/* MI per-cpu data */
	cpuid_t ci_cpuid;		/* CPU identifier */
	int	ci_mtx_count;		/* adaptive mutex bookkeeping (MI) */
	int	ci_mtx_oldspl;		/* spl to restore on mutex release */
	int	ci_want_resched;	/* preemption requested; see
					   cpu_need_resched() below */
	int	ci_idepth;		/* hardware interrupt nesting depth;
					   used by CLKF_INTR() */
};

extern struct cpu_info cpu_info_store;
/* Uniprocessor: curcpu() always resolves to the single static cpu_info. */
#define	curcpu()	(&cpu_info_store)

/*
 * definitions of cpu-dependent requirements
 * referenced in generic code
 */
#define	cpu_number()	0

#define	cpu_proc_fork(p1, p2)	/* nothing */

/*
 * Arguments to hardclock and gatherstats encapsulate the previous
 * machine state in an opaque clockframe.
 */
struct clockframe {
	int	spc;	/* program counter at time of interrupt */
	int	ssr;	/* status register at time of interrupt */
	int	ssp;	/* stack pointer at time of interrupt */
};

/* Clock-interrupt predicates: user mode is "not kernel mode" per the
 * saved status register; "in interrupt" means nested interrupt depth > 0. */
#define	CLKF_USERMODE(cf)	(!KERNELMODE((cf)->ssr))
#define	CLKF_PC(cf)		((cf)->spc)
#define	CLKF_INTR(cf)		(curcpu()->ci_idepth > 0)

/*
 * This is used during profiling to integrate system time.  It can safely
 * assume that the process is resident.
 */
#define	PROC_PC(p)							\
	(((struct trapframe *)(p)->p_md.md_regs)->tf_spc)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 * (No AST is posted for the idle lwp; it reschedules on its own.)
 */
#define	cpu_need_resched(ci, flags)					\
do {									\
	ci->ci_want_resched = 1;					\
	if (curlwp != ci->ci_data.cpu_idlelwp)				\
		aston(curlwp);						\
} while (/*CONSTCOND*/0)

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On the MIPS, request an ast to send us
 * through trap, marking the proc as needing a profiling tick.
 */
#define	cpu_need_proftick(l)						\
do {									\
	(l)->l_pflag |= LP_OWEUPC;					\
	aston(l);							\
} while (/*CONSTCOND*/0)

/*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
#define	cpu_signotify(l)	aston(l)

/* Post an AST: checked on return to user mode. */
#define	aston(l)	((l)->l_md.md_astpending = 1)

/*
 * We need a machine-independent name for this.
 */
#define	DELAY(x)	delay(x)
#endif /* _KERNEL */

/*
 * Logical address space of SH3/SH4 CPU.
 */
#define	SH3_PHYS_MASK	0x1fffffff	/* low 29 bits = physical address */

#define	SH3_P0SEG_BASE	0x00000000	/* TLB mapped, also U0SEG */
#define	SH3_P0SEG_END	0x7fffffff
#define	SH3_P1SEG_BASE	0x80000000	/* pa == va */
#define	SH3_P1SEG_END	0x9fffffff
#define	SH3_P2SEG_BASE	0xa0000000	/* pa == va, non-cacheable */
#define	SH3_P2SEG_END	0xbfffffff
#define	SH3_P3SEG_BASE	0xc0000000	/* TLB mapped, kernel mode */
#define	SH3_P3SEG_END	0xdfffffff
#define	SH3_P4SEG_BASE	0xe0000000	/* peripheral space */
#define	SH3_P4SEG_END	0xffffffff

/* P1 and P2 map the same physical memory; they differ only in bit 29
 * (0x20000000), which selects the non-cacheable (P2) alias. */
#define	SH3_P1SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
#define	SH3_P2SEG_TO_PHYS(x)	((uint32_t)(x) & SH3_PHYS_MASK)
#define	SH3_PHYS_TO_P1SEG(x)	((uint32_t)(x) | SH3_P1SEG_BASE)
#define	SH3_PHYS_TO_P2SEG(x)	((uint32_t)(x) | SH3_P2SEG_BASE)
#define	SH3_P1SEG_TO_P2SEG(x)	((uint32_t)(x) | 0x20000000)
#define	SH3_P2SEG_TO_P1SEG(x)	((uint32_t)(x) & ~0x20000000)

#ifndef __lint__

/*
 * Switch from P1 (cached) to P2 (uncached).  This used to be written
 * using gcc's assigned goto extension, but gcc4 aggressive optimizations
 * tend to optimize that away under certain circumstances.
 *
 * Computes the address of the local label "2:" (via mova, which targets
 * r0 — hence the explicit r0 register variable), sets the P2 bit
 * (0x20000000) in it, and jumps there.
 */
#define	RUN_P2						\
	do {						\
		register uint32_t r0 asm("r0");		\
		uint32_t pc;				\
		__asm volatile(				\
		    "	mov.l	1f, %1	;"		\
		    "	mova	2f, %0	;"		\
		    "	or	%0, %1	;"		\
		    "	jmp	@%1	;"		\
		    "	nop		;"		\
		    "	.align	2	;"		\
		    "1:	.long	0x20000000;"		\
		    "2:;"				\
		    : "=r"(r0), "=r"(pc));		\
	} while (0)

/*
 * Switch from P2 (uncached) back to P1 (cached).  We need to be
 * running on P2 to access cache control, memory-mapped cache and TLB
 * arrays, etc. and after touching them at least 8 instructions are
 * necessary before jumping to P1, so provide that padding here.
 * (The numbered comments count those padding instructions; do not
 * shorten this sequence.)
 */
#define	RUN_P1						\
	do {						\
		register uint32_t r0 asm("r0");		\
		uint32_t pc;				\
		__asm volatile(				\
	    /*1*/   "	mov.l	1f, %1	;"		\
	    /*2*/   "	mova	2f, %0	;"		\
	    /*3*/   "	nop		;"		\
	    /*4*/   "	and	%0, %1	;"		\
	    /*5*/   "	nop		;"		\
	    /*6*/   "	nop		;"		\
	    /*7*/   "	nop		;"		\
	    /*8*/   "	nop		;"		\
		    "	jmp	@%1	;"		\
		    "	nop		;"		\
		    "	.align	2	;"		\
		    "1:	.long	~0x20000000;"		\
		    "2:;"				\
		    : "=r"(r0), "=r"(pc));		\
	} while (0)

/*
 * If RUN_P1 is the last thing we do in a function we can omit it, b/c
 * we are going to return to a P1 caller anyway, but we still need to
 * ensure there's at least 8 instructions before jump to P1.
 */
#define	PAD_P1_SWITCH	__asm volatile ("nop;nop;nop;nop;nop;nop;nop;nop;")

#else  /* __lint__ */
/* lint cannot parse the inline assembly; provide empty stand-ins. */
#define	RUN_P2		do {} while (/* CONSTCOND */ 0)
#define	RUN_P1		do {} while (/* CONSTCOND */ 0)
#define	PAD_P1_SWITCH	do {} while (/* CONSTCOND */ 0)
#endif

#if defined(SH4)
/* SH4 Processor Version Register */
#define	SH4_PVR_ADDR	0xff000030	/* P4 address */
#define	SH4_PVR		(*(volatile uint32_t *) SH4_PVR_ADDR)
#define	SH4_PRR_ADDR	0xff000044	/* P4 address */
#define	SH4_PRR		(*(volatile uint32_t *) SH4_PRR_ADDR)

#define	SH4_PVR_MASK	0xffffff00
#define	SH4_PVR_SH7750	0x04020500	/* SH7750 */
#define	SH4_PVR_SH7750S	0x04020600	/* SH7750S */
#define	SH4_PVR_SH775xR	0x04050000	/* SH775xR */
#define	SH4_PVR_SH7751	0x04110000	/* SH7751 */

#define	SH4_PRR_MASK	0xfffffff0
#define	SH4_PRR_7750R	0x00000100	/* SH7750R */
#define	SH4_PRR_7751R	0x00000110	/* SH7751R */
#endif

/*
 * pull in #defines for kinds of processors
 */
#include <machine/cputypes.h>

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_CONSDEV		1	/* dev_t: console terminal device */
#define	CPU_LOADANDRESET	2	/* load kernel image and reset */
#define	CPU_MAXID		3	/* number of valid machdep ids */

#ifdef _KERNEL
void sh_cpu_init(int, int);
void sh_startup(void);
void cpu_reset(void) __attribute__((__noreturn__)); /* soft reset */
void _cpu_spin(uint32_t);	/* for delay loop. */
void delay(int);
struct pcb;
void savectx(struct pcb *);
void dumpsys(void);
#endif /* _KERNEL */
#endif /* !_SH3_CPU_H_ */