1 /* $NetBSD: mips_machdep.c,v 1.306 2024/01/06 07:27:35 simonb Exp $ */
2
3 /*
4 * Copyright 2002 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Simon Burge for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed for the NetBSD Project by
20 * Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 * or promote products derived from this software without specific prior
23 * written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 /*
39 * Copyright 2000, 2001
40 * Broadcom Corporation. All rights reserved.
41 *
42 * This software is furnished under license and may be used and copied only
43 * in accordance with the following terms and conditions. Subject to these
44 * conditions, you may download, copy, install, use, modify and distribute
45 * modified or unmodified copies of this software in source and/or binary
46 * form. No title or ownership is transferred hereby.
47 *
48 * 1) Any source code used, modified or distributed must reproduce and
49 * retain this copyright notice and list of conditions as they appear in
50 * the source file.
51 *
52 * 2) No right is granted to use any trade name, trademark, or logo of
53 * Broadcom Corporation. The "Broadcom Corporation" name may not be
54 * used to endorse or promote products derived from this software
55 * without the prior written permission of Broadcom Corporation.
56 *
57 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
58 * WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
59 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
60 * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
61 * FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
62 * LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
63 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
64 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
65 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
66 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
67 * OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
68 */
69
70 /*-
71 * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
72 * All rights reserved.
73 *
74 * This code is derived from software contributed to The NetBSD Foundation
75 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
76 * NASA Ames Research Center and by Chris Demetriou.
77 *
78 * Redistribution and use in source and binary forms, with or without
79 * modification, are permitted provided that the following conditions
80 * are met:
81 * 1. Redistributions of source code must retain the above copyright
82 * notice, this list of conditions and the following disclaimer.
83 * 2. Redistributions in binary form must reproduce the above copyright
84 * notice, this list of conditions and the following disclaimer in the
85 * documentation and/or other materials provided with the distribution.
86 *
87 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
88 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
89 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
90 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
91 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
92 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
93 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
94 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
95 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
96 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
97 * POSSIBILITY OF SUCH DAMAGE.
98 */
99
100 /*
101 * Copyright 1996 The Board of Trustees of The Leland Stanford
102 * Junior University. All Rights Reserved.
103 *
104 * Permission to use, copy, modify, and distribute this
105 * software and its documentation for any purpose and without
106 * fee is hereby granted, provided that the above copyright
107 * notice appear in all copies. Stanford University
108 * makes no representations about the suitability of this
109 * software for any purpose. It is provided "as is" without
110 * express or implied warranty.
111 */
112
113 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
114 __KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.306 2024/01/06 07:27:35 simonb Exp $");
115
116 #define __INTR_PRIVATE
117 #include "opt_cputype.h"
118 #include "opt_compat_netbsd32.h"
119 #include "opt_multiprocessor.h"
120
121 #include <sys/param.h>
122 #include <sys/systm.h>
123 #include <sys/proc.h>
124 #include <sys/intr.h>
125 #include <sys/exec.h>
126 #include <sys/reboot.h>
127 #include <sys/module.h>
128 #include <sys/mount.h> /* fsid_t for syscallargs */
129 #include <sys/lwp.h>
130 #include <sys/sysctl.h>
131 #include <sys/msgbuf.h>
132 #include <sys/conf.h>
133 #include <sys/core.h>
134 #include <sys/device.h>
135 #include <sys/kcore.h>
136 #include <sys/kmem.h>
137 #include <sys/ras.h>
138 #include <sys/cpu.h>
139 #include <sys/atomic.h>
140 #include <sys/ucontext.h>
141 #include <sys/bitops.h>
142
143 #include <mips/kcore.h>
144
145 #ifdef COMPAT_NETBSD32
146 #include <compat/netbsd32/netbsd32.h>
147 #endif
148
149 #include <uvm/uvm.h>
150 #include <uvm/uvm_physseg.h>
151
152 #include <dev/cons.h>
153 #include <dev/mm.h>
154
155 #include <mips/pcb.h>
156 #include <mips/cache.h>
157 #include <mips/frame.h>
158 #include <mips/regnum.h>
159 #include <mips/mips_opcode.h>
160
161 #include <mips/cpu.h>
162 #include <mips/locore.h>
163 #include <mips/psl.h>
164 #include <mips/pte.h>
165 #include <mips/userret.h>
166
167 #ifdef __HAVE_BOOTINFO_H
168 #include <machine/bootinfo.h>
169 #endif
170
171 #ifdef MIPS64_OCTEON
172 #include <mips/cavium/octeonvar.h>
173 #endif
174
175 #if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
176 #include <mips/mipsNN.h> /* MIPS32/MIPS64 registers */
177
/*
 * Assemble a MIPS instruction word from its fields:
 * opcode (6 bits), rs (5), rt (5), rd (5) and the remaining low
 * 11-bit function/immediate field.  Each operand is converted to
 * uint32_t *before* shifting so that an opcode such as OP_LD (0x37)
 * cannot be left-shifted into the sign bit of a plain int, which is
 * undefined behaviour; the original form cast only the final result.
 */
#define _MKINSN(a,b,c,d,e) \
    (((uint32_t)(a) << 26) | ((uint32_t)(b) << 21) | ((uint32_t)(c) << 16) | \
     ((uint32_t)(d) << 11) | (uint32_t)(e))

#ifdef _LP64
/* ld v0, offsetof(lwp_t, l_private)(a0) */
#define _LOAD_V0_L_PRIVATE_A0 _MKINSN(OP_LD, _R_A0, _R_V0, 0, offsetof(lwp_t, l_private))
/* dmtc0 v0, CP0 Context register group, select 2 (UserLocal) */
#define _MTC0_V0_USERLOCAL _MKINSN(OP_COP0, OP_DMT, _R_V0, MIPS_COP_0_TLB_CONTEXT, 2)
#else
/* lw v0, offsetof(lwp_t, l_private)(a0) */
#define _LOAD_V0_L_PRIVATE_A0 _MKINSN(OP_LW, _R_A0, _R_V0, 0, offsetof(lwp_t, l_private))
/* mtc0 v0, CP0 Context register group, select 2 (UserLocal) */
#define _MTC0_V0_USERLOCAL _MKINSN(OP_COP0, OP_MT, _R_V0, MIPS_COP_0_TLB_CONTEXT, 2)
#endif
/* jr ra */
#define JR_RA _MKINSN(OP_SPECIAL, _R_RA, 0, 0, OP_JR)
188
189 #endif
190
191 /* Internal routines. */
192 int cpu_dumpsize(void);
193 u_long cpu_dump_mempagecnt(void);
194 int cpu_dump(void);
195
196 #if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
197 static void mips_watchpoint_init(void);
198 #endif
199
200 #if defined(_LP64) && defined(ENABLE_MIPS_16KB_PAGE)
201 vaddr_t mips_vm_maxuser_address = MIPS_VM_MAXUSER_ADDRESS;
202 #endif
203
204 #if defined(MIPS3_PLUS)
205 uint32_t mips3_cp0_tlb_page_mask_probe(void);
206 uint64_t mips3_cp0_tlb_entry_hi_probe(void);
207 uint64_t mips3_cp0_tlb_entry_lo_probe(void);
208
209 static void mips3_tlb_probe(void);
210 #endif
211
212 #if defined(MIPS1)
213 static void mips1_vector_init(const struct splsw *);
214 extern const struct locoresw mips1_locoresw;
215 extern const mips_locore_jumpvec_t mips1_locore_vec;
216 #endif
217
218 #if defined(MIPS3)
219 static void mips3_vector_init(const struct splsw *);
220 extern const struct locoresw mips3_locoresw;
221 extern const mips_locore_jumpvec_t mips3_locore_vec;
222 #endif
223
224 #if defined(MIPS3_LOONGSON2)
225 static void loongson2_vector_init(const struct splsw *);
226 extern const struct locoresw loongson2_locoresw;
227 extern const mips_locore_jumpvec_t loongson2_locore_vec;
228 #endif
229
230 #if defined(MIPS32)
231 static void mips32_vector_init(const struct splsw *);
232 extern const struct locoresw mips32_locoresw;
233 extern const mips_locore_jumpvec_t mips32_locore_vec;
234 #endif
235
236 #if defined(MIPS32R2)
237 static void mips32r2_vector_init(const struct splsw *);
238 extern const struct locoresw mips32r2_locoresw;
239 extern const mips_locore_jumpvec_t mips32r2_locore_vec;
240 #endif
241
242 #if defined(MIPS64)
243 static void mips64_vector_init(const struct splsw *);
244 extern const struct locoresw mips64_locoresw;
245 extern const mips_locore_jumpvec_t mips64_locore_vec;
246 #endif
247
248 #if defined(MIPS64R2)
249 extern const struct locoresw mips64r2_locoresw;
250 extern const mips_locore_jumpvec_t mips64r2_locore_vec;
251 #endif
252
253 #if defined(PARANOIA)
254 void std_splsw_test(void);
255 #endif
256
/* Per-CPU-family jump vector of locore entry points, filled in at boot
 * by the *_vector_init() routines below. */
mips_locore_jumpvec_t mips_locore_jumpvec;

struct locoresw mips_locoresw;

extern const struct splsw std_splsw;
struct splsw mips_splsw;

/* CPU/FPU identification state; 0xffffffff means "not yet probed". */
struct mips_options mips_options = {
	.mips_cpu_id = 0xffffffff,
	.mips_fpu_id = 0xffffffff,
};

void *	msgbufaddr;

/* the following is used by DDB to reset the system */
void	(*cpu_reset_address)(void);

/* the following is used externally (sysctl_hw) */
char	machine[] = MACHINE;		/* from <machine/param.h> */
char	machine_arch[] = MACHINE_ARCH;	/* from <machine/param.h> */
277
278 /*
279 * Assumptions:
280 * - All MIPS3+ have an r4k-style MMU. _Many_ assumptions throughout
281 * much of the mips code about this. Includes overloaded usage of
282 * MIPS3_PLUS.
283 * - All MIPS3+ use the same exception model (cp0 status, cause bits,
284 * etc). _Many_ assumptions throughout much of the mips code about
285 * this. Includes overloaded usage of MIPS3_PLUS.
286 * - All MIPS3+ have a count register. MIPS_HAS_CLOCK in <mips/cpu.h>
287 * will need to be revised if this is false.
288 */
289 #define MIPS32_FLAGS CPU_MIPS_R4K_MMU | CPU_MIPS_CAUSE_IV | CPU_MIPS_USE_WAIT
290 #define MIPS64_FLAGS MIPS32_FLAGS /* same as MIPS32 flags (for now) */
291
/*
 * Master CPU identification table, searched by the CPU probe code.
 * NOTE(review): field order appears to be { company id, processor id,
 * revision, company options, ISA level, #TLB entries, cpu flags,
 * cp0 flags, company-id flags, name } -- confirm against struct pridtab.
 * A -1 in an ID/revision field acts as a wildcard.  The table is
 * terminated by an all-zero entry with a NULL name.
 */
static const struct pridtab cputab[] = {
	/* "Prehistoric" (company id 0) MIPS-I parts */
	{ 0, MIPS_R2000, -1,	-1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_LLSC, 0, 0,		"MIPS R2000 CPU"	},
	{ 0, MIPS_R3000, MIPS_REV_R2000A, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_LLSC, 0, 0,		"MIPS R2000A CPU"	},
	{ 0, MIPS_R3000, MIPS_REV_R3000, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_LLSC, 0, 0,		"MIPS R3000 CPU"	},
	{ 0, MIPS_R3000, MIPS_REV_R3000A, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_LLSC, 0, 0,		"MIPS R3000A CPU"	},
	{ 0, MIPS_R6000, -1,	-1, CPU_ARCH_MIPS2, 32,
	  MIPS_NOT_SUPP, 0, 0,			"MIPS R6000 CPU"	},

	/*
	 * rev 0x00, 0x22 and 0x30 are R4000, 0x40, 0x50 and 0x60 are R4400.
	 * should we allow ranges and use 0x00 - 0x3f for R4000 and
	 * 0x40 - 0xff for R4400?
	 */
	{ 0, MIPS_R4000, MIPS_REV_R4000_A, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"MIPS R4000 CPU"	},
	{ 0, MIPS_R4000, MIPS_REV_R4000_B, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"MIPS R4000 CPU"	},
	{ 0, MIPS_R4000, MIPS_REV_R4000_C, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"MIPS R4000 CPU"	},
	{ 0, MIPS_R4000, MIPS_REV_R4400_A, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"MIPS R4400 CPU"	},
	{ 0, MIPS_R4000, MIPS_REV_R4400_B, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"MIPS R4400 CPU"	},
	{ 0, MIPS_R4000, MIPS_REV_R4400_C, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"MIPS R4400 CPU"	},

	{ 0, MIPS_R3LSI, -1,	-1, CPU_ARCH_MIPS1, -1,
	  MIPS_NOT_SUPP, 0, 0,			"LSI Logic R3000 derivative" },
	{ 0, MIPS_R6000A, -1,	-1, CPU_ARCH_MIPS2, 32,
	  MIPS_NOT_SUPP, 0, 0,			"MIPS R6000A CPU"	},
	{ 0, MIPS_R3IDT, -1,	-1, CPU_ARCH_MIPS1, -1,
	  MIPS_NOT_SUPP, 0, 0,			"IDT R3041 or RC36100 CPU" },
	{ 0, MIPS_R4100, -1,	-1, CPU_ARCH_MIPS3, 32,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_NO_LLSC, 0, 0,
						"NEC VR4100 CPU"	},
	{ 0, MIPS_R4200, -1,	-1, CPU_ARCH_MIPS3, -1,
	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
						"NEC VR4200 CPU"	},
	{ 0, MIPS_R4300, -1,	-1, CPU_ARCH_MIPS3, 32,
	  CPU_MIPS_R4K_MMU, 0, 0,		"NEC VR4300 CPU"	},
	{ 0, MIPS_R4600, -1,	-1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"QED R4600 Orion CPU"	},
	{ 0, MIPS_R4700, -1,	-1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU, 0, 0,		"QED R4700 Orion CPU"	},

	{ 0, MIPS_R8000, -1,	-1, CPU_ARCH_MIPS4, 384,
	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
						"MIPS R8000 Blackbird/TFP CPU" },
	{ 0, MIPS_R10000, -1,	-1, CPU_ARCH_MIPS4, 64,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"MIPS R10000 CPU"	},
	{ 0, MIPS_R12000, -1,	-1, CPU_ARCH_MIPS4, 64,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"MIPS R12000 CPU"	},
	{ 0, MIPS_R14000, -1,	-1, CPU_ARCH_MIPS4, 64,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"MIPS R14000 CPU"	},

	/* XXX
	 * If the Processor Revision ID of the 4650 isn't 0, the following
	 * entry needs to be adjusted.  Can't use a wildcard match because
	 * the TX39 series processors share the same Processor ID value.
	 * Or maybe put TX39 CPUs first if the revid doesn't overlap with
	 * the 4650...
	 */
	{ 0, MIPS_R4650, 0,	-1, CPU_ARCH_MIPS3, -1,
	  MIPS_NOT_SUPP /* no MMU! */, 0, 0,	"QED R4650 CPU"		},
	{ 0, MIPS_TX3900, MIPS_REV_TX3912, -1, CPU_ARCH_MIPS1, 32,
	  CPU_MIPS_NO_LLSC, 0, 0,		"Toshiba TX3912 CPU"	},
	{ 0, MIPS_TX3900, MIPS_REV_TX3922, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_LLSC, 0, 0,		"Toshiba TX3922 CPU"	},
	{ 0, MIPS_TX3900, MIPS_REV_TX3927, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_LLSC, 0, 0,		"Toshiba TX3927 CPU"	},
	{ 0, MIPS_R5000, -1,	-1, CPU_ARCH_MIPS4, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"MIPS R5000 CPU"	},
	{ 0, MIPS_RM5200, -1,	-1, CPU_ARCH_MIPS4, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_CAUSE_IV | CPU_MIPS_DOUBLE_COUNT |
	  CPU_MIPS_USE_WAIT, 0, 0,		"QED RM5200 CPU"	},

	/* XXX
	 * The rm7000 rev 2.0 can have 64 tlbs, and has 6 extra interrupts.  See
	 *    "Migrating to the RM7000 from other MIPS Microprocessors"
	 * for more details.
	 */
	{ 0, MIPS_RM7000, -1,	-1, CPU_ARCH_MIPS4, 48,
	  MIPS_NOT_SUPP | CPU_MIPS_CAUSE_IV | CPU_MIPS_DOUBLE_COUNT |
	  CPU_MIPS_USE_WAIT, 0, 0,		"QED RM7000 CPU"	},

	/*
	 * IDT RC32300 core is a 32 bit MIPS2 processor with
	 * MIPS3/MIPS4 extensions. It has an R4000-style TLB,
	 * while all registers are 32 bits and any 64 bit
	 * instructions like ld/sd/dmfc0/dmtc0 are not allowed.
	 *
	 * note that the Config register has a non-standard base
	 * for IC and DC (2^9 instead of 2^12).
	 *
	 */
	{ 0, MIPS_RC32300, -1,	-1, CPU_ARCH_MIPS3, 16,
	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
						"IDT RC32300 CPU"	},
	{ 0, MIPS_RC32364, -1,	-1, CPU_ARCH_MIPS3, 16,
	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
						"IDT RC32364 CPU"	},
	{ 0, MIPS_RC64470, -1,	-1, CPU_ARCH_MIPSx, -1,
	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
						"IDT RC64474/RC64475 CPU" },

	{ 0, MIPS_R5400, -1,	-1, CPU_ARCH_MIPSx, -1,
	  MIPS_NOT_SUPP | CPU_MIPS_R4K_MMU, 0, 0,
						"NEC VR5400 CPU"	},
	{ 0, MIPS_R5900, -1,	-1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_NO_LLSC | CPU_MIPS_R4K_MMU, 0, 0,
						"Toshiba R5900 CPU"	},

	{ 0, MIPS_TX4900, MIPS_REV_TX4927, -1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"Toshiba TX4927 CPU"	},

	{ 0, MIPS_TX4900, -1,	-1, CPU_ARCH_MIPS3, 48,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT, 0, 0,
						"Toshiba TX4900 CPU"	},

	/*
	 * ICT Loongson2 is a MIPS64 CPU with a few quirks.  For some reason
	 * the virtual aliases present with 4KB pages make the caches misbehave
	 * so we make all accesses uncached.  With 16KB pages, no virtual
	 * aliases are possible so we can use caching.
	 */
#ifdef ENABLE_MIPS_16KB_PAGE
#define	MIPS_LOONGSON2_CCA	0
#else
#define	MIPS_LOONGSON2_CCA	(CPU_MIPS_HAVE_SPECIAL_CCA | \
				(2 << CPU_MIPS_CACHED_CCA_SHIFT))
#endif
	{ 0, MIPS_LOONGSON2, MIPS_REV_LOONGSON2E, -1, CPU_ARCH_MIPS3, 64,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT | CPU_MIPS_LOONGSON2
	  | MIPS_LOONGSON2_CCA, 0, 0,		"ICT Loongson 2E CPU"	},
	{ 0, MIPS_LOONGSON2, MIPS_REV_LOONGSON2F, -1, CPU_ARCH_MIPS3, 64,
	  CPU_MIPS_R4K_MMU | CPU_MIPS_DOUBLE_COUNT | CPU_MIPS_LOONGSON2
	  | MIPS_LOONGSON2_CCA, 0, 0,		"ICT Loongson 2F CPU"	},

#if 0 /* ID collisions : can we use a CU1 test or similar? */
	{ 0, MIPS_R3SONY, -1,	-1, CPU_ARCH_MIPS1, -1,
	  MIPS_NOT_SUPP, 0, 0,			"SONY R3000 derivative"	},	/* 0x21; crash R4700? */
	{ 0, MIPS_R3NKK, -1,	-1, CPU_ARCH_MIPS1, -1,
	  MIPS_NOT_SUPP, 0, 0,			"NKK R3000 derivative"	},	/* 0x23; crash R5000? */
#endif

	/* MIPS Technologies (MTI) cores */
	{ MIPS_PRID_CID_MTI, MIPS_4Kc, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "4Kc"		},
	{ MIPS_PRID_CID_MTI, MIPS_4KEc, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "4KEc"		},
	{ MIPS_PRID_CID_MTI, MIPS_4KEc_R2, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "4KEc (Rev 2)"	},
	{ MIPS_PRID_CID_MTI, MIPS_4KSc, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "4KSc"		},
	{ MIPS_PRID_CID_MTI, MIPS_5Kc, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "5Kc"		},
	{ MIPS_PRID_CID_MTI, MIPS_20Kc, -1, -1, -1, 0,
	  MIPS64_FLAGS, 0, 0,			"20Kc"			},
	{ MIPS_PRID_CID_MTI, MIPS_25Kf, -1, -1, -1, 0,
	  MIPS64_FLAGS, 0, 0,			"25Kf"			},
	{ MIPS_PRID_CID_MTI, MIPS_24K, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG7,
	  0, "24K" },
	{ MIPS_PRID_CID_MTI, MIPS_24KE, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG7,
	  0, "24KE" },
	{ MIPS_PRID_CID_MTI, MIPS_34K, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG7,
	  0, "34K" },
	{ MIPS_PRID_CID_MTI, MIPS_74K, -1, -1, -1, 0,
	  CPU_MIPS_HAVE_SPECIAL_CCA | (0 << CPU_MIPS_CACHED_CCA_SHIFT) |
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG6 | MIPS_CP0FL_CONFIG7,
	  0, "74K" },
	{ MIPS_PRID_CID_MTI, MIPS_1004K, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG6 | MIPS_CP0FL_CONFIG7,
	  0, "1004K" },
	{ MIPS_PRID_CID_MTI, MIPS_1074K, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG6 | MIPS_CP0FL_CONFIG7,
	  0, "1074K" },

	{ MIPS_PRID_CID_BROADCOM, MIPS_BCM3302, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_DOUBLE_COUNT, 0, 0, "BCM3302"		},

	/* Alchemy Au1xxx: company options field selects the SoC model */
	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV1, -1, MIPS_AU1000, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
						"Au1000 (Rev 1 core)"	},
	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV2, -1, MIPS_AU1000, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
						"Au1000 (Rev 2 core)"	},

	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV1, -1, MIPS_AU1100, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
						"Au1100 (Rev 1 core)"	},
	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV2, -1, MIPS_AU1100, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
						"Au1100 (Rev 2 core)"	},

	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV1, -1, MIPS_AU1500, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
						"Au1500 (Rev 1 core)"	},
	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV2, -1, MIPS_AU1500, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
						"Au1500 (Rev 2 core)"	},

	{ MIPS_PRID_CID_ALCHEMY, MIPS_AU_REV2, -1, MIPS_AU1550, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_NO_WAIT | CPU_MIPS_I_D_CACHE_COHERENT, 0, 0,
						"Au1550 (Rev 2 core)"	},

	/* The SB-1 CPU uses a CCA of 5 - "Cacheable Coherent Shareable" */
	{ MIPS_PRID_CID_SIBYTE, MIPS_SB1, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT |
	  CPU_MIPS_HAVE_SPECIAL_CCA |
	  (CCA_SB_CACHEABLE_COHERENT << CPU_MIPS_CACHED_CCA_SHIFT), 0, 0,
						"SB-1"			},
	{ MIPS_PRID_CID_SIBYTE, MIPS_SB1_11, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT |
	  CPU_MIPS_HAVE_SPECIAL_CCA |
	  (CCA_SB_CACHEABLE_COHERENT << CPU_MIPS_CACHED_CCA_SHIFT), 0, 0,
						"SB-1 (0x11)"		},

	/* RMI XLR/XLS multi-core, multi-threaded parts */
	{ MIPS_PRID_CID_RMI, MIPS_XLR732B, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLR|MIPS_CIDFL_RMI_CPUS(8,4)|MIPS_CIDFL_RMI_L2(2MB),
						"XLR732B"		},

	{ MIPS_PRID_CID_RMI, MIPS_XLR732C, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLR|MIPS_CIDFL_RMI_CPUS(8,4)|MIPS_CIDFL_RMI_L2(2MB),
						"XLR732C"		},

	{ MIPS_PRID_CID_RMI, MIPS_XLS616, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(4,4)|MIPS_CIDFL_RMI_L2(1MB),
						"XLS616"		},

	{ MIPS_PRID_CID_RMI, MIPS_XLS416, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(4,4)|MIPS_CIDFL_RMI_L2(1MB),
						"XLS416"		},

	{ MIPS_PRID_CID_RMI, MIPS_XLS408, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(2,4)|MIPS_CIDFL_RMI_L2(1MB),
						"XLS408"		},

	{ MIPS_PRID_CID_RMI, MIPS_XLS408LITE, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(2,4)|MIPS_CIDFL_RMI_L2(1MB),
						"XLS408lite"		},

	{ MIPS_PRID_CID_RMI, MIPS_XLS404LITE, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(1,4)|MIPS_CIDFL_RMI_L2(512KB),
						"XLS404lite"		},

	{ MIPS_PRID_CID_RMI, MIPS_XLS208, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(2,4)|MIPS_CIDFL_RMI_L2(512KB),
						"XLS208"		},

	{ MIPS_PRID_CID_RMI, MIPS_XLS204, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(1,4)|MIPS_CIDFL_RMI_L2(256KB),
						"XLS204"		},

	{ MIPS_PRID_CID_RMI, MIPS_XLS108, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(2,4)|MIPS_CIDFL_RMI_L2(512KB),
						"XLS108"		},

	{ MIPS_PRID_CID_RMI, MIPS_XLS104, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR |
	  CPU_MIPS_I_D_CACHE_COHERENT | CPU_MIPS_HAVE_MxCR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EIRR | MIPS_CP0FL_EIMR | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG7,
	  CIDFL_RMI_TYPE_XLS|MIPS_CIDFL_RMI_CPUS(1,4)|MIPS_CIDFL_RMI_L2(256KB),
						"XLS104"		},

	/* Cavium Octeon parts */
	{ MIPS_PRID_CID_CAVIUM, MIPS_CN31XX, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE | MIPS_CP0FL_CONFIG |
	  MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 | MIPS_CP0FL_CONFIG3,
	  0,
						"CN31xx"		},

	{ MIPS_PRID_CID_CAVIUM, MIPS_CN30XX, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE | MIPS_CP0FL_CONFIG |
	  MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 | MIPS_CP0FL_CONFIG3,
	  0,
						"CN30xx"		},

	{ MIPS_PRID_CID_CAVIUM, MIPS_CN50XX, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_EBASE | MIPS_CP0FL_CONFIG |
	  MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 | MIPS_CP0FL_CONFIG3,
	  0,
						"CN50xx"		},

	{ MIPS_PRID_CID_CAVIUM, MIPS_CN68XX, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
	  MIPS_CP0FL_USE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG4,
	  0,
						"CN68xx"		},

	{ MIPS_PRID_CID_CAVIUM, MIPS_CN70XX, -1, -1, -1, 0,
	  MIPS64_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_NO_LLADDR,
	  MIPS_CP0FL_USE | MIPS_CP0FL_EBASE |
	  MIPS_CP0FL_CONFIG | MIPS_CP0FL_CONFIG1 | MIPS_CP0FL_CONFIG2 |
	  MIPS_CP0FL_CONFIG3 | MIPS_CP0FL_CONFIG4 | MIPS_CP0FL_CONFIG5 |
	  MIPS_CP0FL_CONFIG6 | MIPS_CP0FL_CONFIG7,
	  0,
						"CN70xx/CN71xx"		},

	/* Microsoft Research' extensible MIPS */
	{ MIPS_PRID_CID_MICROSOFT, MIPS_eMIPS, 1, -1, CPU_ARCH_MIPS1, 64,
	  CPU_MIPS_NO_WAIT, 0, 0,		"eMIPS CPU"		},

	/* Ingenic XBurst */
	{ MIPS_PRID_CID_INGENIC, MIPS_XBURST, -1, -1, -1, 0,
	  MIPS32_FLAGS | CPU_MIPS_D_CACHE_COHERENT | CPU_MIPS_DOUBLE_COUNT,
	  0, 0,					"XBurst"		},

	/* All-zero terminator entry: end of table. */
	{ 0, 0, 0,		0, 0, 0,
	  0, 0, 0,				NULL			}
};
703
/*
 * FPU (coprocessor 1) identification table, same layout as cputab.
 * MIPS_SOFT means no hardware FPU is present.
 * NOTE(review): unlike cputab, this table has no NULL-name terminator,
 * so lookups presumably iterate by element count -- confirm in caller.
 */
static const struct pridtab fputab[] = {
    { 0, MIPS_SOFT,  -1, 0, 0, 0, 0, 0, 0, "software emulated floating point" },
    { 0, MIPS_R2360, -1, 0, 0, 0, 0, 0, 0, "MIPS R2360 Floating Point Board" },
    { 0, MIPS_R2010, -1, 0, 0, 0, 0, 0, 0, "MIPS R2010 FPC" },
    { 0, MIPS_R3010, -1, 0, 0, 0, 0, 0, 0, "MIPS R3010 FPC" },
    { 0, MIPS_R6010, -1, 0, 0, 0, 0, 0, 0, "MIPS R6010 FPC" },
    { 0, MIPS_R4010, -1, 0, 0, 0, 0, 0, 0, "MIPS R4010 FPC" },
};
712
/*
 * Company ID's are not sparse (yet), this array is indexed directly
 * by pridtab->cpu_cid.
 */
static const char * const cidnames[] = {
	"Prehistoric",			/* 0: parts predating the company ID field */
	"MIPS",				/* or "MIPS Technologies, Inc." */
	"Broadcom",			/* or "Broadcom Corp." */
	"Alchemy",			/* or "Alchemy Semiconductor" */
	"SiByte",			/* or "Broadcom Corp. (SiByte)" */
	"SandCraft",
	"Phillips",
	"Toshiba or Microsoft",
	"LSI",
	"(unannounced)",
	"(unannounced)",
	"Lexra",
	"RMI",
	"Cavium",
};
/* Number of known company names; IDs >= this have no name entry. */
#define	ncidnames __arraycount(cidnames)
734
735 #if defined(MIPS1)
736 /*
737 * MIPS-I locore function vector
738 */
739
740 static void
mips1_vector_init(const struct splsw * splsw)741 mips1_vector_init(const struct splsw *splsw)
742 {
743 extern char mips1_utlb_miss[], mips1_utlb_miss_end[];
744 extern char mips1_exception[], mips1_exception_end[];
745
746 /*
747 * Copy down exception vector code.
748 */
749 if (mips1_utlb_miss_end - mips1_utlb_miss > 0x80)
750 panic("startup: UTLB vector code too large");
751 if (mips1_exception_end - mips1_exception > 0x80)
752 panic("startup: general exception vector code too large");
753 memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips1_utlb_miss,
754 mips1_exception_end - mips1_utlb_miss);
755
756 /*
757 * Copy locore-function vector.
758 */
759 mips_locore_jumpvec = mips1_locore_vec;
760
761 /*
762 * Clear out the I and D caches.
763 */
764 mips_icache_sync_all();
765 mips_dcache_wbinv_all();
766 }
767 #endif /* MIPS1 */
768
769 #if defined(MIPS3)
/*
 * Install the MIPS-III (r4000-class) exception vectors and the locore
 * function vector, then take over exception handling from the boot
 * environment by clearing SR[BEV].
 *
 * splsw is unused here; it is accepted so all per-ISA vector-init
 * routines share one signature.
 */
static void
mips3_vector_init(const struct splsw *splsw)
{
	/* r4000 exception handler address and end */
	extern char mips3_exception[], mips3_exception_end[];

	/* TLB miss handler address and end */
	extern char mips3_tlb_miss[];
	extern char mips3_xtlb_miss[];

	/* Cache error handler */
	extern char mips3_cache[];

	/*
	 * Copy down exception vector code.  The handlers must be laid
	 * out back-to-back at 0x80-byte (128-byte) intervals -- the
	 * checks below verify that spacing -- so a single memcpy of the
	 * whole region lands each handler on its architectural vector.
	 */
	if (mips3_xtlb_miss - mips3_tlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "UTLB");
	if (mips3_cache - mips3_xtlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "XTLB");
	if (mips3_exception - mips3_cache != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "Cache error");
	if (mips3_exception_end - mips3_exception > 0x80)
		panic("startup: %s vector code too large",
		    "General exception");

	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips3_tlb_miss,
	    mips3_exception_end - mips3_tlb_miss);

	/*
	 * Copy locore-function vector.
	 */
	mips_locore_jumpvec = mips3_locore_vec;

	/* Flush caches so instruction fetch sees the new vectors. */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
}
813 #endif /* MIPS3 */
814
815 #if defined(MIPS3_LOONGSON2)
/*
 * Install the Loongson-2 exception vectors and the locore function
 * vector, then take over exception handling from the boot environment
 * by clearing SR[BEV].  Mirrors mips3_vector_init() but uses the
 * Loongson-specific handler code.
 *
 * splsw is unused here; it is accepted so all per-ISA vector-init
 * routines share one signature.
 */
static void
loongson2_vector_init(const struct splsw *splsw)
{
	/* r4000 exception handler address and end */
	extern char loongson2_exception[], loongson2_exception_end[];

	/* TLB miss handler address and end */
	extern char loongson2_tlb_miss[];
	extern char loongson2_xtlb_miss[];

	/* Cache error handler */
	extern char loongson2_cache[];

	/*
	 * Copy down exception vector code.  The handlers must sit at
	 * consecutive 0x80-byte offsets (verified below) so one memcpy
	 * places each one on its architectural vector.
	 */
	if (loongson2_xtlb_miss - loongson2_tlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "UTLB");
	if (loongson2_cache - loongson2_xtlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "XTLB");
	if (loongson2_exception - loongson2_cache != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "Cache error");
	if (loongson2_exception_end - loongson2_exception > 0x80)
		panic("startup: %s vector code too large",
		    "General exception");

	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, loongson2_tlb_miss,
	    loongson2_exception_end - loongson2_tlb_miss);

	/*
	 * Copy locore-function vector.
	 */
	mips_locore_jumpvec = loongson2_locore_vec;

	/* Flush caches so instruction fetch sees the new vectors. */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
}
860 #endif /* MIPS3_LOONGSON2 */
861
862 #if defined(MIPS32)
863 static void
mips32_vector_init(const struct splsw * splsw)864 mips32_vector_init(const struct splsw *splsw)
865 {
866 /* r4000 exception handler address */
867 extern char mips32_exception[];
868
869 /* TLB miss handler addresses */
870 extern char mips32_tlb_miss[];
871
872 /* Cache error handler */
873 extern char mips32_cache[];
874
875 /* MIPS32 interrupt exception handler */
876 extern char mips32_intr[], mips32_intr_end[];
877
878 /*
879 * Copy down exception vector code.
880 */
881
882 if (mips32_cache - mips32_tlb_miss != 0x100)
883 panic("startup: %s vector code not 128 bytes in length",
884 "UTLB");
885 if (mips32_exception - mips32_cache != 0x80)
886 panic("startup: %s vector code not 128 bytes in length",
887 "Cache error");
888 if (mips32_intr - mips32_exception != 0x80)
889 panic("startup: %s vector code not 128 bytes in length",
890 "General exception");
891 if (mips32_intr_end - mips32_intr > 0x80)
892 panic("startup: %s vector code too large",
893 "interrupt exception");
894
895 memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips32_tlb_miss,
896 mips32_intr_end - mips32_tlb_miss);
897
898 /*
899 * Copy locore-function vector.
900 */
901 mips_locore_jumpvec = mips32_locore_vec;
902
903 mips_icache_sync_all();
904 mips_dcache_wbinv_all();
905
906 /* Clear BEV in SR so we start handling our own exceptions */
907 mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
908
909 mips_watchpoint_init();
910 }
911 #endif /* MIPS32 */
912
913 #if defined(MIPS32R2)
914 static void
mips32r2_vector_init(const struct splsw * splsw)915 mips32r2_vector_init(const struct splsw *splsw)
916 {
917 /* r4000 exception handler address */
918 extern char mips32r2_exception[];
919
920 /* TLB miss handler addresses */
921 extern char mips32r2_tlb_miss[];
922
923 /* Cache error handler */
924 extern char mips32r2_cache[];
925
926 /* MIPS32 interrupt exception handler */
927 extern char mips32r2_intr[], mips32r2_intr_end[];
928
929 /*
930 * Copy down exception vector code.
931 */
932 if (mips32r2_cache - mips32r2_tlb_miss != 0x100)
933 panic("startup: %s vector code not 128 bytes in length",
934 "UTLB");
935 if (mips32r2_exception - mips32r2_cache != 0x80)
936 panic("startup: %s vector code not 128 bytes in length",
937 "Cache error");
938 if (mips32r2_intr - mips32r2_exception != 0x80)
939 panic("startup: %s vector code not 128 bytes in length",
940 "General exception");
941 if (mips32r2_intr_end - mips32r2_intr > 0x80)
942 panic("startup: %s vector code too large",
943 "interrupt exception");
944
945 memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips32r2_tlb_miss,
946 mips32r2_intr_end - mips32r2_tlb_miss);
947
948 /*
949 * Let's see if this cpu has USERLOCAL or DSP V2 ASE...
950 */
951 if (mipsNN_cp0_config2_read() & MIPSNN_CFG2_M) {
952 const uint32_t cfg3 = mipsNN_cp0_config3_read();
953 if (cfg3 & MIPSNN_CFG3_ULRI) {
954 mips_options.mips_cpu_flags |= CPU_MIPS_HAVE_USERLOCAL;
955 }
956 if (cfg3 & MIPSNN_CFG3_DSP2P) {
957 mips_options.mips_cpu_flags |= CPU_MIPS_HAVE_DSP;
958 }
959 }
960
961 /*
962 * If this CPU doesn't have a COP0 USERLOCAL register, at the end
963 * of cpu_switch resume overwrite the instructions which update it.
964 */
965 if (!MIPS_HAS_USERLOCAL) {
966 extern uint32_t mips32r2_cpu_switch_resume[];
967 for (uint32_t *insnp = mips32r2_cpu_switch_resume;; insnp++) {
968 KASSERT(insnp[0] != JR_RA);
969 if (insnp[0] == _LOAD_V0_L_PRIVATE_A0
970 && insnp[1] == _MTC0_V0_USERLOCAL) {
971 insnp[0] = JR_RA;
972 insnp[1] = 0; /* NOP */
973 break;
974 }
975 }
976 }
977
978 /*
979 * Copy locore-function vector.
980 */
981 mips_locore_jumpvec = mips32r2_locore_vec;
982
983 mips_icache_sync_all();
984 mips_dcache_wbinv_all();
985
986 /* Clear BEV in SR so we start handling our own exceptions */
987 mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
988
989 mips_watchpoint_init();
990 }
991 #endif /* MIPS32R2 */
992
993 #if defined(MIPS64)
/*
 * Install the MIPS64 exception vectors and the locore function vector,
 * then take over exception handling from the boot environment by
 * clearing SR[BEV].
 *
 * splsw is unused here; it is accepted so all per-ISA vector-init
 * routines share one signature.
 */
static void
mips64_vector_init(const struct splsw *splsw)
{
	/* r4000 exception handler address */
	extern char mips64_exception[];

	/* TLB miss handler addresses */
	extern char mips64_tlb_miss[];
	extern char mips64_xtlb_miss[];

	/* Cache error handler */
	extern char mips64_cache[];

	/* MIPS64 interrupt exception handler */
	extern char mips64_intr[], mips64_intr_end[];

	/*
	 * Copy down exception vector code.  The handlers must sit at
	 * consecutive 0x80-byte offsets (verified below) so one memcpy
	 * places each one on its architectural vector.
	 */
	if (mips64_xtlb_miss - mips64_tlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "UTLB");
	if (mips64_cache - mips64_xtlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "XTLB");
	if (mips64_exception - mips64_cache != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "Cache error");
	if (mips64_intr - mips64_exception != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "General exception");
	if (mips64_intr_end - mips64_intr > 0x80)
		panic("startup: %s vector code too large",
		    "interrupt exception");

	memcpy((void *)MIPS_UTLB_MISS_EXC_VEC, mips64_tlb_miss,
	    mips64_intr_end - mips64_tlb_miss);

	/*
	 * Copy locore-function vector.
	 */
	mips_locore_jumpvec = mips64_locore_vec;

	/* Flush caches so instruction fetch sees the new vectors. */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);

	mips_watchpoint_init();
}
1046 #endif /* MIPS64 */
1047
1048 #if defined(MIPS64R2)
/*
 * Install the MIPS64R2 exception vectors and the locore function
 * vector, detect the USERLOCAL and DSPv2 ASEs, and take over exception
 * handling from the boot environment by clearing SR[BEV].
 *
 * Unlike the other vector-init routines this one is not static and is
 * run on every CPU: the exception code is copied to this CPU's EBASE,
 * while the one-time global actions (jump vector, run-time patching)
 * are restricted to cpunum 0.
 *
 * splsw is unused here; it is accepted so all per-ISA vector-init
 * routines share one signature.
 */
void
mips64r2_vector_init(const struct splsw *splsw)
{
	/* r4000 exception handler address */
	extern char mips64r2_exception[];

	/* TLB miss handler addresses */
	extern char mips64r2_tlb_miss[];
	extern char mips64r2_xtlb_miss[];

	/* Cache error handler */
	extern char mips64r2_cache[];

	/* MIPS64 interrupt exception handler */
	extern char mips64r2_intr[], mips64r2_intr_end[];

	/*
	 * Copy down exception vector code.  The handlers must sit at
	 * consecutive 0x80-byte offsets (verified below) so one memcpy
	 * places each one on its architectural vector.
	 */
	if (mips64r2_xtlb_miss - mips64r2_tlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "UTLB");
	if (mips64r2_cache - mips64r2_xtlb_miss != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "XTLB");
	if (mips64r2_exception - mips64r2_cache != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "Cache error");
	if (mips64r2_intr - mips64r2_exception != 0x80)
		panic("startup: %s vector code not 128 bytes in length",
		    "General exception");
	if (mips64r2_intr_end - mips64r2_intr > 0x80)
		panic("startup: %s vector code too large",
		    "interrupt exception");

	const intptr_t ebase = (intptr_t)mipsNN_cp0_ebase_read();
	const int cpunum = ebase & MIPS_EBASE_CPUNUM;

	// This may need to be on CPUs other CPU0 so use EBASE to fetch
	// the appropriate address for exception code. EBASE also contains
	// the cpunum so remove that.
	memcpy((void *)(intptr_t)(ebase & ~MIPS_EBASE_CPUNUM), mips64r2_tlb_miss,
	    mips64r2_intr_end - mips64r2_tlb_miss);

	/*
	 * Let's see if this cpu has USERLOCAL or DSP V2 ASE...
	 * (Config3 is only present when Config2[M] is set.)
	 */
	if (mipsNN_cp0_config2_read() & MIPSNN_CFG2_M) {
		const uint32_t cfg3 = mipsNN_cp0_config3_read();
		if (cfg3 & MIPSNN_CFG3_ULRI) {
			mips_options.mips_cpu_flags |= CPU_MIPS_HAVE_USERLOCAL;
		}
		if (cfg3 & MIPSNN_CFG3_DSP2P) {
			mips_options.mips_cpu_flags |= CPU_MIPS_HAVE_DSP;
		}
	}

	/*
	 * If this CPU doesn't have a COP0 USERLOCAL register, at the end
	 * of cpu_switch resume overwrite the instructions which update it.
	 * Only CPU 0 patches the (shared) kernel text; the KASSERT
	 * guarantees the pattern is found before the function's final
	 * "jr ra".
	 */
	if (!MIPS_HAS_USERLOCAL && cpunum == 0) {
		extern uint32_t mips64r2_cpu_switch_resume[];
		for (uint32_t *insnp = mips64r2_cpu_switch_resume;; insnp++) {
			KASSERT(insnp[0] != JR_RA);
			if (insnp[0] == _LOAD_V0_L_PRIVATE_A0
			    && insnp[1] == _MTC0_V0_USERLOCAL) {
				insnp[0] = JR_RA;
				insnp[1] = 0;	/* NOP */
				break;
			}
		}
	}

	/*
	 * Copy locore-function vector (global state: primary CPU only).
	 */
	if (cpunum == 0)
		mips_locore_jumpvec = mips64r2_locore_vec;

	/* Flush caches so instruction fetch sees vectors and patches. */
	mips_icache_sync_all();
	mips_dcache_wbinv_all();

	/* Clear BEV in SR so we start handling our own exceptions */
	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);

	mips_watchpoint_init();
}
1138 #endif /* MIPS64R2 */
1139
1140 /*
1141 * Do all the stuff that locore normally does before calling main(),
1142 * that is common to all mips-CPU NetBSD ports.
1143 *
1144 * The principal purpose of this function is to examine the
1145 * variable cpu_id, into which the kernel locore start code
1146 * writes the CPU ID register, and to then copy appropriate
1147 * code into the CPU exception-vector entries and the jump tables
1148 * used to hide the differences in cache and TLB handling in
1149 * different MIPS CPUs.
1150 *
1151 * This should be the very first thing called by each port's
1152 * init_main() function.
1153 */
1154
1155 /*
1156 * Initialize the hardware exception vectors, and the jump table used to
1157 * call locore cache and TLB management functions, based on the kind
1158 * of CPU the kernel is running on.
1159 */
void
mips_vector_init(const struct splsw *splsw, bool multicpu_p)
{
	struct mips_options * const opts = &mips_options;
	const struct pridtab *ct;
	const mips_prid_t cpu_id = opts->mips_cpu_id;

	/*
	 * Find this CPU in the table of known CPUs, matching on company
	 * ID and implementation, and additionally on revision and
	 * company options when the table entry demands it (negative
	 * table values mean "match any").
	 */
	for (ct = cputab; ct->cpu_name != NULL; ct++) {
		if (MIPS_PRID_CID(cpu_id) != ct->cpu_cid ||
		    MIPS_PRID_IMPL(cpu_id) != ct->cpu_pid)
			continue;
		if (ct->cpu_rev >= 0 &&
		    MIPS_PRID_REV(cpu_id) != ct->cpu_rev)
			continue;
		if (ct->cpu_copts >= 0 &&
		    MIPS_PRID_COPTS(cpu_id) != ct->cpu_copts)
			continue;

		opts->mips_cpu = ct;
		opts->mips_cpu_arch = ct->cpu_isa;
		opts->mips_num_tlb_entries = ct->cpu_ntlb;
		break;
	}

	if (opts->mips_cpu == NULL)
		panic("CPU type (0x%x) not supported", cpu_id);

#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	if (MIPS_PRID_CID(cpu_id) != 0) {
		/* MIPS32/MIPS64, use coprocessor 0 config registers */
		uint32_t cfg, cfg1, cfg4;

		cfg = mips3_cp0_config_read();
		cfg1 = mipsNN_cp0_config1_read();
		/* Config4 is only read when the CPU table says it exists. */
		if (opts->mips_cpu->cpu_cp0flags & MIPS_CP0FL_CONFIG4)
			cfg4 = mipsNN_cp0_config4_read();
		else
			cfg4 = 0;

		/* pick CPU type */
		switch (MIPSNN_GET(CFG_AT, cfg)) {
		case MIPSNN_CFG_AT_MIPS32:
			opts->mips_cpu_arch = CPU_ARCH_MIPS32;
			break;
		case MIPSNN_CFG_AT_MIPS64:
			opts->mips_cpu_arch = CPU_ARCH_MIPS64;
			break;
		case MIPSNN_CFG_AT_MIPS64S:
		default:
			panic("MIPS32/64 architecture type %d not supported",
			    MIPSNN_GET(CFG_AT, cfg));
		}

		/* Promote to the R2 architecture levels per Config[AR]. */
		switch (MIPSNN_GET(CFG_AR, cfg)) {
		case MIPSNN_CFG_AR_REV1:
			break;
		case MIPSNN_CFG_AR_REV2:
			switch (opts->mips_cpu_arch) {
			case CPU_ARCH_MIPS32:
				opts->mips_cpu_arch = CPU_ARCH_MIPS32R2;
				break;
			case CPU_ARCH_MIPS64:
				opts->mips_cpu_arch = CPU_ARCH_MIPS64R2;
				break;
			default:
				printf("WARNING: MIPS32/64 arch %d revision %d "
				    "unknown!\n", opts->mips_cpu_arch,
				    MIPSNN_GET(CFG_AR, cfg));
				break;
			}
			break;
		default:
			printf("WARNING: MIPS32/64 arch revision %d "
			    "unknown!\n", MIPSNN_GET(CFG_AR, cfg));
			break;
		}

		/* figure out MMU type (and number of TLB entries) */
		switch (MIPSNN_GET(CFG_MT, cfg)) {
		case MIPSNN_CFG_MT_TLB:
			/*
			 * Config1[MMUSize-1] defines the number of TLB
			 * entries minus 1, allowing up to 64 TLBs to be
			 * defined.  For MIPS32R2 and MIPS64R2 and later
			 * if the Config4[MMUExtDef] field is 1 then the
			 * Config4[MMUSizeExt] field is an extension of
			 * Config1[MMUSize-1] field.
			 */
			opts->mips_num_tlb_entries = MIPSNN_CFG1_MS(cfg1);
			if (__SHIFTOUT(cfg4, MIPSNN_CFG4_MMU_EXT_DEF) ==
			    MIPSNN_CFG4_MMU_EXT_DEF_MMU) {
				opts->mips_num_tlb_entries +=
				    __SHIFTOUT(cfg4, MIPSNN_CFG4_MMU_SIZE_EXT) <<
				    popcount(MIPSNN_CFG1_MS_MASK);
			}
			break;
		case MIPSNN_CFG_MT_NONE:
		case MIPSNN_CFG_MT_BAT:
		case MIPSNN_CFG_MT_FIXED:
		default:
			panic("MIPS32/64 MMU type %d not supported",
			    MIPSNN_GET(CFG_MT, cfg));
		}
	}
#endif /* (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */

	/* Sanity: both must have been set by the table or CP0 probing. */
	if (opts->mips_cpu_arch < 1)
		panic("Unknown CPU ISA for CPU type 0x%x", cpu_id);
	if (opts->mips_num_tlb_entries < 1)
		panic("Unknown number of TLBs for CPU type 0x%x", cpu_id);

	/*
	 * Check CPU-specific flags.
	 */
	opts->mips_cpu_flags = opts->mips_cpu->cpu_flags;
	opts->mips_has_r4k_mmu = (opts->mips_cpu_flags & CPU_MIPS_R4K_MMU) != 0;
	opts->mips_has_llsc = (opts->mips_cpu_flags & CPU_MIPS_NO_LLSC) == 0;
#if defined(MIPS3_4100)
	/* The VR4100 has a non-standard page size/shift. */
	if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4100)
		opts->mips3_pg_shift = MIPS3_4100_PG_SHIFT;
	else
#endif
		opts->mips3_pg_shift = MIPS3_DEFAULT_PG_SHIFT;

	/* Device memory defaults to uncached. */
	opts->mips3_cca_devmem = CCA_UNCACHED;
	if (opts->mips_cpu_flags & CPU_MIPS_HAVE_SPECIAL_CCA) {
		uint32_t cca;

		/* Use the CPU-specific cache coherency attribute. */
		cca = (opts->mips_cpu_flags & CPU_MIPS_CACHED_CCA_MASK) >>
		    CPU_MIPS_CACHED_CCA_SHIFT;
		opts->mips3_pg_cached = MIPS3_CCA_TO_PG(cca);
#ifndef __mips_o32
		opts->mips3_xkphys_cached = MIPS_PHYS_TO_XKPHYS(cca, 0);
#endif
	} else {
		opts->mips3_pg_cached = MIPS3_DEFAULT_PG_CACHED;
#ifndef __mips_o32
		opts->mips3_xkphys_cached = MIPS3_DEFAULT_XKPHYS_CACHED;
#endif
	}

#ifdef __HAVE_MIPS_MACHDEP_CACHE_CONFIG
	mips_machdep_cache_config();
#endif

	/*
	 * if 'splsw' is NULL, use standard SPL with COP0 status/cause
	 * otherwise use chip-specific splsw
	 */
	if (splsw == NULL) {
		mips_splsw = std_splsw;
#ifdef PARANOIA
		std_splsw_test();	/* only works with std_splsw */
#endif
	} else {
		mips_splsw = *splsw;
	}

	/*
	 * Determine cache configuration and initialize our cache
	 * frobbing routine function pointers.
	 */
	mips_config_cache();

	/*
	 * We default to RAS atomic ops since they are the lowest overhead.
	 */
#ifdef MULTIPROCESSOR
	if (multicpu_p) {
		/*
		 * If we could have multiple CPUs active,
		 * use the ll/sc variants.
		 */
		mips_locore_atomicvec = mips_llsc_locore_atomicvec;
	}
#endif
	/*
	 * Now initialize our ISA-dependent function vector.  Each case
	 * invalidates the TLB, sets the page mask and wired count, and
	 * installs the per-ISA exception vectors and locore switch.
	 */
	switch (opts->mips_cpu_arch) {
#if defined(MIPS1)
	case CPU_ARCH_MIPS1:
		(*mips1_locore_vec.ljv_tlb_invalidate_all)();
		mips1_vector_init(splsw);
		mips_locoresw = mips1_locoresw;
		break;
#endif
#if defined(MIPS3)
	case CPU_ARCH_MIPS3:
	case CPU_ARCH_MIPS4:
		mips3_tlb_probe();
#if defined(MIPS3_4100)
		if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4100)
			mips3_cp0_pg_mask_write(MIPS4100_PG_SIZE_TO_MASK(PAGE_SIZE));
		else
#endif
		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
		mips3_cp0_wired_write(0);
#if defined(MIPS3_LOONGSON2)
		if (opts->mips_cpu_flags & CPU_MIPS_LOONGSON2) {
			(*loongson2_locore_vec.ljv_tlb_invalidate_all)();
			mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
			loongson2_vector_init(splsw);
			mips_locoresw = loongson2_locoresw;
			opts->mips3_cca_devmem = CCA_ACCEL;
			break;
		}
#endif /* MIPS3_LOONGSON2 */
		(*mips3_locore_vec.ljv_tlb_invalidate_all)();
		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
		mips3_vector_init(splsw);
		mips_locoresw = mips3_locoresw;
		break;

#endif /* MIPS3 */
#if defined(MIPS32)
	case CPU_ARCH_MIPS32:
		mips3_tlb_probe();
		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
		mips3_cp0_wired_write(0);
		(*mips32_locore_vec.ljv_tlb_invalidate_all)();
		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
		mips32_vector_init(splsw);
		mips_locoresw = mips32_locoresw;
		break;
#endif
#if defined(MIPS32R2)
	case CPU_ARCH_MIPS32R2:
		mips3_tlb_probe();
		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
		mips3_cp0_wired_write(0);
		(*mips32r2_locore_vec.ljv_tlb_invalidate_all)();
		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
		mips32r2_vector_init(splsw);
		mips_locoresw = mips32r2_locoresw;
		break;
#endif
#if defined(MIPS64)
	case CPU_ARCH_MIPS64: {
		mips3_tlb_probe();
		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
		mips3_cp0_wired_write(0);
		(*mips64_locore_vec.ljv_tlb_invalidate_all)();
		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
		mips64_vector_init(splsw);
		mips_locoresw = mips64_locoresw;
		break;
	}
#endif
#if defined(MIPS64R2)
	case CPU_ARCH_MIPS64R2: {
		mips3_tlb_probe();
		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_TO_MASK(PAGE_SIZE));
		mips3_cp0_wired_write(0);
		(*mips64r2_locore_vec.ljv_tlb_invalidate_all)();
		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
		mips64r2_vector_init(splsw);
		mips_locoresw = mips64r2_locoresw;
		break;
	}
#endif
	default:
		printf("cpu_arch 0x%x: not supported\n", opts->mips_cpu_arch);
		cpu_reboot(RB_HALT, NULL);
	}

	/*
	 * Now that the splsw and locoresw have been filled in, fixup the
	 * jumps to any stubs to actually jump to the real routines.
	 */
	extern uint32_t _ftext[];
	extern uint32_t _etext[];
	mips_fixup_stubs(_ftext, _etext);

#if (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	/*
	 * Install power-saving idle routines.
	 */
	if ((opts->mips_cpu_flags & CPU_MIPS_USE_WAIT) &&
	    !(opts->mips_cpu_flags & CPU_MIPS_NO_WAIT))
		mips_locoresw.lsw_cpu_idle = mips_wait_idle;
#endif /* (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */
}
1443
1444 void
mips_set_wbflush(void (* flush_fn)(void))1445 mips_set_wbflush(void (*flush_fn)(void))
1446 {
1447 mips_locoresw.lsw_wbflush = flush_fn;
1448 (*flush_fn)();
1449 }
1450
1451 #if defined(MIPS3_PLUS)
/*
 * Probe the MIPS3+ TLB's capabilities via CP0 and record them in
 * mips_options: the supported page-mask bits and, on 64-bit CPUs,
 * the implemented VPN and PFN bits.
 */
static void
mips3_tlb_probe(void)
{
	struct mips_options * const opts = &mips_options;
	opts->mips3_tlb_pg_mask = mips3_cp0_tlb_page_mask_probe();
	if (CPUIS64BITS) {
		opts->mips3_tlb_vpn_mask = mips3_cp0_tlb_entry_hi_probe();
		opts->mips3_tlb_vpn_mask |= PAGE_MASK;
		/*
		 * Shift out and back in to drop the top two bits
		 * (the EntryHi region bits), keeping only the VA bits.
		 */
		opts->mips3_tlb_vpn_mask <<= 2;
		opts->mips3_tlb_vpn_mask >>= 2;
		opts->mips3_tlb_pfn_mask = mips3_cp0_tlb_entry_lo_probe();
#if defined(_LP64) && defined(ENABLE_MIPS_16KB_PAGE)
		/*
		 * 16KB pages could cause our page table being able to address
		 * a larger address space than the actual chip supports.  So
		 * we need to limit the address space to what it can really
		 * address.
		 */
		if (mips_vm_maxuser_address > opts->mips3_tlb_vpn_mask + 1)
			mips_vm_maxuser_address = opts->mips3_tlb_vpn_mask + 1;
#endif
	}
}
1475 #endif
1476
/*
 * Return a human-readable description of a cache's associativity.
 * 0 means fully associative, 1 means direct-mapped, anything else is
 * formatted as "N-way set-associative".  The formatted string lives in
 * a static buffer, so the result is only valid until the next call.
 */
static const char *
wayname(int ways)
{
	static char buf[sizeof("xxx-way set-associative")];

#ifdef DIAGNOSTIC
	/* The static buffer only has room for three digits. */
	if (ways > 999)
		panic("mips cache - too many ways (%d)", ways);
#endif

	if (ways == 0)
		return "fully set-associative";
	if (ways == 1)
		return "direct-mapped";

	snprintf(buf, sizeof(buf), "%d-way set-associative", ways);
	return buf;
}
1497
/*
 * Identify product revision IDs of CPU and FPU.
 *
 * Prints the autoconf banner lines for the CPU device: vendor, model,
 * revision, FPU, TLB geometry, and L1/L2 cache configuration.
 */
void
cpu_identify(device_t dev)
{
	const struct mips_options * const opts = &mips_options;
	const struct mips_cache_info * const mci = &mips_cache_info;
	const mips_prid_t cpu_id = opts->mips_cpu_id;
	const mips_prid_t fpu_id = opts->mips_fpu_id;
	/* Indexed by the boolean mci_*_write_through flags. */
	static const char * const wtnames[] = {
		"write-back",
		"write-through",
	};
	const char *cpuname, *fpuname;
	int i;

	cpuname = opts->mips_cpu->cpu_name;
#ifdef MIPS64_OCTEON
	/* Cavium parts get a more specific model name from the PRID. */
	if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_CAVIUM) {
		cpuname = octeon_cpu_model(cpu_id);
	}
#endif

	/* Look the FPU up by its PRID (company + implementation). */
	fpuname = NULL;
	for (i = 0; i < sizeof(fputab)/sizeof(fputab[0]); i++) {
		if (MIPS_PRID_CID(fpu_id) == fputab[i].cpu_cid &&
		    MIPS_PRID_IMPL(fpu_id) == fputab[i].cpu_pid) {
			fpuname = fputab[i].cpu_name;
			break;
		}
	}
	/* Fallbacks for FPUs whose PRID mirrors (or offsets) the CPU's. */
	if (fpuname == NULL && MIPS_PRID_IMPL(fpu_id) == MIPS_PRID_IMPL(cpu_id))
		fpuname = "built-in FPU";
	if (MIPS_PRID_IMPL(cpu_id) == MIPS_R4700)	/* FPU PRid is 0x20 */
		fpuname = "built-in FPU";
	if (MIPS_PRID_IMPL(cpu_id) == MIPS_RC64470)	/* FPU PRid is 0x21 */
		fpuname = "built-in FPU";
#ifdef MIPSNN
	if (CPUISMIPSNN) {
		uint32_t cfg1;

		switch (MIPS_PRID_CID(cpu_id)) {
		/*
		 * CPUs from the following companies have a built-in
		 * FPU if Config1[FP] is set.
		 */
		case MIPS_PRID_CID_SIBYTE:
		case MIPS_PRID_CID_CAVIUM:
			cfg1 = mipsNN_cp0_config1_read();
			if (cfg1 & MIPSNN_CFG1_FP)
				fpuname = "built-in FPU";
			break;
		}
	}
#endif

	/* Print the vendor name when the company ID is known. */
	if (opts->mips_cpu->cpu_cid != 0) {
		/*
		 * NOTE(review): bounds check uses "<=" -- if ncidnames is
		 * the array count of cidnames this indexes one past the
		 * end when cpu_cid == ncidnames; confirm against the
		 * cidnames definition.
		 */
		if (opts->mips_cpu->cpu_cid <= ncidnames)
			aprint_normal("%s ", cidnames[opts->mips_cpu->cpu_cid]);
		else if (opts->mips_cpu->cpu_cid == MIPS_PRID_CID_INGENIC) {
			aprint_normal("Ingenic ");
		} else {
			aprint_normal("Unknown Company ID - 0x%x",
			    opts->mips_cpu->cpu_cid);
			aprint_normal_dev(dev, "");
		}
	}
	if (cpuname != NULL)
		aprint_normal("%s (0x%x)", cpuname, cpu_id);
	else
		aprint_normal("unknown CPU type (0x%x)", cpu_id);
	/* Prehistoric (CID 0) parts encode major.minor revision. */
	if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_PREHISTORIC)
		aprint_normal(" Rev. %d.%d", MIPS_PRID_REV_MAJ(cpu_id),
		    MIPS_PRID_REV_MIN(cpu_id));
	else
		aprint_normal(" Rev. %d", MIPS_PRID_REV(cpu_id));

	if (fpuname != NULL)
		aprint_normal(" with %s", fpuname);
	else
		aprint_normal(" with unknown FPC type (0x%x)", fpu_id);
	if (opts->mips_fpu_id != 0) {
		if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_PREHISTORIC)
			aprint_normal(" Rev. %d.%d", MIPS_PRID_REV_MAJ(fpu_id),
			    MIPS_PRID_REV_MIN(fpu_id));
		else
			aprint_normal(" Rev. %d", MIPS_PRID_REV(fpu_id));
	}
	if (opts->mips_cpu_flags & MIPS_HAS_DSP) {
		aprint_normal(" and DSPv2");
	}
	aprint_normal("\n");

	if (MIPS_PRID_CID(cpu_id) == MIPS_PRID_CID_PREHISTORIC &&
	    MIPS_PRID_RSVD(cpu_id) != 0) {
		aprint_normal_dev(dev,
		    "NOTE: top 8 bits of prehistoric PRID not 0!\n");
		aprint_normal_dev(dev, "Please mail port-mips@NetBSD.org "
		    "with %s dmesg lines.\n", device_xname(dev));
	}

	switch (opts->mips_cpu_arch) {
#if defined(MIPS1)
	case CPU_ARCH_MIPS1:
		if (mci->mci_picache_size)
			aprint_normal_dev(dev, "%dKB/%dB %s Instruction cache, "
			    "%d TLB entries\n", mci->mci_picache_size / 1024,
			    mci->mci_picache_line_size,
			    wayname(mci->mci_picache_ways),
			    opts->mips_num_tlb_entries);
		else
			aprint_normal_dev(dev, "%d TLB entries\n",
			    opts->mips_num_tlb_entries);
		if (mci->mci_pdcache_size)
			aprint_normal_dev(dev, "%dKB/%dB %s %s Data cache\n",
			    mci->mci_pdcache_size / 1024,
			    mci->mci_pdcache_line_size,
			    wayname(mci->mci_pdcache_ways),
			    wtnames[mci->mci_pdcache_write_through]);
		break;
#endif /* MIPS1 */
#if (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	case CPU_ARCH_MIPS3:
	case CPU_ARCH_MIPS4:
	case CPU_ARCH_MIPS32:
	case CPU_ARCH_MIPS32R2:
	case CPU_ARCH_MIPS64:
	case CPU_ARCH_MIPS64R2: {
		/* Unit-prefix ladder for formatting power-of-two sizes. */
		const char *sufx = "KMGTPE";
		uint32_t pg_mask;
		aprint_normal_dev(dev, "%d TLB entries",
		    opts->mips_num_tlb_entries);
#if !defined(__mips_o32)
		if (CPUIS64BITS) {
			int64_t pfn_mask;
			/* Count implemented VA bits from the VPN mask. */
			i = ffs(~(opts->mips3_tlb_vpn_mask >> 31)) + 30;
			aprint_normal(", %d%cB (%d-bit) VAs",
			    1 << (i % 10), sufx[(i / 10) - 1], i);
			/* Count implemented PA bits from the PFN mask. */
			for (i = 64, pfn_mask = opts->mips3_tlb_pfn_mask << 6;
			     pfn_mask > 0; i--, pfn_mask <<= 1)
				;
			aprint_normal(", %d%cB (%d-bit) PAs",
			    1 << (i % 10), sufx[(i / 10) - 1], i);
		}
#endif
		/*
		 * Walk the page-mask bits in pairs to find the largest
		 * supported page size (each pair quadruples the size).
		 */
		for (i = 4, pg_mask = opts->mips3_tlb_pg_mask >> 13;
		     pg_mask != 0; ) {
			if ((pg_mask & 3) != 3)
				break;
			pg_mask >>= 2;
			i *= 4;
			if (i == 1024) {
				i = 1;
				sufx++;
			}
		}
		aprint_normal(", %d%cB max page size\n", i, sufx[0]);
		if (mci->mci_picache_size)
			aprint_normal_dev(dev,
			    "%dKB/%dB %s L1 instruction cache\n",
			    mci->mci_picache_size / 1024,
			    mci->mci_picache_line_size,
			    wayname(mci->mci_picache_ways));
		if (mci->mci_pdcache_size)
			aprint_normal_dev(dev,
			    "%dKB/%dB %s %s %sL1 data cache\n",
			    mci->mci_pdcache_size / 1024,
			    mci->mci_pdcache_line_size,
			    wayname(mci->mci_pdcache_ways),
			    wtnames[mci->mci_pdcache_write_through],
			    ((opts->mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
				? "coherent " : ""));
		if (mci->mci_sdcache_line_size)
			aprint_normal_dev(dev,
			    "%dKB/%dB %s %s L2 %s cache\n",
			    mci->mci_sdcache_size / 1024,
			    mci->mci_sdcache_line_size,
			    wayname(mci->mci_sdcache_ways),
			    wtnames[mci->mci_sdcache_write_through],
			    mci->mci_scache_unified ? "unified" : "data");
		break;
	}
#endif /* (MIPS3 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0 */
	default:
		panic("cpu_identify: impossible");
	}
}
1686
/*
 * Set registers on exec.
 * Clear all registers except sp, pc, and t9.
 * $sp is set to the stack pointer passed in. $pc is set to the entry
 * point given by the exec_package passed in, as is $t9 (used for PIC
 * code by the MIPS elf abi).
 *
 * l	lwp being exec'd; its user trapframe is rebuilt here.
 * pack	exec package supplying the program entry point.
 * stack initial user stack pointer.
 */
void
setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct trapframe * const tf = l->l_md.md_utf;
	struct proc * const p = l->l_proc;

	/* Zero the whole frame, then fill in the few live registers. */
	memset(tf, 0, sizeof(struct trapframe));
	tf->tf_regs[_R_SP] = (intptr_t)stack;
	tf->tf_regs[_R_PC] = (intptr_t)pack->ep_entry & ~3;	/* word-align */
	tf->tf_regs[_R_T9] = (intptr_t)pack->ep_entry & ~3;	/* abicall requirement */
	tf->tf_regs[_R_SR] = PSL_USERSET;
#if !defined(__mips_o32)
	/*
	 * allow 64bit ops in userland for non-O32 ABIs
	 */
	if (p->p_md.md_abi == _MIPS_BSD_API_N32
	    && (CPUISMIPS64 || CPUISMIPS64R2)) {
		tf->tf_regs[_R_SR] |= MIPS_SR_PX;
	} else if (p->p_md.md_abi != _MIPS_BSD_API_O32) {
		tf->tf_regs[_R_SR] |= MIPS_SR_UX;
	}
	/* New ABIs get the full 32 double-precision FP registers. */
	if (_MIPS_SIM_NEWABI_P(p->p_md.md_abi))
		tf->tf_regs[_R_SR] |= MIPS3_SR_FR;
#endif
#ifdef _LP64
	/*
	 * If we are using a 32-bit ABI on a 64-bit kernel, mark the process
	 * that way.  If we aren't, clear it.
	 */
	if (p->p_md.md_abi == _MIPS_BSD_API_N32
	    || p->p_md.md_abi == _MIPS_BSD_API_O32) {
		p->p_flag |= PK_32;
	} else {
		p->p_flag &= ~PK_32;
	}
#endif
	/*
	 * Set up arguments for _start():
	 *	_start(stack, obj, cleanup, ps_strings);
	 *
	 * Notes:
	 *	- obj and cleanup are the auxiliary and termination
	 *	  vectors.  They are fixed up by ld.elf_so.
	 *	- ps_strings is a NetBSD extension.
	 */
	tf->tf_regs[_R_A0] = (intptr_t)stack;
	tf->tf_regs[_R_A1] = 0;
	tf->tf_regs[_R_A2] = 0;
	tf->tf_regs[_R_A3] = p->p_psstrp;

	/* No single-step breakpoint pending in the new image. */
	l->l_md.md_ss_addr = 0;
}
1746
1747 #ifdef __HAVE_BOOTINFO_H
1748 /*
1749 * Machine dependent system variables.
1750 */
1751 static int
sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)1752 sysctl_machdep_booted_kernel(SYSCTLFN_ARGS)
1753 {
1754 struct btinfo_bootpath *bibp;
1755 struct sysctlnode node;
1756
1757 bibp = lookup_bootinfo(BTINFO_BOOTPATH);
1758 if(!bibp)
1759 return(ENOENT); /* ??? */
1760
1761 node = *rnode;
1762 node.sysctl_data = bibp->bootpath;
1763 node.sysctl_size = sizeof(bibp->bootpath);
1764 return (sysctl_lookup(SYSCTLFN_CALL(&node)));
1765 }
1766 #endif
1767
/*
 * Create the machdep sysctl subtree: console device, booted kernel
 * path (when bootinfo is available), root device, ll/sc availability,
 * Loongson MMI availability, and FPU presence.
 */
SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup")
{

	/* Parent node: machdep */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "machdep", NULL,
		       NULL, 0, NULL, 0,
		       CTL_MACHDEP, CTL_EOL);

	/* machdep.console_device */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "console_device", NULL,
		       sysctl_consdev, 0, NULL, sizeof(dev_t),
		       CTL_MACHDEP, CPU_CONSDEV, CTL_EOL);
#ifdef __HAVE_BOOTINFO_H
	/* machdep.booted_kernel (needs bootinfo from the bootloader) */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "booted_kernel", NULL,
		       sysctl_machdep_booted_kernel, 0, NULL, 0,
		       CTL_MACHDEP, CPU_BOOTED_KERNEL, CTL_EOL);
#endif
	/* machdep.root_device */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRING, "root_device", NULL,
		       sysctl_root_device, 0, NULL, 0,
		       CTL_MACHDEP, CPU_ROOT_DEVICE, CTL_EOL);
	/* machdep.llsc: whether the CPU implements ll/sc */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "llsc", NULL,
		       NULL, MIPS_HAS_LLSC, NULL, 0,
		       CTL_MACHDEP, CPU_LLSC, CTL_EOL);
#ifdef MIPS3_LOONGSON2
	/* machdep.loongson-mmi: Loongson multimedia extensions */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "loongson-mmi", NULL,
		       NULL, MIPS_HAS_LMMI, NULL, 0,
		       CTL_MACHDEP, CPU_LMMI, CTL_EOL);
#endif
	/* machdep.fpu_present: compile-time FPU configuration */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_IMMEDIATE,
		       CTLTYPE_INT, "fpu_present", NULL,
		       NULL,
#ifdef NOFPU
		       0,
#else
		       1,
#endif
		       NULL, 0, CTL_MACHDEP, CTL_CREATE, CTL_EOL);
}
1817
/*
 * These are imported from platform-specific code.
 * XXX Should be declared in a header file.
 */
extern phys_ram_seg_t mem_clusters[];
extern int mem_cluster_cnt;

/*
 * These variables are needed by /sbin/savecore.
 */
u_int32_t dumpmag = 0x8fca0101;	/* magic number */
int	dumpsize = 0;		/* pages */
long	dumplo = 0;		/* blocks */

/* CPU state saved for the crash dump -- presumably filled at dump
 * time by the dump path (not visible in this chunk); confirm. */
struct pcb dumppcb;
1833
1834 /*
1835 * cpu_dumpsize: calculate size of machine-dependent kernel core dump headers.
1836 */
1837 int
cpu_dumpsize(void)1838 cpu_dumpsize(void)
1839 {
1840 int size;
1841
1842 size = ALIGN(sizeof(kcore_seg_t)) + ALIGN(sizeof(cpu_kcore_hdr_t)) +
1843 ALIGN(mem_cluster_cnt * sizeof(phys_ram_seg_t));
1844 if (roundup(size, dbtob(1)) != dbtob(1))
1845 return (-1);
1846
1847 return (1);
1848 }
1849
1850 /*
1851 * cpu_dump_mempagecnt: calculate size of RAM (in pages) to be dumped.
1852 */
1853 u_long
cpu_dump_mempagecnt(void)1854 cpu_dump_mempagecnt(void)
1855 {
1856 u_long i, n;
1857
1858 n = 0;
1859 for (i = 0; i < mem_cluster_cnt; i++)
1860 n += atop(mem_clusters[i].size);
1861 return (n);
1862 }
1863
1864 /*
1865 * cpu_dump: dump machine-dependent kernel core dump headers.
1866 */
1867 int
cpu_dump(void)1868 cpu_dump(void)
1869 {
1870 int (*dump)(dev_t, daddr_t, void *, size_t);
1871 char buf[dbtob(1)];
1872 kcore_seg_t *segp;
1873 cpu_kcore_hdr_t *cpuhdrp;
1874 phys_ram_seg_t *memsegp;
1875 const struct bdevsw *bdev;
1876 int i;
1877
1878 bdev = bdevsw_lookup(dumpdev);
1879 if (bdev == NULL)
1880 return (ENXIO);
1881
1882 dump = bdev->d_dump;
1883
1884 memset(buf, 0, sizeof buf);
1885 segp = (kcore_seg_t *)buf;
1886 cpuhdrp = (cpu_kcore_hdr_t *)&buf[ALIGN(sizeof(*segp))];
1887 memsegp = (phys_ram_seg_t *)&buf[ ALIGN(sizeof(*segp)) +
1888 ALIGN(sizeof(*cpuhdrp))];
1889
1890 /*
1891 * Generate a segment header.
1892 */
1893 CORE_SETMAGIC(*segp, KCORE_MAGIC, MID_MACHINE, CORE_CPU);
1894 segp->c_size = dbtob(1) - ALIGN(sizeof(*segp));
1895
1896 /*
1897 * Add the machine-dependent header info.
1898 */
1899 if (MIPS_HAS_R4K_MMU) {
1900 cpuhdrp->archlevel = 3;
1901 cpuhdrp->pg_shift = MIPS3_PG_SHIFT;
1902 cpuhdrp->pg_frame = MIPS3_PG_FRAME;
1903 cpuhdrp->pg_v = MIPS3_PG_V;
1904 } else {
1905 cpuhdrp->archlevel = 1;
1906 cpuhdrp->pg_shift = MIPS1_PG_SHIFT;
1907 cpuhdrp->pg_frame = MIPS1_PG_FRAME;
1908 cpuhdrp->pg_v = MIPS1_PG_V;
1909 }
1910 cpuhdrp->sysmappa = MIPS_KSEG0_TO_PHYS(curcpu()->ci_pmap_kern_segtab);
1911 cpuhdrp->nmemsegs = mem_cluster_cnt;
1912
1913 /*
1914 * Fill in the memory segment descriptors.
1915 */
1916 for (i = 0; i < mem_cluster_cnt; i++) {
1917 memsegp[i].start = mem_clusters[i].start;
1918 memsegp[i].size = mem_clusters[i].size;
1919 }
1920
1921 return (dump(dumpdev, dumplo, (void *)buf, dbtob(1)));
1922 }
1923
1924 /*
1925 * This is called by main to set dumplo and dumpsize.
1926 * Dumps always skip the first CLBYTES of disk space
1927 * in case there might be a disk label stored there.
1928 * If there is extra space, put dump at the end to
1929 * reduce the chance that swapping trashes it.
1930 */
1931 void
cpu_dumpconf(void)1932 cpu_dumpconf(void)
1933 {
1934 int nblks, dumpblks; /* size of dump area */
1935
1936 if (dumpdev == NODEV)
1937 goto bad;
1938 nblks = bdev_size(dumpdev);
1939 if (nblks <= ctod(1))
1940 goto bad;
1941
1942 dumpblks = cpu_dumpsize();
1943 if (dumpblks < 0)
1944 goto bad;
1945 dumpblks += ctod(cpu_dump_mempagecnt());
1946
1947 /* If dump won't fit (incl. room for possible label), punt. */
1948 if (dumpblks > (nblks - ctod(1)))
1949 goto bad;
1950
1951 /* Put dump at end of partition */
1952 dumplo = nblks - dumpblks;
1953
1954 /* dumpsize is in page units, and doesn't include headers. */
1955 dumpsize = cpu_dump_mempagecnt();
1956 return;
1957
1958 bad:
1959 dumpsize = 0;
1960 }
1961
1962 /*
1963 * Dump the kernel's image to the swap partition.
1964 */
1965 #define BYTES_PER_DUMP PAGE_SIZE
1966
1967 void
dumpsys(void)1968 dumpsys(void)
1969 {
1970 u_long totalbytesleft, bytes, i, n, memcl;
1971 u_long maddr;
1972 int psize;
1973 daddr_t blkno;
1974 const struct bdevsw *bdev;
1975 int (*dump)(dev_t, daddr_t, void *, size_t);
1976 int error;
1977
1978 /* Save registers. */
1979 savectx(&dumppcb);
1980
1981 if (dumpdev == NODEV)
1982 return;
1983 bdev = bdevsw_lookup(dumpdev);
1984 if (bdev == NULL || bdev->d_psize == NULL)
1985 return;
1986
1987 /*
1988 * For dumps during autoconfiguration,
1989 * if dump device has already configured...
1990 */
1991 if (dumpsize == 0)
1992 cpu_dumpconf();
1993 if (dumplo <= 0) {
1994 printf("\ndump to dev %u,%u not possible\n", major(dumpdev),
1995 minor(dumpdev));
1996 return;
1997 }
1998 printf("\ndumping to dev %u,%u offset %ld\n", major(dumpdev),
1999 minor(dumpdev), dumplo);
2000
2001 psize = bdev_size(dumpdev);
2002 printf("dump ");
2003 if (psize == -1) {
2004 printf("area unavailable\n");
2005 return;
2006 }
2007
2008 /* XXX should purge all outstanding keystrokes. */
2009
2010 if ((error = cpu_dump()) != 0)
2011 goto err;
2012
2013 totalbytesleft = ptoa(cpu_dump_mempagecnt());
2014 blkno = dumplo + cpu_dumpsize();
2015 dump = bdev->d_dump;
2016 error = 0;
2017
2018 for (memcl = 0; memcl < mem_cluster_cnt; memcl++) {
2019 maddr = mem_clusters[memcl].start;
2020 bytes = mem_clusters[memcl].size;
2021
2022 for (i = 0; i < bytes; i += n, totalbytesleft -= n) {
2023 void *maddr_va;
2024
2025 /* Print out how many MBs we have left to go. */
2026 if ((totalbytesleft % (1024*1024)) == 0)
2027 printf_nolog("%ld ",
2028 totalbytesleft / (1024 * 1024));
2029
2030 /* Limit size for next transfer. */
2031 n = bytes - i;
2032 if (n > BYTES_PER_DUMP)
2033 n = BYTES_PER_DUMP;
2034
2035 #ifdef _LP64
2036 maddr_va = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(maddr);
2037 #else
2038 maddr_va = (void *)MIPS_PHYS_TO_KSEG0(maddr);
2039 #endif
2040 error = (*dump)(dumpdev, blkno, maddr_va, n);
2041 if (error)
2042 goto err;
2043 maddr += n;
2044 blkno += btodb(n); /* XXX? */
2045
2046 /* XXX should look for keystrokes, to cancel. */
2047 }
2048 }
2049
2050 err:
2051 switch (error) {
2052
2053 case ENXIO:
2054 printf("device bad\n");
2055 break;
2056
2057 case EFAULT:
2058 printf("device not ready\n");
2059 break;
2060
2061 case EINVAL:
2062 printf("area improper\n");
2063 break;
2064
2065 case EIO:
2066 printf("i/o error\n");
2067 break;
2068
2069 case EINTR:
2070 printf("aborted from console\n");
2071 break;
2072
2073 case 0:
2074 printf("succeeded\n");
2075 break;
2076
2077 default:
2078 printf("error %d\n", error);
2079 break;
2080 }
2081 printf("\n\n");
2082 delay(5000000); /* 5 seconds */
2083 }
2084
/*
 * mips_init_msgbuf: steal pages from the end of the last suitable
 * physical segment for the kernel message buffer and initialize it.
 * On !_LP64 the segment must be reachable through KSEG0.
 */
void
mips_init_msgbuf(void)
{
	vsize_t sz = (vsize_t)round_page(MSGBUFSIZE);
	vsize_t reqsz = sz;	/* remember requested size for the warning */
	uvm_physseg_t bank = uvm_physseg_get_last();
#ifndef _LP64
	/*
	 * First the physical segment that can be mapped to KSEG0
	 */
	for (; uvm_physseg_valid_p(bank); bank = uvm_physseg_get_prev(bank)) {
		if (uvm_physseg_get_avail_start(bank) + atop(sz) <= atop(MIPS_PHYS_MASK))
			break;
	}
#endif

	paddr_t start = uvm_physseg_get_start(bank);
	paddr_t end = uvm_physseg_get_end(bank);

	/* shrink so that it'll fit in the last segment */
	if ((end - start) < atop(sz))
		sz = ptoa(end - start);

	/* Take the pages at the top of the segment away from uvm. */
	end -= atop(sz);
	uvm_physseg_unplug(end, atop(sz));

	/* Map the stolen pages through the direct map. */
#ifdef _LP64
	msgbufaddr = (void *) MIPS_PHYS_TO_XKPHYS_CACHED(ptoa(end));
#else
	msgbufaddr = (void *) MIPS_PHYS_TO_KSEG0(ptoa(end));
#endif
	initmsgbuf(msgbufaddr, sz);

	/* warn if the message buffer had to be shrunk */
	if (sz != reqsz)
		printf("WARNING: %"PRIdVSIZE" bytes not available for msgbuf "
		    "in last cluster (%"PRIdVSIZE" used)\n", reqsz, sz);
}
2123
/*
 * mips_init_lwp0_uarea: set up lwp0's uarea — allocate it if platform
 * code hasn't already — and initialize its trapframe and PCB so lwp0
 * can be run.
 */
void
mips_init_lwp0_uarea(void)
{
	struct lwp * const l = &lwp0;
	vaddr_t v;

	if (l->l_addr == NULL) {
		/* No uarea supplied; boot-allocate one. */
		v = uvm_pageboot_alloc(USPACE);
		uvm_lwp_setuarea(&lwp0, v);
	} else {
		v = (vaddr_t)l->l_addr;
	}

	/* The trapframe sits at the very top of the uarea. */
	l->l_md.md_utf = (struct trapframe *)(v + USPACE) - 1;
	struct pcb * const pcb = lwp_getpcb(l);
	/*
	 * Now zero out the only two areas of the uarea that we care about.
	 */
	memset(l->l_md.md_utf, 0, sizeof(*l->l_md.md_utf));
	memset(pcb, 0, sizeof(*pcb));

	/* Interrupts enabled, masked per IPL_SCHED. */
	pcb->pcb_context.val[_L_SR] = MIPS_SR_INT_IE
	    | (ipl_sr_map.sr_bits[IPL_SCHED] ^ MIPS_INT_MASK);
#ifdef __mips_n32
	/* N32 kernels run with 64-bit kernel addressing (KX). */
	pcb->pcb_context.val[_L_SR] |= MIPS_SR_KX;
	l->l_md.md_utf->tf_regs[_R_SR] = MIPS_SR_KX;
#endif
#ifdef _LP64
	/* LP64 kernels run with 64-bit kernel and user addressing. */
	pcb->pcb_context.val[_L_SR] |= MIPS_SR_KX | MIPS_SR_UX;
	l->l_md.md_utf->tf_regs[_R_SR] = MIPS_SR_KX | MIPS_SR_UX;
#endif
}
2156
/*
 * Freelist used for pool pages; mips_page_physload() switches it to
 * VM_FREELIST_FIRST512M on !_LP64 systems with memory above 512MB.
 */
int mips_poolpage_vmfreelist = VM_FREELIST_DEFAULT;

#define	HALFGIG	((paddr_t)512 * 1024 * 1024)		/* 512MB boundary */
#define	FOURGIG	((paddr_t)4 * 1024 * 1024 * 1024)	/* 4GB boundary */
2161
2162 void
mips_page_physload(vaddr_t vkernstart,vaddr_t vkernend,const phys_ram_seg_t * segs,size_t nseg,const struct mips_vmfreelist * flp,size_t nfl)2163 mips_page_physload(vaddr_t vkernstart, vaddr_t vkernend,
2164 const phys_ram_seg_t *segs, size_t nseg,
2165 const struct mips_vmfreelist *flp, size_t nfl)
2166 {
2167 const paddr_t kernstart = MIPS_KSEG0_TO_PHYS(trunc_page(vkernstart));
2168 const paddr_t kernend = MIPS_KSEG0_TO_PHYS(round_page(vkernend));
2169 #if defined(VM_FREELIST_FIRST4G) || defined(VM_FREELIST_FIRST512M)
2170 #ifdef VM_FREELIST_FIRST512M
2171 bool need512m = false;
2172 #endif
2173 #ifdef VM_FREELIST_FIRST4G
2174 bool need4g = false;
2175 #endif
2176
2177 /*
2178 * Do a first pass and see what ranges memory we have to deal with.
2179 */
2180 for (size_t i = 0; i < nseg; i++) {
2181 #ifdef VM_FREELIST_FIRST4G
2182 if (round_page(segs[i].start + segs[i].size) > FOURGIG) {
2183 need4g = true;
2184 }
2185 #endif
2186 #ifdef VM_FREELIST_FIRST512M
2187 if (round_page(segs[i].start + segs[i].size) > HALFGIG) {
2188 need512m = true;
2189 #if !defined(_LP64)
2190 mips_poolpage_vmfreelist = VM_FREELIST_FIRST512M;
2191 #endif
2192 }
2193 #endif
2194 }
2195 #endif /* VM_FREELIST_FIRST512M || VM_FREELIST_FIRST4G */
2196
2197 for (; nseg-- > 0; segs++) {
2198 /*
2199 * Make sure everything is in page units.
2200 */
2201 paddr_t segstart = round_page(segs->start);
2202 const paddr_t segfinish = trunc_page(segs->start + segs->size);
2203
2204 if (segstart >= segfinish) {
2205 /*
2206 * This is purely cosmetic, to avoid output like
2207 * phys segment: 0xffffffffffffe000 @ 0xffb6000
2208 * when a segment starts and finishes in the same page.
2209 */
2210 printf("phys segment: %#"PRIxPADDR" @ %#"PRIxPADDR
2211 " (short)\n", (paddr_t)segs->size, segstart);
2212 continue;
2213 }
2214
2215 printf("phys segment: %#"PRIxPADDR" @ %#"PRIxPADDR"\n",
2216 segfinish - segstart, segstart);
2217
2218 /*
2219 * Page 0 is reserved for exception vectors.
2220 */
2221 if (segstart == 0) {
2222 segstart = PAGE_SIZE;
2223 }
2224 while (segstart < segfinish) {
2225 int freelist = -1; /* unknown freelist */
2226 paddr_t segend = segfinish;
2227 for (size_t i = 0; i < nfl; i++) {
2228 /*
2229 * If this segment doesn't overlap the freelist
2230 * at all, skip it.
2231 */
2232 if (segstart >= flp[i].fl_end
2233 || segend <= flp[i].fl_start)
2234 continue;
2235 /*
2236 * If the start of this segment starts before
2237 * the start of the freelist, then limit the
2238 * segment to loaded to the part that doesn't
2239 * match this freelist and fall back to normal
2240 * freelist matching.
2241 */
2242 if (segstart < flp[i].fl_start) {
2243 segstart = flp[i].fl_start;
2244 break;
2245 }
2246
2247 /*
2248 * We've matched this freelist so remember it.
2249 */
2250 freelist = flp->fl_freelist;
2251
2252 /*
2253 * If this segment extends past the end of this
2254 * freelist, bound to segment to the freelist.
2255 */
2256 if (segend > flp[i].fl_end)
2257 segend = flp[i].fl_end;
2258 break;
2259 }
2260 /*
2261 * If we didn't match one of the port dependent
2262 * freelists, let's try the common ones.
2263 */
2264 if (freelist == -1) {
2265 #ifdef VM_FREELIST_FIRST512M
2266 if (need512m && segstart < HALFGIG) {
2267 freelist = VM_FREELIST_FIRST512M;
2268 if (segend > HALFGIG)
2269 segend = HALFGIG;
2270 } else
2271 #endif
2272 #ifdef VM_FREELIST_FIRST4G
2273 if (need4g && segstart < FOURGIG) {
2274 freelist = VM_FREELIST_FIRST4G;
2275 if (segend > FOURGIG)
2276 segend = FOURGIG;
2277 } else
2278 #endif
2279 freelist = VM_FREELIST_DEFAULT;
2280 }
2281
2282 /*
2283 * Make sure the memory we provide to uvm doesn't
2284 * include the kernel.
2285 */
2286 if (segstart < kernend && segend > kernstart) {
2287 if (segstart < kernstart) {
2288 /*
2289 * Only add the memory before the
2290 * kernel.
2291 */
2292 segend = kernstart;
2293 } else if (segend > kernend) {
2294 /*
2295 * Only add the memory after the
2296 * kernel.
2297 */
2298 segstart = kernend;
2299 } else {
2300 /*
2301 * Just skip the segment entirely since
2302 * it's completely inside the kernel.
2303 */
2304 printf("skipping %#"PRIxPADDR" @ %#"PRIxPADDR" (kernel)\n",
2305 segend - segstart, segstart);
2306 break;
2307 }
2308 }
2309
2310 /*
2311 * Now we give this segment to uvm.
2312 */
2313 printf("adding %#"PRIxPADDR" @ %#"PRIxPADDR" to freelist %d\n",
2314 segend - segstart, segstart, freelist);
2315 paddr_t first = atop(segstart);
2316 paddr_t last = atop(segend);
2317 uvm_page_physload(first, last, first, last, freelist);
2318
2319 /*
2320 * Start where we finished.
2321 */
2322 segstart = segend;
2323 }
2324 }
2325 }
2326
2327 /*
2328 * Start a new LWP
2329 */
2330 void
startlwp(void * arg)2331 startlwp(void *arg)
2332 {
2333 ucontext_t * const uc = arg;
2334 lwp_t * const l = curlwp;
2335 int error __diagused;
2336
2337 error = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
2338 KASSERT(error == 0);
2339
2340 kmem_free(uc, sizeof(ucontext_t));
2341 userret(l);
2342 }
2343
#ifdef COMPAT_NETBSD32
/*
 * Start a new 32-bit (COMPAT_NETBSD32) LWP: install the 32-bit
 * machine context, free it, and return to user mode.
 */
void
startlwp32(void *arg)
{
	ucontext32_t * const ctx = arg;
	lwp_t * const lwp = curlwp;
	int err __diagused;

	err = cpu_setmcontext32(lwp, &ctx->uc_mcontext, ctx->uc_flags);
	KASSERT(err == 0);

	/*
	 * The creator allocated a full native-sized ucontext_t, so that
	 * is the size to free — not sizeof(ucontext32_t).
	 */
	kmem_free(ctx, sizeof(ucontext_t));
	userret(lwp);
}
#endif /* COMPAT_NETBSD32 */
2363
#ifdef PARANOIA
/*
 * std_splsw_test: consistency check of the spl(9) implementation.
 * Steps through every IPL and asserts that the CP0 status interrupt
 * mask and ci_cpl track ipl_sr_map at each transition.  Entered with
 * interrupts disabled at the CP0 level.
 */
void
std_splsw_test(void)
{
	struct cpu_info * const ci = curcpu();
	const uint32_t * const sr_map = ipl_sr_map.sr_bits;
	uint32_t status = mips_cp0_status_read();
	uint32_t sr_bits;
	int s;

	/* Interrupts must be globally disabled on entry. */
	KASSERT((status & MIPS_SR_INT_IE) == 0);

	sr_bits = sr_map[IPL_NONE];

	/* At IPL_NONE all interrupt lines must be enabled. */
	splx(IPL_NONE);
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT(status == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_NONE);

	/*
	 * Raise through each level in turn; each spl*() must return the
	 * previous level and set the mask exactly per ipl_sr_map.
	 */
	s = splsoftclock();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_SOFTCLOCK]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_SOFTCLOCK);
	KASSERT(s == IPL_NONE);

	s = splsoftbio();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_SOFTBIO]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_SOFTBIO);
	KASSERT(s == IPL_SOFTCLOCK);

	s = splsoftnet();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_SOFTNET]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_SOFTNET);
	KASSERT(s == IPL_SOFTBIO);

	s = splsoftserial();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_SOFTSERIAL]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_SOFTSERIAL);
	KASSERT(s == IPL_SOFTNET);

	s = splvm();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_VM]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_VM);
	KASSERT(s == IPL_SOFTSERIAL);

	s = splsched();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_SCHED]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_SCHED);
	KASSERT(s == IPL_VM);

	s = splhigh();
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT((status ^ sr_map[IPL_HIGH]) == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_HIGH);
	KASSERT(s == IPL_SCHED);

	/* Drop back down; everything must be unmasked again. */
	splx(IPL_NONE);
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT(status == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_NONE);

	/* Exercise splraise()/splx() for every pair of levels. */
	for (int r = IPL_SOFTCLOCK; r <= IPL_HIGH; r++) {
		/*
		 * As IPL increases, more intrs may be masked but no intrs
		 * may become unmasked.
		 */
		KASSERT((sr_map[r] & sr_bits) == sr_bits);
		sr_bits |= sr_map[r];
		s = splraise(r);
		KASSERT(s == IPL_NONE);

		for (int t = r; t <= IPL_HIGH; t++) {
			int o = splraise(t);
			status = mips_cp0_status_read() & MIPS_INT_MASK;
			KASSERT((status ^ sr_map[t]) == MIPS_INT_MASK);
			KASSERT(ci->ci_cpl == t);
			KASSERT(o == r);

			splx(o);
			status = mips_cp0_status_read() & MIPS_INT_MASK;
			KASSERT((status ^ sr_map[r]) == MIPS_INT_MASK);
			KASSERT(ci->ci_cpl == r);
		}

		splx(s);
		status = mips_cp0_status_read() & MIPS_INT_MASK;
		KASSERT((status ^ sr_map[s]) == MIPS_INT_MASK);
		KASSERT(ci->ci_cpl == s);
	}

	/* Final state: back at IPL_NONE with everything unmasked. */
	status = mips_cp0_status_read() & MIPS_INT_MASK;
	KASSERT(status == MIPS_INT_MASK);
	KASSERT(ci->ci_cpl == IPL_NONE);
}

#endif /* PARANOIA */
2465
#ifdef MODULAR
/*
 * Push any modules loaded by the boot loader.
 * Currently a no-op on MIPS; a board- or machine-specific hook would
 * go here.
 */
void
module_init_md(void)
{

	/* XXX Do something board/machine specific here one day... */
}
#endif /* MODULAR */
2477
2478 bool
mm_md_direct_mapped_phys(paddr_t pa,vaddr_t * vap)2479 mm_md_direct_mapped_phys(paddr_t pa, vaddr_t *vap)
2480 {
2481 #ifdef _LP64
2482 if (MIPS_XKSEG_P(pa)) {
2483 *vap = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
2484 return true;
2485 }
2486 #endif
2487 if (MIPS_KSEG0_P(pa)) {
2488 *vap = MIPS_PHYS_TO_KSEG0(pa);
2489 return true;
2490 }
2491 return false;
2492 }
2493
2494 bool
mm_md_page_color(paddr_t pa,int * colorp)2495 mm_md_page_color(paddr_t pa, int *colorp)
2496 {
2497 if (MIPS_CACHE_VIRTUAL_ALIAS) {
2498 struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
2499 KASSERT(pg != NULL);
2500 struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
2501 *colorp = atop(mdpg->mdpg_first.pv_va);
2502 return !mips_cache_badalias(pa, mdpg->mdpg_first.pv_va);
2503 }
2504 *colorp = 0;
2505 return true;
2506 }
2507
2508 int
mm_md_physacc(paddr_t pa,vm_prot_t prot)2509 mm_md_physacc(paddr_t pa, vm_prot_t prot)
2510 {
2511
2512 return (pa < ctob(physmem)) ? 0 : EFAULT;
2513 }
2514
/*
 * mm_md_kernacc: validate kernel-space access to the address 'ptr'.
 * Sets *handled to true when the range was fully validated here;
 * *handled = false means the caller should fall back to its own
 * (uvm_kernacc) check.  Returns EFAULT for known-bad ranges.
 */
int
mm_md_kernacc(void *ptr, vm_prot_t prot, bool *handled)
{
	const vaddr_t v = (vaddr_t)ptr;

#ifdef _LP64
	extern char end[];	/* end of the loaded kernel image */

	/* For any address < XKPHYS cached address 0, fault */
	if (v < MIPS_PHYS_TO_XKPHYS_CACHED(0)) {
		return EFAULT;
	}

	/* If address < XKPHY(end of message buffer), good! */
	if (v < MIPS_PHYS_TO_XKPHYS_CACHED(pmap_limits.avail_end +
	    mips_round_page(MSGBUFSIZE))) {
		/* XXX holes in RAM (eg, EdgeRouter 4) */
		*handled = true;
		return 0;
	}

	/* If address in KSEG0 and is before end of kernel, good! */
	if (MIPS_KSEG0_P(v) && v < (vaddr_t)end) {
		*handled = true;
		return 0;
	}

	/* Otherwise, fall back to the uvm_kernacc() check. */
#else
	/* Anything below KSEG0 is rejected outright. */
	if (v < MIPS_KSEG0_START) {
		return EFAULT;
	}
	/* KSEG0 up through the end of the message buffer is direct RAM. */
	if (v < MIPS_PHYS_TO_KSEG0(pmap_limits.avail_end +
	    mips_round_page(MSGBUFSIZE))) {
		*handled = true;
		return 0;
	}
	/* Everything else below KSEG2 (rest of KSEG0/KSEG1) faults. */
	if (v < MIPS_KSEG2_START) {
		return EFAULT;
	}
#endif
	*handled = false;
	return 0;
}
2559
#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
/*
 * Probe the CPU for hardware watchpoint registers and record how
 * many were found on the current CPU.
 */
static void
mips_watchpoint_init(void)
{
	struct cpu_info * const ci = curcpu();

	ci->ci_cpuwatch_count = cpuwatch_discover();
}
#endif
2570
2571
2572 /*
2573 * Process the tail end of a posix_spawn() for the child.
2574 */
2575 void
cpu_spawn_return(struct lwp * l)2576 cpu_spawn_return(struct lwp *l)
2577 {
2578 userret(l);
2579 }
2580