/*	$OpenBSD: cpu.h,v 1.132 2021/05/05 15:29:19 visa Exp $	*/

/*-
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Ralph Campbell and Rick Macklem.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	Copyright (C) 1989 Digital Equipment Corporation.
 *	Permission to use, copy, modify, and distribute this software and
 *	its documentation for any purpose and without fee is hereby granted,
 *	provided that the above copyright notice appears in all copies.
 *	Digital Equipment Corporation makes no representations about the
 *	suitability of this software for any purpose.  It is provided "as is"
 *	without express or implied warranty.
 *
 *	from: @(#)cpu.h	8.4 (Berkeley) 1/4/94
 */

#ifndef _MIPS64_CPU_H_
#define	_MIPS64_CPU_H_

#ifndef _LOCORE

/*
 * MIPS32-style segment definitions.
 * They only cover the first 512MB of physical addresses.
 */
#define	CKSEG0_BASE		0xffffffff80000000UL
#define	CKSEG1_BASE		0xffffffffa0000000UL
#define	CKSSEG_BASE		0xffffffffc0000000UL
#define	CKSEG3_BASE		0xffffffffe0000000UL
#define	CKSEG_SIZE		0x0000000020000000UL

#define	CKSEG0_TO_PHYS(x)	((u_long)(x) & (CKSEG_SIZE - 1))
#define	CKSEG1_TO_PHYS(x)	((u_long)(x) & (CKSEG_SIZE - 1))
#define	PHYS_TO_CKSEG0(x)	((u_long)(x) | CKSEG0_BASE)
#define	PHYS_TO_CKSEG1(x)	((u_long)(x) | CKSEG1_BASE)

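/*
 * Usage sketch: a physical address below 512MB can be reached through
 * either unmapped window, for instance
 *
 *	paddr_t pa = 0x1fc00000UL;
 *	vaddr_t cached   = PHYS_TO_CKSEG0(pa);		0xffffffff9fc00000
 *	vaddr_t uncached = PHYS_TO_CKSEG1(pa);		0xffffffffbfc00000
 *	paddr_t back     = CKSEG1_TO_PHYS(uncached);	pa again
 */
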
/*
 * MIPS64-style segment definitions.
 * These allow for 36 bits of addressable physical memory, thus 64GB.
 */

/*
 * Cache Coherency Attributes.
 */
/* r8k only */
#define	CCA_NC_COPROCESSOR	0UL	/* uncached, coprocessor ordered */
/* common to r4, r5k, r8k and r1xk */
#define	CCA_NC			2UL	/* uncached, write-around */
#define	CCA_NONCOHERENT		3UL	/* cached, non-coherent, write-back */
/* r8k, r1xk only */
#define	CCA_COHERENT_EXCL	4UL	/* cached, coherent, exclusive */
#define	CCA_COHERENT_EXCLWRITE	5UL	/* cached, coherent, exclusive write */
/* r4k only */
#define	CCA_COHERENT_UPDWRITE	6UL	/* cached, coherent, update on write */
/* r1xk only */
#define	CCA_NC_ACCELERATED	7UL	/* uncached accelerated */

#ifdef TGT_COHERENT
#define	CCA_CACHED		CCA_COHERENT_EXCLWRITE
#else
#define	CCA_CACHED		CCA_NONCOHERENT
#endif

#define	XKSSSEG_BASE		0x4000000000000000UL
#define	XKPHYS_BASE		0x8000000000000000UL
#define	XKSSEG_BASE		0xc000000000000000UL

#define	XKPHYS_TO_PHYS(x)	((paddr_t)(x) & 0x0000000fffffffffUL)
#define	PHYS_TO_XKPHYS(x,c)	((paddr_t)(x) | XKPHYS_BASE | ((c) << 59))
#define	IS_XKPHYS(va)		(((va) >> 62) == 2)
#define	XKPHYS_TO_CCA(x)	(((x) >> 59) & 0x07)

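/*
 * Usage sketch: direct-map a physical address through XKPHYS with the
 * kernel's preferred cacheability, then check and decompose the result,
 * for instance
 *
 *	vaddr_t va = PHYS_TO_XKPHYS(pa, CCA_CACHED);
 *	if (IS_XKPHYS(va)) {
 *		paddr_t pa2 = XKPHYS_TO_PHYS(va);	the original pa
 *		u_long cca = XKPHYS_TO_CCA(va);		CCA_CACHED
 *	}
 */
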
#endif	/* _LOCORE */

/*
 * Exported definitions unique to mips cpu support.
 */

#if defined(_KERNEL) && !defined(_LOCORE)

#include <sys/device.h>
#include <machine/intr.h>
#include <sys/sched.h>
#include <sys/srp.h>

struct cpu_hwinfo {
	uint32_t	c0prid;
	uint32_t	c1prid;
	uint32_t	clock;	/* Hz */
	uint32_t	tlbsize;
	uint		type;
	uint32_t	l2size;
};

/*
 * Cache memory configuration. One struct per cache.
 */
struct cache_info {
	uint		size;		/* total cache size */
	uint		linesize;	/* line size */
	uint		setsize;	/* set size */
	uint		sets;		/* number of sets */
};

struct cpu_info {
	struct device	*ci_dev;	/* our device */
	struct cpu_info	*ci_self;	/* pointer to this structure */
	struct cpu_info	*ci_next;	/* next cpu */
	struct proc	*ci_curproc;
	struct user	*ci_curprocpaddr;
	struct proc	*ci_fpuproc;	/* pointer to last proc to use FP */
	uint32_t	 ci_delayconst;
	struct cpu_hwinfo
			ci_hw;

#if defined(MULTIPROCESSOR)
	struct srp_hazard ci_srp_hazards[SRP_HAZARD_NUM];
#endif

	/* cache information and pending flush state */
	uint		ci_cacheconfiguration;
	uint64_t	ci_cachepending_l1i;
	struct cache_info
			ci_l1inst,
			ci_l1data,
			ci_l2,
			ci_l3;

	/* function pointers for the cache handling routines */
	void		(*ci_SyncCache)(struct cpu_info *);
	void		(*ci_InvalidateICache)(struct cpu_info *, vaddr_t,
			    size_t);
	void		(*ci_InvalidateICachePage)(struct cpu_info *, vaddr_t);
	void		(*ci_SyncICache)(struct cpu_info *);
	void		(*ci_SyncDCachePage)(struct cpu_info *, vaddr_t,
			    paddr_t);
	void		(*ci_HitSyncDCachePage)(struct cpu_info *, vaddr_t,
			    paddr_t);
	void		(*ci_HitSyncDCache)(struct cpu_info *, vaddr_t, size_t);
	void		(*ci_HitInvalidateDCache)(struct cpu_info *, vaddr_t,
			    size_t);
	void		(*ci_IOSyncDCache)(struct cpu_info *, vaddr_t, size_t,
			    int);

	struct schedstate_percpu
			ci_schedstate;
	int		ci_want_resched;	/* need_resched() invoked */
	cpuid_t		ci_cpuid;		/* our CPU ID */
	uint32_t	ci_randseed;		/* per cpu random seed */
	volatile int	ci_ipl;			/* software IPL */
	uint32_t	ci_softpending;		/* pending soft interrupts */
	int		ci_clock_started;
	u_int32_t	ci_cpu_counter_last;	/* last compare value loaded */
	u_int32_t	ci_cpu_counter_interval; /* # of counter ticks/tick */

	u_int32_t	ci_pendingticks;

#ifdef TGT_ORIGIN
	u_int16_t	ci_nasid;
	u_int16_t	ci_slice;
#endif

	struct pmap	*ci_curpmap;
	uint		ci_intrdepth;		/* interrupt depth */
#ifdef MULTIPROCESSOR
	u_long		ci_flags;		/* flags; see below */
#endif
	volatile int    ci_ddb;
#define	CI_DDB_RUNNING		0
#define	CI_DDB_SHOULDSTOP	1
#define	CI_DDB_STOPPED		2
#define	CI_DDB_ENTERDDB		3
#define	CI_DDB_INDDB		4

#ifdef DIAGNOSTIC
	int	ci_mutex_level;
#endif
#ifdef GPROF
	struct gmonparam *ci_gmon;
#endif
};

#define	CPUF_PRIMARY	0x01		/* CPU is primary CPU */
#define	CPUF_PRESENT	0x02		/* CPU is present */
#define	CPUF_RUNNING	0x04		/* CPU is running */

extern struct cpu_info cpu_info_primary;
extern struct cpu_info *cpu_info_list;
#define CPU_INFO_ITERATOR		int
#define	CPU_INFO_FOREACH(cii, ci)	for (cii = 0, ci = cpu_info_list; \
					    ci != NULL; ci = ci->ci_next)

#define CPU_INFO_UNIT(ci)               ((ci)->ci_dev ? (ci)->ci_dev->dv_unit : 0)

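/*
 * Usage sketch: walk every attached cpu, for instance to report its
 * clock frequency (ci_hw.clock is in Hz).
 *
 *	CPU_INFO_ITERATOR cii;
 *	struct cpu_info *ci;
 *
 *	CPU_INFO_FOREACH(cii, ci)
 *		printf("cpu%d: %u MHz\n", CPU_INFO_UNIT(ci),
 *		    ci->ci_hw.clock / 1000000);
 */
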
extern void (*cpu_idle_cycle_func)(void);
#define cpu_idle_cycle()		(*cpu_idle_cycle_func)()

#ifdef MULTIPROCESSOR
#define getcurcpu()			hw_getcurcpu()
#define setcurcpu(ci)			hw_setcurcpu(ci)
extern struct cpu_info *get_cpu_info(int);
#define curcpu() getcurcpu()
#define	CPU_IS_PRIMARY(ci)		((ci)->ci_flags & CPUF_PRIMARY)
#define cpu_number()			(curcpu()->ci_cpuid)

extern struct cpuset cpus_running;
void cpu_unidle(struct cpu_info *);
void cpu_boot_secondary_processors(void);
#define cpu_boot_secondary(ci)          hw_cpu_boot_secondary(ci)
#define cpu_hatch(ci)                   hw_cpu_hatch(ci)

vaddr_t alloc_contiguous_pages(size_t);

#define MIPS64_IPI_NOP		0x00000001
#define MIPS64_IPI_RENDEZVOUS	0x00000002
#define MIPS64_IPI_DDB		0x00000004
#define MIPS64_NIPIS		3	/* must not exceed 32 */

void	mips64_ipi_init(void);
void	mips64_send_ipi(unsigned int, unsigned int);
void	smp_rendezvous_cpus(unsigned long, void (*)(void *), void *arg);

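/*
 * Usage sketch, assuming the first argument is the target cpu number and
 * the second a mask of MIPS64_IPI_* bits: ask another cpu to stop for ddb.
 *
 *	mips64_send_ipi(ci->ci_cpuid, MIPS64_IPI_DDB);
 */
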
#include <sys/mplock.h>
#else
#define MAXCPUS				1
#define curcpu()			(&cpu_info_primary)
#define	CPU_IS_PRIMARY(ci)		1
#define cpu_number()			0UL
#define cpu_unidle(ci)
#define get_cpu_info(i)			(&cpu_info_primary)
#endif

#define CPU_BUSY_CYCLE()	do {} while (0)

extern void (*md_startclock)(struct cpu_info *);
void	cp0_calibrate(struct cpu_info *);

unsigned int cpu_rnd_messybits(void);

#include <machine/frame.h>

/*
 * Arguments to hardclock encapsulate the previous machine state in
 * an opaque clockframe.
 */
#define	clockframe trapframe	/* Use normal trap frame */

#define	SR_KSU_USER		0x00000010
#define	CLKF_USERMODE(framep)	((framep)->sr & SR_KSU_USER)
#define	CLKF_PC(framep)		((framep)->pc)
#define	CLKF_INTR(framep)	(curcpu()->ci_intrdepth > 1)	/* XXX */

/*
 * This is used during profiling to integrate system time.
 */
#define	PROC_PC(p)	((p)->p_md.md_regs->pc)
#define	PROC_STACK(p)	((p)->p_md.md_regs->sp)

/*
 * Preempt the current process if in interrupt from user mode,
 * or after the current trap/syscall if in system mode.
 */
void	need_resched(struct cpu_info *);
#define	clear_resched(ci) 	(ci)->ci_want_resched = 0

/*
 * Give a profiling tick to the current process when the user profiling
 * buffer pages are invalid.  On MIPS designs, request an ast to send us
 * through trap, marking the proc as needing a profiling tick.
 */
#define	need_proftick(p)	aston(p)

/*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
void	signotify(struct proc *);

#define	aston(p)		((p)->p_md.md_astpending = 1)

#define	mips_sync()		__asm__ volatile ("sync" ::: "memory")

#endif /* _KERNEL && !_LOCORE */

#ifdef _KERNEL
/*
 * Values for the code field in a break instruction.
 */
#define	BREAK_INSTR		0x0000000d
#define	BREAK_VAL_MASK		0x03ff0000
#define	BREAK_VAL_SHIFT		16
#define	BREAK_KDB_VAL		512
#define	BREAK_SSTEP_VAL		513
#define	BREAK_BRKPT_VAL		514
#define	BREAK_SOVER_VAL		515
#define	BREAK_DDB_VAL		516
#define	BREAK_FPUEMUL_VAL	517
#define	BREAK_KDB	(BREAK_INSTR | (BREAK_KDB_VAL << BREAK_VAL_SHIFT))
#define	BREAK_SSTEP	(BREAK_INSTR | (BREAK_SSTEP_VAL << BREAK_VAL_SHIFT))
#define	BREAK_BRKPT	(BREAK_INSTR | (BREAK_BRKPT_VAL << BREAK_VAL_SHIFT))
#define	BREAK_SOVER	(BREAK_INSTR | (BREAK_SOVER_VAL << BREAK_VAL_SHIFT))
#define	BREAK_DDB	(BREAK_INSTR | (BREAK_DDB_VAL << BREAK_VAL_SHIFT))
#define	BREAK_FPUEMUL	(BREAK_INSTR | (BREAK_FPUEMUL_VAL << BREAK_VAL_SHIFT))

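/*
 * Usage sketch: a trap handler can recover the code field from the
 * faulting break instruction and match it against the values above
 * (assuming frame points at the saved trapframe).
 *
 *	uint32_t insn = *(uint32_t *)frame->pc;
 *	u_int code = (insn & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT;
 *	if (code == BREAK_SSTEP_VAL)
 *		handle the single-step breakpoint
 */
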
#endif /* _KERNEL */

/*
 * CTL_MACHDEP definitions.
 */
#define	CPU_ALLOWAPERTURE	1	/* allow mmap of /dev/xf86 */
		/*		2	   formerly: keyboard reset */
		/*		3	   formerly: CPU_LIDSUSPEND */
#define CPU_LIDACTION		4	/* action caused by lid close */
#define	CPU_MAXID		5	/* number of valid machdep ids */

#define	CTL_MACHDEP_NAMES {			\
	{ 0, 0 },				\
	{ "allowaperture", CTLTYPE_INT },	\
	{ 0, 0 },				\
	{ 0, 0 },				\
	{ "lidaction", CTLTYPE_INT },		\
}

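/*
 * Usage sketch: from userland, machdep.allowaperture can be read through
 * sysctl(2), for instance
 *
 *	int mib[2] = { CTL_MACHDEP, CPU_ALLOWAPERTURE };
 *	int value;
 *	size_t len = sizeof(value);
 *	if (sysctl(mib, 2, &value, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */
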
/*
 * MIPS CPU types (cp_imp).
 */
#define	MIPS_R2000	0x01	/* MIPS R2000 CPU		ISA I   */
#define	MIPS_R3000	0x02	/* MIPS R3000 CPU		ISA I   */
#define	MIPS_R6000	0x03	/* MIPS R6000 CPU		ISA II	*/
#define	MIPS_R4000	0x04	/* MIPS R4000/4400 CPU		ISA III	*/
#define	MIPS_R3LSI	0x05	/* LSI Logic R3000 derivate	ISA I	*/
#define	MIPS_R6000A	0x06	/* MIPS R6000A CPU		ISA II	*/
#define	MIPS_CN50XX	0x06	/* Cavium OCTEON CN50xx		MIPS64R2*/
#define	MIPS_R3IDT	0x07	/* IDT R3000 derivate		ISA I	*/
#define	MIPS_R10000	0x09	/* MIPS R10000/T5 CPU		ISA IV  */
#define	MIPS_R4200	0x0a	/* MIPS R4200 CPU (ICE)		ISA III */
#define	MIPS_R4300	0x0b	/* NEC VR4300 CPU		ISA III */
#define	MIPS_R4100	0x0c	/* NEC VR41xx CPU MIPS-16	ISA III */
#define	MIPS_R12000	0x0e	/* MIPS R12000			ISA IV  */
#define	MIPS_R14000	0x0f	/* MIPS R14000			ISA IV  */
#define	MIPS_R8000	0x10	/* MIPS R8000 Blackbird/TFP	ISA IV  */
#define	MIPS_R4600	0x20	/* PMCS R4600 Orion		ISA III */
#define	MIPS_R4700	0x21	/* PMCS R4700 Orion		ISA III */
#define	MIPS_R3TOSH	0x22	/* Toshiba R3000 based CPU	ISA I	*/
#define	MIPS_R5000	0x23	/* MIPS R5000 CPU		ISA IV  */
#define	MIPS_RM7000	0x27	/* PMCS RM7000 CPU		ISA IV  */
#define	MIPS_RM52X0	0x28	/* PMCS RM52X0 CPU		ISA IV  */
#define	MIPS_RM9000	0x34	/* PMCS RM9000 CPU		ISA IV  */
#define	MIPS_LOONGSON	0x42	/* STC LoongSon CPU		ISA III */
#define	MIPS_VR5400	0x54	/* NEC Vr5400 CPU		ISA IV+ */
#define	MIPS_LOONGSON2	0x63	/* STC LoongSon2/3 CPU		ISA III+ */
#define	MIPS_CN63XX	0x90	/* Cavium OCTEON II CN6[23]xx	MIPS64R2 */
#define	MIPS_CN68XX	0x91	/* Cavium OCTEON II CN68xx	MIPS64R2 */
#define	MIPS_CN66XX	0x92	/* Cavium OCTEON II CN66xx	MIPS64R2 */
#define	MIPS_CN61XX	0x93	/* Cavium OCTEON II CN6[01]xx	MIPS64R2 */
#define	MIPS_CN78XX	0x95	/* Cavium OCTEON III CN7[678]xx	MIPS64R2 */
#define	MIPS_CN71XX	0x96	/* Cavium OCTEON III CN7[01]xx	MIPS64R2 */
#define	MIPS_CN73XX	0x97	/* Cavium OCTEON III CN7[23]xx	MIPS64R2 */

/*
 * MIPS FPU types. Only soft, rest is the same as cpu type.
 */
#define	MIPS_SOFT	0x00	/* Software emulation		ISA I   */


#if defined(_KERNEL) && !defined(_LOCORE)

extern register_t protosr;
extern int cpu_has_synced_cp0_count;
extern int cpu_has_userlocal;

#ifdef FPUEMUL
#define	CPU_HAS_FPU(ci)	((ci)->ci_hw.c1prid != 0)
#else
#define	CPU_HAS_FPU(ci)	1
#endif

struct exec_package;
struct user;

void	tlb_asid_wrap(struct cpu_info *);
void	tlb_flush(int);
void	tlb_flush_addr(vaddr_t);
void	tlb_init(unsigned int);
int64_t	tlb_probe(vaddr_t);
void	tlb_set_page_mask(uint32_t);
void	tlb_set_pid(u_int);
void	tlb_set_wired(uint32_t);
int	tlb_update(vaddr_t, register_t);
void	tlb_update_indexed(vaddr_t, register_t, register_t, uint);

void	build_trampoline(vaddr_t, vaddr_t);
void	cpu_switchto_asm(struct proc *, struct proc *);
int	exec_md_map(struct proc *, struct exec_package *);
void	savectx(struct user *, int);

void	enable_fpu(struct proc *);
void	save_fpu(void);
int	fpe_branch_emulate(struct proc *, struct trapframe *, uint32_t,
	    vaddr_t);
void	MipsSaveCurFPState(struct proc *);
void	MipsSaveCurFPState16(struct proc *);
void	MipsSwitchFPState(struct proc *, struct trapframe *);
void	MipsSwitchFPState16(struct proc *, struct trapframe *);

int	guarded_read_1(paddr_t, uint8_t *);
int	guarded_read_2(paddr_t, uint16_t *);
int	guarded_read_4(paddr_t, uint32_t *);
int	guarded_write_4(paddr_t, uint32_t);

void	MipsFPTrap(struct trapframe *);
register_t MipsEmulateBranch(struct trapframe *, vaddr_t, uint32_t, uint32_t);

int	classify_insn(uint32_t);
#define	INSNCLASS_NEUTRAL	0
#define	INSNCLASS_CALL		1
#define	INSNCLASS_BRANCH	2

/*
 * Low level access routines to CPU registers
 */

void	setsoftintr0(void);
void	clearsoftintr0(void);
void	setsoftintr1(void);
void	clearsoftintr1(void);
register_t enableintr(void);
register_t disableintr(void);
register_t getsr(void);
register_t setsr(register_t);

u_int	cp0_get_count(void);
register_t cp0_get_config(void);
uint32_t cp0_get_config_1(void);
uint32_t cp0_get_config_2(void);
uint32_t cp0_get_config_3(void);
uint32_t cp0_get_config_4(void);
uint32_t cp0_get_pagegrain(void);
register_t cp0_get_prid(void);
void	cp0_reset_cause(register_t);
void	cp0_set_compare(u_int);
void	cp0_set_config(register_t);
void	cp0_set_pagegrain(uint32_t);
void	cp0_set_trapbase(register_t);
u_int	cp1_get_prid(void);

static inline uint32_t
cp0_get_hwrena(void)
{
	uint32_t value;
	__asm__ volatile ("mfc0 %0, $7" : "=r" (value));
	return value;
}

static inline void
cp0_set_hwrena(uint32_t value)
{
	__asm__ volatile ("mtc0 %0, $7" : : "r" (value));
}

static inline void
cp0_set_userlocal(void *value)
{
	__asm__ volatile (
	"	.set	push\n"
	"	.set	mips64r2\n"
	"	dmtc0	%0, $4, 2\n"
	"	.set	pop\n"
	: : "r" (value));
}

static inline u_long
intr_disable(void)
{
	return disableintr();
}

static inline void
intr_restore(u_long sr)
{
	setsr(sr);
}

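/*
 * Usage sketch: a short critical section with interrupts disabled.
 *
 *	u_long sr = intr_disable();
 *	... touch state that an interrupt handler may also modify ...
 *	intr_restore(sr);
 */
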
/*
 * Cache routines (may be overridden)
 */

#ifndef	Mips_SyncCache
#define	Mips_SyncCache(ci) \
	((ci)->ci_SyncCache)(ci)
#endif
#ifndef	Mips_InvalidateICache
#define	Mips_InvalidateICache(ci, va, l) \
	((ci)->ci_InvalidateICache)(ci, va, l)
#endif
#ifndef	Mips_InvalidateICachePage
#define	Mips_InvalidateICachePage(ci, va) \
	((ci)->ci_InvalidateICachePage)(ci, va)
#endif
#ifndef	Mips_SyncICache
#define	Mips_SyncICache(ci) \
	((ci)->ci_SyncICache)(ci)
#endif
#ifndef	Mips_SyncDCachePage
#define	Mips_SyncDCachePage(ci, va, pa) \
	((ci)->ci_SyncDCachePage)(ci, va, pa)
#endif
#ifndef	Mips_HitSyncDCachePage
#define	Mips_HitSyncDCachePage(ci, va, pa) \
	((ci)->ci_HitSyncDCachePage)(ci, va, pa)
#endif
#ifndef	Mips_HitSyncDCache
#define	Mips_HitSyncDCache(ci, va, l) \
	((ci)->ci_HitSyncDCache)(ci, va, l)
#endif
#ifndef	Mips_HitInvalidateDCache
#define	Mips_HitInvalidateDCache(ci, va, l) \
	((ci)->ci_HitInvalidateDCache)(ci, va, l)
#endif
#ifndef	Mips_IOSyncDCache
#define	Mips_IOSyncDCache(ci, va, l, h) \
	((ci)->ci_IOSyncDCache)(ci, va, l, h)
#endif

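/*
 * Usage sketch: after storing instructions at va (length l), write the
 * data back and invalidate the stale instruction cache contents before
 * executing them.
 *
 *	struct cpu_info *ci = curcpu();
 *	Mips_HitSyncDCache(ci, va, l);
 *	Mips_InvalidateICache(ci, va, l);
 */
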
#endif /* _KERNEL && !_LOCORE */
#endif /* !_MIPS64_CPU_H_ */