1 /* $NetBSD: mips_fixup.c,v 1.23 2022/01/02 16:03:46 christos Exp $ */
2
3 /*-
4 * Copyright (c) 2010 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Matt Thomas of 3am Software Foundry.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: mips_fixup.c,v 1.23 2022/01/02 16:03:46 christos Exp $");
34
35 #include "opt_mips3_wired.h"
36 #include "opt_multiprocessor.h"
37 #include <sys/param.h>
38
39 #include <uvm/uvm_extern.h>
40
41 #include <mips/locore.h>
42 #include <mips/cache.h>
43 #include <mips/mips3_pte.h>
44 #include <mips/regnum.h>
45 #include <mips/mips_opcode.h>
46
/*
 * Scan the low exception vectors for lui + load/store instruction pairs
 * that reference cpu_info_store and offer each matching pair to the
 * given callback, which may rewrite the two instructions in place
 * (e.g. into a $zero-relative access backed by a wired TLB entry).
 *
 * callback	invoked as (*callback)(load_addr, new_insns, arg); returns
 *		true if new_insns[] were rewritten and should be committed.
 * arg		opaque cookie passed through to the callback.
 *
 * Returns true if any instruction was patched; the instruction cache
 * is synced over the scanned range in that case.
 */
bool
mips_fixup_exceptions(mips_fixup_callback_t callback, void *arg)
{
#if (MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	/*
	 * On MIPS32/MIPS64 the vectors may have been relocated via the
	 * CP0 EBASE register.  Use that base when it reads back as a
	 * proper kseg address; otherwise fall back to classic KSEG0.
	 */
	int32_t ebase = mipsNN_cp0_ebase_read();
	uint32_t *start;
	if (ebase == mips_options.mips_cpu_id
	    || (ebase & __BITS(31,30)) != __BIT(31)) {
		start = (uint32_t *)MIPS_KSEG0_START;
	} else {
		start = (uint32_t *)(intptr_t)(ebase & ~MIPS_EBASE_CPUNUM);
	}
#else
	uint32_t * const start = (uint32_t *)MIPS_KSEG0_START;
#endif
	/* The vectors occupy five 128-byte exception-vector slots. */
	uint32_t * const end = start + (5 * 128) / sizeof(uint32_t);
	const int32_t addr = (intptr_t)&cpu_info_store;
	const size_t size = sizeof(cpu_info_store);
	uint32_t new_insns[2];
	uint32_t *lui_insnp = NULL;	/* lui half of a candidate pair */
	int32_t lui_offset = 0;		/* value the lui loads (<<16) */
	bool fixed = false;
	size_t lui_reg = 0;		/* register the lui targets */
#ifdef DEBUG_VERBOSE
	printf("%s: fixing %p..%p\n", __func__, start, end);
#endif
	/*
	 * If this was allocated so that bit 15 of the value/address is 1, then
	 * %hi will add 1 to the immediate (or 0x10000 to the value loaded)
	 * to compensate for using a negative offset for the lower half of
	 * the value.
	 */
	const int32_t upper_start = (addr + 32768) & ~0xffff;
	const int32_t upper_end = (addr + size - 1 + 32768) & ~0xffff;

#ifndef MIPS64_OCTEON
	/* cpu_info_store is expected not to straddle a 4KB boundary. */
	KASSERT((addr & ~0xfff) == ((addr + size - 1) & ~0xfff));
#endif

	uint32_t lui_insn = 0;
	for (uint32_t *insnp = start; insnp < end; insnp++) {
		const uint32_t insn = *insnp;
		if (INSN_LUI_P(insn)) {
			/*
			 * Remember this lui if its %hi() value could be
			 * the upper half of cpu_info_store's address;
			 * otherwise forget any previously seen lui.
			 */
			const int32_t offset = insn << 16;
			lui_reg = (insn >> 16) & 31;
#ifdef DEBUG_VERBOSE
			printf("%s: %#x: insn %08x: lui r%zu, %%hi(%#x)",
			    __func__, (int32_t)(intptr_t)insnp,
			    insn, lui_reg, offset);
#endif
			/* Vector code only uses the kernel temporaries. */
			KASSERT(lui_reg == _R_K0 || lui_reg == _R_K1);
			if (upper_start == offset || upper_end == offset) {
				lui_insnp = insnp;
				lui_insn = insn;
				lui_offset = offset;
#ifdef DEBUG_VERBOSE
				printf(" (maybe)");
#endif
			} else {
				lui_insnp = NULL;
				lui_insn = 0;
				lui_offset = 0;
			}
#ifdef DEBUG_VERBOSE
			printf("\n");
#endif
		} else if (lui_insn != 0
			   && (INSN_LOAD_P(insn) || INSN_STORE_P(insn))) {
			size_t base = (insn >> 21) & 31;
#if defined(DIAGNOSTIC) || defined(DEBUG_VERBOSE)
			size_t rt = (insn >> 16) & 31;
#endif
			/* Effective address formed by the lui/load pair. */
			int32_t load_addr = lui_offset + (int16_t)insn;
			if (addr <= load_addr
			    && load_addr < addr + size
			    && base == lui_reg) {
#if defined(DIAGNOSTIC) || defined(DEBUG_VERBOSE)
				KASSERT(rt == _R_K0 || rt == _R_K1);
#ifdef DEBUG_VERBOSE
				printf("%s: %#x: insn %08x: %s r%zu, %%lo(%08x)(r%zu)\n",
				    __func__, (int32_t)(intptr_t)insnp,
				    insn,
				    INSN_LOAD_P(insn)
					? INSN_LW_P(insn) ? "lw" : "ld"
					: INSN_SW_P(insn) ? "sw" : "sd",
				    rt, load_addr, base);
#endif
#endif
				new_insns[0] = lui_insn;
				new_insns[1] = *insnp;
				/*
				 * Offer the pair to the callback; if it
				 * rewrites them, commit the result.  A
				 * NULL lui_insnp means the lui is shared
				 * with an earlier pair, so only the
				 * load/store slot may be overwritten.
				 */
				if ((callback)(load_addr, new_insns, arg)) {
					if (lui_insnp) {
						*lui_insnp = new_insns[0];
						*insnp = new_insns[1];
					} else if (new_insns[1] == 0) {
						*insnp = new_insns[0];
					} else {
						*insnp = new_insns[1];
					}
					fixed = true;
				}
				lui_insnp = NULL;
			}
		} else if (INSN_LOAD_P(insn)) {
			/*
			 * If we are loading the register used in the LUI,
			 * then that LUI is meaningless now.
			 */
			size_t rt = (insn >> 16) & 31;
			if (lui_reg == rt)
				lui_insn = 0;
		}
	}

	/* Patched instructions must be made visible to instruction fetch. */
	if (fixed)
		mips_icache_sync_range((intptr_t)start,
		    sizeof(start[0]) * (end - start));

	return fixed;
}
167
#ifdef MIPS3_PLUS
/*
 * Fixup callback that rewrites a lui + load/store pair referencing
 * cpu_info_store into a single access off register $zero with a
 * negative offset, backed by a wired TLB entry that maps the last
 * page of the address space onto the page containing cpu_info_store.
 *
 * load_addr	KSEG0 address within cpu_info_store accessed by the pair.
 * new_insns	On entry: [0] = lui insn, [1] = load/store insn.
 *		On exit:  [0] = rewritten $zero-relative access, [1] = NOP.
 * arg		unused.
 *
 * Returns true so the caller commits the rewritten instructions.
 */
bool
mips_fixup_zero_relative(int32_t load_addr, uint32_t new_insns[2], void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct pmap_tlb_info * const ti = ci->ci_tlb_info;

	KASSERT(MIPS_KSEG0_P(load_addr));
	KASSERT(!MIPS_CACHE_VIRTUAL_ALIAS);
#ifdef MULTIPROCESSOR
	KASSERT(CPU_IS_PRIMARY(ci));
#endif
	KASSERT((intptr_t)ci <= load_addr);
	KASSERT(load_addr < (intptr_t)(ci + 1));
	KASSERT(MIPS_HAS_R4K_MMU);

	/*
	 * Use the load instruction as a prototype and make it use $0
	 * as base and the new negative offset.  The second instruction
	 * is a NOP.
	 */
	new_insns[0] =
	    (new_insns[1] & (0xfc1f0000|PAGE_MASK)) | (0xffff & ~PAGE_MASK);
	new_insns[1] = 0;
#ifdef DEBUG_VERBOSE
	/* Note: INSN_SW_P (not INSN_LW_P) distinguishes sw from sd here. */
	printf("%s: %08x: insn#1 %08x: %s r%u, %d(r%u)\n",
	    __func__, (int32_t)load_addr, new_insns[0],
	    INSN_LOAD_P(new_insns[0])
		? INSN_LW_P(new_insns[0]) ? "lw" : "ld"
		: INSN_SW_P(new_insns[0]) ? "sw" : "sd",
	    (new_insns[0] >> 16) & 31,
	    (int16_t)new_insns[0],
	    (new_insns[0] >> 21) & 31);
#endif
	/*
	 * Construct the TLB_LO entry needed to map cpu_info_store.
	 */

	/*
	 * Now allocate a TLB entry in the primary TLB for the mapping and
	 * enter the mapping into the TLB.
	 */
	TLBINFO_LOCK(ti);
	if (ci->ci_tlb_slot < 0) {
		uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V|MIPS3_PG_D
		    | mips3_paddr_to_tlbpfn(MIPS_KSEG0_TO_PHYS(trunc_page(load_addr)));
		struct tlbmask tlbmask = {
			.tlb_hi = -PAGE_SIZE | KERNEL_PID,
#if PGSHIFT & 1
			/*
			 * Odd PGSHIFT: the even/odd halves of the TLB pair
			 * map two adjacent hardware pages.  (The original
			 * code initialized .tlb_lo1 twice, leaving EntryLo0
			 * zero/invalid; the first entry must be .tlb_lo0.)
			 */
			.tlb_lo0 = tlb_lo,
			.tlb_lo1 = tlb_lo + MIPS3_PG_NEXT,
#else
			.tlb_lo0 = 0,
			.tlb_lo1 = tlb_lo,
#endif
			.tlb_mask = -1,
		};
		/* Claim the next wired slot and install the mapping. */
		ci->ci_tlb_slot = ti->ti_wired++;
		mips3_cp0_wired_write(ti->ti_wired);
		tlb_invalidate_addr(-PAGE_SIZE, KERNEL_PID);
		tlb_write_entry(ci->ci_tlb_slot, &tlbmask);
	}
	TLBINFO_UNLOCK(ti);

	return true;
}
#endif /* MIPS3_PLUS */
235
236 #define OPCODE_J 002
237 #define OPCODE_JAL 003
238
239 static inline void
fixup_mips_jump(uint32_t * insnp,const struct mips_jump_fixup_info * jfi)240 fixup_mips_jump(uint32_t *insnp, const struct mips_jump_fixup_info *jfi)
241 {
242 uint32_t insn = *insnp;
243
244 KASSERT((insn >> (26+1)) == (OPCODE_J >> 1));
245 KASSERT((insn << 6) == (jfi->jfi_stub << 6));
246
247 insn ^= (jfi->jfi_stub ^ jfi->jfi_real);
248
249 KASSERT((insn << 6) == (jfi->jfi_real << 6));
250
251 #ifdef DEBUG
252 #if 0
253 int32_t va = ((intptr_t) insnp >> 26) << 26;
254 printf("%s: %08x: [%08x] %s %08x -> [%08x] %s %08x\n",
255 __func__, (int32_t)(intptr_t)insnp,
256 insn, opcode == OPCODE_J ? "j" : "jal",
257 va | (jfi->jfo_stub << 2),
258 *insnp, opcode == OPCODE_J ? "j" : "jal",
259 va | (jfi->jfi_real << 2));
260 #endif
261 #endif
262 *insnp = insn;
263 }
264
/*
 * Determine the real destination of a stub by emulating the stub's
 * instructions until the terminating jr/jalr is found, then returning
 * the value of the jump register.  Returns 0 (and logs the offending
 * instruction) if anything unexpected is encountered.
 */
intptr_t
mips_fixup_addr(const uint32_t *stubp)
{
	/*
	 * Stubs typically look like:
	 *	lui	v0, %hi(sym)
	 *	lX	t9, %lo(sym)(v0)
	 *	[nop]
	 *	jr	t9
	 *	nop
	 *
	 * Or for loongson2 (
	 *	lui	v0, %hi(sym)
	 *	lX	t9, %lo(sym)(v0)
	 *	lui	at,0xcfff
	 *	ori	at,at,0xffff
	 *	and	t9,t9,at
	 *	jr	t9
	 *	move	at,at
	 * or:
	 *	lui	v0, %hi(sym)
	 *	lX	t9, %lo(sym)(v0)
	 *	li	at, 0x3
	 *	dmtc0	at, $22
	 *	jr	t9
	 *	nop
	 *
	 * A profiled n32/n64 stub will start with:
	 *	move	ta, ra
	 *	jal	_mcount
	 *	nop
	 */
	/*
	 * regs[] is deliberately left uninitialized; 'used' is a bitmap of
	 * which slots hold a known value.  $zero and the argument registers
	 * are treated as live on entry.
	 */
	mips_reg_t regs[32];
	uint32_t used = 1 |__BIT(_R_A0)|__BIT(_R_A1)|__BIT(_R_A2)|__BIT(_R_A3);
	size_t n;
	const char *errstr = "mips";

#ifdef GPROF
	/* Cached 26-bit jump displacement of _mcount for JAL matching. */
	static uint32_t mcount_addr = 0;
	extern void _mcount(u_long, u_long);		/* XXX decl */

	if (mcount_addr == 0)
		mcount_addr = (uint32_t)(uintptr_t)_mcount & 0x0fffffff;
#endif /* GPROF */

	/*
	 * This is basically a small MIPS emulator for those instructions
	 * that might be in a stub routine.
	 */
	for (n = 0; n < 16; n++) {
		const InstFmt insn = { .word = stubp[n] };
		switch (insn.IType.op) {
		case OP_LUI:
			regs[insn.IType.rt] = (int16_t)insn.IType.imm << 16;
			used |= (1 << insn.IType.rt);
			break;
#ifdef _LP64
		case OP_LD:
			/* Base register must hold a known value. */
			if ((used & (1 << insn.IType.rs)) == 0) {
				errstr = "LD";
				goto out;
			}
			regs[insn.IType.rt] = *(const int64_t *)
			    (regs[insn.IType.rs] + (int16_t)insn.IType.imm);
			used |= (1 << insn.IType.rt);
			break;
		case OP_SD:
			/* Only a ra spill to the stack is tolerated. */
			if (insn.IType.rt != _R_RA || insn.IType.rs != _R_SP) {
				errstr = "SD";
				goto out;
			}
			break;
#else
		case OP_LW:
			if ((used & (1 << insn.IType.rs)) == 0) {
				errstr = "LW";
				goto out;
			}
			regs[insn.IType.rt] = *(const int32_t *)
			    ((intptr_t)regs[insn.IType.rs]
			    + (int16_t)insn.IType.imm);
			used |= (1 << insn.IType.rt);
			break;
		case OP_SW:
			if (insn.IType.rt != _R_RA || insn.IType.rs != _R_SP) {
				errstr = "SW";
				goto out;
			}
			break;
#endif
		case OP_ORI:
			if ((used & (1 << insn.IType.rs)) == 0) {
				errstr = "ORI";
				goto out;
			}
			regs[insn.IType.rt] |= insn.IType.imm;
			used |= (1 << insn.IType.rt);
			break;
		case OP_COP0:
			switch (insn.RType.rs) {
			case OP_DMT:
				/* Only the loongson dmtc0 at, $22 form. */
				if (insn.RType.rd != 22) {
					errstr = "dmtc0 dst";
					goto out;
				}
				if ((used & (1 << insn.RType.rt)) == 0) {
					errstr = "dmtc0 src";
					goto out;
				}
				break;
			default:
				errstr = "COP0";
				goto out;
			}
			break;
#ifdef GPROF
		case OP_JAL:
			/* A profiled stub may only call _mcount. */
			if (insn.JType.target << 2 != mcount_addr) {
				errstr = "JAL-non-_mcount";
				goto out;
			}
			break;
#endif /* GPROF */
		case OP_SPECIAL:
			switch (insn.RType.func) {
			case OP_JALR:
			case OP_JR:
				if ((used & (1 << insn.RType.rs)) == 0) {
					errstr = "JR";
					goto out;
				}
				/*
				 * The delay slot must be harmless: a nop,
				 * a dsll32/dsra32-style shift, or the
				 * loongson "move at,at" (0x00200825).
				 */
				if (stubp[n+1] != 0
				    && (stubp[n+1] & 0xfff0003c) != 0x0000003c
				    && stubp[n+1] != 0x00200825) {
					n++;
					errstr = "delay slot";
					goto out;
				}
				/* Found the jump: return its target. */
				return regs[insn.RType.rs];
			case OP_AND:
				if ((used & (1 << insn.RType.rs)) == 0
				    || (used & (1 << insn.RType.rt)) == 0) {
					errstr = "AND";
					goto out;
				}
				regs[insn.RType.rd] =
				    regs[insn.RType.rs] & regs[insn.RType.rt];
				used |= (1 << insn.RType.rd);
				break;
#if !defined(__mips_o32)
			case OP_DSLL32:	/* force to 32-bits */
			case OP_DSRA32:	/* force to 32-bits */
				/*
				 * NOTE(review): regs[insn.RType.shamt] indexes
				 * the register file by the shift amount and may
				 * read an uninitialized slot; the intent looks
				 * like checking insn.RType.shamt != 0 — verify.
				 * (errstr "AND" here also looks copy-pasted.)
				 */
				if (regs[insn.RType.rd] != regs[insn.RType.rt]
				    || (used & (1 << insn.RType.rt)) == 0
				    || regs[insn.RType.shamt] != 0) {
					errstr = "AND";
					goto out;
				}
				break;
#endif
			case OP_SLL:	/* nop */
				if (insn.RType.rd != _R_ZERO) {
					errstr = "NOP";
					goto out;
				}
				break;
#ifdef GPROF
			case OP_OR:
				/* Only "move at, ra" (or rd, rs, $0). */
				if (insn.RType.rt != 0) {
					errstr = "NON-MOVE OR";
					goto out;
				}
				if (insn.RType.rd != 1 ||
				    insn.RType.rs != 31) {
					errstr = "NON at,ra MOVE";
					goto out;
				}
				break;
#endif /* GPROF */
			case OP_DSLL:
			default:
				errstr = "SPECIAL";
				goto out;
			}
			break;
		default:
			errstr = "mips";
			goto out;
		}
	}

 out:
	printf("%s: unexpected %s insn %#x at %p\n",
	    __func__, errstr,
	    stubp[n], &stubp[n]);
	return 0;
}
462
/*
 * Walk kernel text in [start, end) and rewrite every j/jal that targets
 * a stub in the .stub section so that it jumps directly to the real
 * routine the stub would have dispatched to.  Finally sync the icache
 * over the patched range.
 */
void
mips_fixup_stubs(uint32_t *start, uint32_t *end)
{
#ifdef DEBUG
	size_t fixups_done = 0;
	/* Cycle count for the debug summary (only meaningful on MIPS3+). */
	uint32_t cycles =
#if (MIPS3 + MIPS4 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	    (CPUISMIPS3 ? mips3_cp0_count_read() : 0);
#else
	    0;
#endif
#endif
	extern uint32_t __stub_start[], __stub_end[];

	KASSERT(MIPS_KSEG0_P(start));
	KASSERT(MIPS_KSEG0_P(end));
	/* Everything must live in one 256MB jump segment. */
	KASSERT(MIPS_KSEG0_START == (((intptr_t)start >> 28) << 28));

	/* Never patch the stubs themselves. */
	if (end > __stub_start)
		end = __stub_start;

	for (uint32_t *insnp = start; insnp < end; insnp++) {
		uint32_t insn = *insnp;
		uint32_t offset = insn & 0x03ffffff;	/* 26-bit target */
		uint32_t opcode = insn >> 26;
		/* Reconstruct the absolute target within this segment. */
		const uint32_t * const stubp =
		    &((uint32_t *)(((intptr_t)insnp >> 28) << 28))[offset];

		/*
		 * First we check to see if this is a jump and whether it is
		 * within the range we are interested in.
		 */
		if ((opcode != OPCODE_J && opcode != OPCODE_JAL)
		    || stubp < __stub_start || __stub_end <= stubp)
			continue;

		const intptr_t real_addr = mips_fixup_addr(stubp);

		/*
		 * If the real address couldn't be determined, leave the
		 * jump alone.
		 */
		if (real_addr == 0) {
			continue;
		}
		/*
		 * Verify the real destination is in the same 256MB
		 * as the location of the jump instruction.
		 */
		KASSERT((real_addr >> 28) == ((intptr_t)insnp >> 28));

		/*
		 * Now fix it up.  Replace the old displacement to the stub
		 * with the real displacement.
		 */
		struct mips_jump_fixup_info fixup = {
			.jfi_stub = fixup_addr2offset(stubp),
			.jfi_real = fixup_addr2offset(real_addr),
		};

		fixup_mips_jump(insnp, &fixup);
#ifdef DEBUG
		fixups_done++;
#endif
	}

	/* Sync the whole icache if the range exceeds its size. */
	if (sizeof(uint32_t [end - start]) > mips_cache_info.mci_picache_size)
		mips_icache_sync_all();
	else
		mips_icache_sync_range((intptr_t)start,
		    sizeof(uint32_t [end - start]));

#ifdef DEBUG
#if (MIPS3 + MIPS4 + MIPS32 + MIPS32R2 + MIPS64 + MIPS64R2) > 0
	if (CPUISMIPS3)
		cycles = mips3_cp0_count_read() - cycles;
#endif
	printf("%s: %zu fixup%s done in %u cycles\n", __func__,
	    fixups_done, fixups_done == 1 ? "" : "s",
	    cycles);
#endif
}
544
/*
 * Functions tagged __stub are placed in the .stub section; at boot
 * mips_fixup_stubs() patches direct jumps to them so callers reach the
 * cpu-specific implementation without going through the jump vector.
 */
#define	__stub	__section(".stub")

void	mips_cpu_switch_resume(struct lwp *)		__stub;
tlb_asid_t
	tlb_get_asid(void)				__stub;
void	tlb_set_asid(uint32_t, struct pmap *)		__stub;
void	tlb_invalidate_all(void)			__stub;
void	tlb_invalidate_globals(void)			__stub;
void	tlb_invalidate_asids(uint32_t, uint32_t)	__stub;
void	tlb_invalidate_addr(vaddr_t, tlb_asid_t)	__stub;
u_int	tlb_record_asids(u_long *, uint32_t)		__stub;
bool	tlb_update_addr(vaddr_t, tlb_asid_t, pt_entry_t, bool)
							__stub;
void	tlb_read_entry(size_t, struct tlbmask *)	__stub;
void	tlb_write_entry(size_t, const struct tlbmask *)	__stub;

/*
 * wbflush isn't a stub since it gets overridden quite late
 * (after mips_vector_init returns).
 */
void	wbflush(void)					/*__stub*/;
566
/*
 * Stub bodies: each simply dispatches through mips_locore_jumpvec (or
 * mips_locoresw for wbflush) to the cpu-specific implementation.  Once
 * mips_fixup_stubs() has run, direct callers no longer reach these.
 */
void
mips_cpu_switch_resume(struct lwp *l)
{
	(*mips_locore_jumpvec.ljv_cpu_switch_resume)(l);
}

tlb_asid_t
tlb_get_asid(void)
{
	return (*mips_locore_jumpvec.ljv_tlb_get_asid)();
}

void
tlb_set_asid(uint32_t asid, struct pmap *pm)
{
	/* The vectored routine takes only the asid; pm is unused here. */
	(*mips_locore_jumpvec.ljv_tlb_set_asid)(asid);
}

void
tlb_invalidate_all(void)
{
	(*mips_locore_jumpvec.ljv_tlb_invalidate_all)();
}

void
tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
{
	(*mips_locore_jumpvec.ljv_tlb_invalidate_addr)(va, asid);
}

void
tlb_invalidate_globals(void)
{
	(*mips_locore_jumpvec.ljv_tlb_invalidate_globals)();
}

void
tlb_invalidate_asids(uint32_t asid_lo, uint32_t asid_hi)
{
	(*mips_locore_jumpvec.ljv_tlb_invalidate_asids)(asid_lo, asid_hi);
}

u_int
tlb_record_asids(u_long *bitmap, tlb_asid_t asid_max)
{
	return (*mips_locore_jumpvec.ljv_tlb_record_asids)(bitmap, asid_max);
}

#if 0
bool
tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert)
{
	return (*mips_locore_jumpvec.ljv_tlb_update_addr)(va, asid, pte, insert);
}
#endif

void
tlb_read_entry(size_t tlbno, struct tlbmask *tlb)
{
	(*mips_locore_jumpvec.ljv_tlb_read_entry)(tlbno, tlb);
}

void
tlb_write_entry(size_t tlbno, const struct tlbmask *tlb)
{
	(*mips_locore_jumpvec.ljv_tlb_write_entry)(tlbno, tlb);
}

void
wbflush(void)
{
	(*mips_locoresw.lsw_wbflush)();
}
640
#ifndef LOCKDEBUG
/*
 * Without LOCKDEBUG the mutex entry points are also boot-patchable
 * stubs; until patched they dispatch through mips_locore_atomicvec.
 */
void mutex_enter(kmutex_t *mtx)		__stub;
void mutex_exit(kmutex_t *mtx)		__stub;
void mutex_spin_enter(kmutex_t *mtx)	__stub;
void mutex_spin_exit(kmutex_t *mtx)	__stub;

void
mutex_enter(kmutex_t *mtx)
{

	(*mips_locore_atomicvec.lav_mutex_enter)(mtx);
}

void
mutex_exit(kmutex_t *mtx)
{

	(*mips_locore_atomicvec.lav_mutex_exit)(mtx);
}

void
mutex_spin_enter(kmutex_t *mtx)
{

	(*mips_locore_atomicvec.lav_mutex_spin_enter)(mtx);
}

void
mutex_spin_exit(kmutex_t *mtx)
{

	(*mips_locore_atomicvec.lav_mutex_spin_exit)(mtx);
}
#endif	/* !LOCKDEBUG */
675
/*
 * Compare-and-swap and user-space CAS entry points, likewise dispatched
 * through mips_locore_atomicvec until patched.  The strong aliases map
 * the various width-specific CAS names onto the two implementations.
 */
u_int _atomic_cas_uint(volatile u_int *, u_int, u_int)		__stub;
u_long _atomic_cas_ulong(volatile u_long *, u_long, u_long)	__stub;

u_int
_atomic_cas_uint(volatile u_int *ptr, u_int old, u_int new)
{

	return (*mips_locore_atomicvec.lav_atomic_cas_uint)(ptr, old, new);
}

u_long
_atomic_cas_ulong(volatile u_long *ptr, u_long old, u_long new)
{

	return (*mips_locore_atomicvec.lav_atomic_cas_ulong)(ptr, old, new);
}

__strong_alias(atomic_cas_uint, _atomic_cas_uint)
__strong_alias(atomic_cas_uint_ni, _atomic_cas_uint)
__strong_alias(_atomic_cas_32, _atomic_cas_uint)
__strong_alias(_atomic_cas_32_ni, _atomic_cas_uint)
__strong_alias(atomic_cas_32, _atomic_cas_uint)
__strong_alias(atomic_cas_32_ni, _atomic_cas_uint)
__strong_alias(atomic_cas_ptr, _atomic_cas_ulong)
__strong_alias(atomic_cas_ptr_ni, _atomic_cas_ulong)
__strong_alias(atomic_cas_ulong, _atomic_cas_ulong)
__strong_alias(atomic_cas_ulong_ni, _atomic_cas_ulong)
#ifdef _LP64
__strong_alias(atomic_cas_64, _atomic_cas_ulong)
__strong_alias(atomic_cas_64_ni, _atomic_cas_ulong)
__strong_alias(_atomic_cas_64, _atomic_cas_ulong)
__strong_alias(_atomic_cas_64_ni, _atomic_cas_ulong)
#endif

int __ucas_32(volatile uint32_t *, uint32_t, uint32_t, uint32_t *) __stub;
int
__ucas_32(volatile uint32_t *ptr, uint32_t old, uint32_t new, uint32_t *retp)
{

	return (*mips_locore_atomicvec.lav_ucas_32)(ptr, old, new, retp);
}
__strong_alias(_ucas_32,__ucas_32);

#ifdef _LP64
int __ucas_64(volatile uint64_t *, uint64_t, uint64_t, uint64_t *) __stub;
int
__ucas_64(volatile uint64_t *ptr, uint64_t old, uint64_t new, uint64_t *retp)
{

	return (*mips_locore_atomicvec.lav_ucas_64)(ptr, old, new, retp);
}
__strong_alias(_ucas_64,__ucas_64);
#endif /* _LP64 */
729