/*	$NetBSD: octeon_intr.c,v 1.27 2022/04/09 23:34:40 riastradh Exp $	*/

/*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Platform-specific interrupt support for the Cavium Octeon.
 */

#include "opt_multiprocessor.h"

#include "cpunode.h"
#define __INTR_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: octeon_intr.c,v 1.27 2022/04/09 23:34:40 riastradh Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/atomic.h>
#include <sys/xcall.h>

#include <lib/libkern/libkern.h>

#include <mips/locore.h>

#include <mips/cavium/dev/octeon_ciureg.h>
#include <mips/cavium/octeonvar.h>

/*
 * XXX:
 * Force all interrupts (except clock intrs and IPIs) to be routed
 * through cpu0 until MP on MIPS is more stable.
 */
#define OCTEON_CPU0_INTERRUPTS


/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 */
static const struct ipl_sr_map octeon_ipl_sr_map = {
	.sr_bits = {
	    [IPL_NONE] = 0,
	    [IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
	    [IPL_SOFTNET] = MIPS_SOFT_INT_MASK,
	    [IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0,
	    [IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
		| MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	    [IPL_DDB] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
		| MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	    [IPL_HIGH] = MIPS_INT_MASK,
	},
};
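
/*
 * For example, raising to IPL_VM masks the soft interrupts and hardware
 * interrupt 0 (the CIU IP2 summary), while hardware interrupt 1 (CIU IP3,
 * used at IPL_SCHED), hardware interrupt 2 (CIU IP4, used at IPL_HIGH)
 * and the count/compare clock interrupt remain deliverable.
 */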

static const char * octeon_intrnames[NIRQS] = {
	"workq 0",
	"workq 1",
	"workq 2",
	"workq 3",
	"workq 4",
	"workq 5",
	"workq 6",
	"workq 7",
	"workq 8",
	"workq 9",
	"workq 10",
	"workq 11",
	"workq 12",
	"workq 13",
	"workq 14",
	"workq 15",
	"gpio 0",
	"gpio 1",
	"gpio 2",
	"gpio 3",
	"gpio 4",
	"gpio 5",
	"gpio 6",
	"gpio 7",
	"gpio 8",
	"gpio 9",
	"gpio 10",
	"gpio 11",
	"gpio 12",
	"gpio 13",
	"gpio 14",
	"gpio 15",
	"mbox 0-15",
	"mbox 16-31",
	"uart 0",
	"uart 1",
	"pci inta",
	"pci intb",
	"pci intc",
	"pci intd",
	"pci msi 0-15",
	"pci msi 16-31",
	"pci msi 32-47",
	"pci msi 48-63",
	"wdog summary",
	"twsi",
	"rml",
	"trace",
	"gmx drop",
	"reserved",
	"ipd drop",
	"reserved",
	"timer 0",
	"timer 1",
	"timer 2",
	"timer 3",
	"usb",
	"pcm/tdm",
	"mpi/spi",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
	"reserved",
};

struct octeon_intrhand {
	int (*ih_func)(void *);
	void *ih_arg;
	int ih_irq;
	int ih_ipl;
};

#ifdef MULTIPROCESSOR
static int octeon_send_ipi(struct cpu_info *, int);
static int octeon_ipi_intr(void *);

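/*
 * The 32-bit CIU mailbox register is split in two: the low 16 bits
 * deliver IPIs at IPL_HIGH (via the IP4 summary) and the high 16 bits
 * deliver IPIs at IPL_SCHED (via IP3), so each IPI type is steered to
 * the priority recorded in ipi_prio[] below.
 */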
static struct octeon_intrhand ipi_intrhands[2] = {
	[0] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(15,0),
		.ih_irq = CIU_INT_MBOX_15_0,
		.ih_ipl = IPL_HIGH,
	},
	[1] = {
		.ih_func = octeon_ipi_intr,
		.ih_arg = (void *)(uintptr_t)__BITS(31,16),
		.ih_irq = CIU_INT_MBOX_31_16,
		.ih_ipl = IPL_SCHED,
	},
};

static int ipi_prio[NIPIS] = {
	[IPI_NOP] = IPL_HIGH,
	[IPI_AST] = IPL_HIGH,
	[IPI_SHOOTDOWN] = IPL_SCHED,
	[IPI_SYNCICACHE] = IPL_HIGH,
	[IPI_KPREEMPT] = IPL_HIGH,
	[IPI_SUSPEND] = IPL_HIGH,
	[IPI_HALT] = IPL_HIGH,
	[IPI_XCALL] = IPL_HIGH,
	[IPI_GENERIC] = IPL_HIGH,
	[IPI_WDOG] = IPL_HIGH,
};

#endif

static struct octeon_intrhand *octciu_intrs[NIRQS] = {
#ifdef MULTIPROCESSOR
	[CIU_INT_MBOX_15_0] = &ipi_intrhands[0],
	[CIU_INT_MBOX_31_16] = &ipi_intrhands[1],
#endif
};

static kmutex_t octeon_intr_lock;

#if defined(MULTIPROCESSOR)
#define OCTEON_NCPU	MAXCPUS
#else
#define OCTEON_NCPU	1
#endif

struct cpu_softc octeon_cpu_softc[OCTEON_NCPU];

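/*
 * Precompute the XKPHYS (uncached) addresses of each CPU's CIU summary,
 * enable, mailbox, and watchdog registers so the interrupt paths can
 * use plain 64-bit loads and stores.
 */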
static void
octeon_intr_setup(void)
{
	struct cpu_softc *cpu;
	int cpunum;

#define X(a)	MIPS_PHYS_TO_XKPHYS(OCTEON_CCA_NONE, (a))

	for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
		cpu = &octeon_cpu_softc[cpunum];

		cpu->cpu_ip2_sum0 = X(CIU_IP2_SUM0(cpunum));
		cpu->cpu_ip3_sum0 = X(CIU_IP3_SUM0(cpunum));
		cpu->cpu_ip4_sum0 = X(CIU_IP4_SUM0(cpunum));

		cpu->cpu_int_sum1 = X(CIU_INT_SUM1);

		cpu->cpu_ip2_en[0] = X(CIU_IP2_EN0(cpunum));
		cpu->cpu_ip3_en[0] = X(CIU_IP3_EN0(cpunum));
		cpu->cpu_ip4_en[0] = X(CIU_IP4_EN0(cpunum));

		cpu->cpu_ip2_en[1] = X(CIU_IP2_EN1(cpunum));
		cpu->cpu_ip3_en[1] = X(CIU_IP3_EN1(cpunum));
		cpu->cpu_ip4_en[1] = X(CIU_IP4_EN1(cpunum));

		cpu->cpu_wdog = X(CIU_WDOG(cpunum));
		cpu->cpu_pp_poke = X(CIU_PP_POKE(cpunum));

#ifdef MULTIPROCESSOR
		cpu->cpu_mbox_set = X(CIU_MBOX_SET(cpunum));
		cpu->cpu_mbox_clr = X(CIU_MBOX_CLR(cpunum));
#endif
	}

#undef X

}

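/*
 * Per-CPU interrupt initialization.  cpu0 additionally installs the
 * IPL-to-SR map, the interrupt mutex, and the IPI send callback; every
 * CPU then enables its mailbox interrupts, writes its enable masks to
 * the CIU, and attaches an event counter per irq.
 */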
void
octeon_intr_init(struct cpu_info *ci)
{
	const int cpunum = cpu_index(ci);
	struct cpu_softc *cpu = &octeon_cpu_softc[cpunum];
	const char * const xname = cpu_name(ci);
	int bank;

	cpu->cpu_ci = ci;
	ci->ci_softc = cpu;

	KASSERT(cpunum == ci->ci_cpuid);

	if (ci->ci_cpuid == 0) {
		ipl_sr_map = octeon_ipl_sr_map;
		mutex_init(&octeon_intr_lock, MUTEX_DEFAULT, IPL_HIGH);
#ifdef MULTIPROCESSOR
		mips_locoresw.lsw_send_ipi = octeon_send_ipi;
#endif

		octeon_intr_setup();
	}

#ifdef MULTIPROCESSOR
	// Enable the IPIs
	cpu->cpu_ip4_enable[0] |= __BIT(CIU_INT_MBOX_15_0);
	cpu->cpu_ip3_enable[0] |= __BIT(CIU_INT_MBOX_31_16);
#endif

	if (ci->ci_dev) {
		for (bank = 0; bank < NBANKS; bank++) {
			aprint_verbose_dev(ci->ci_dev,
			    "enabling intr masks %u "
			    " %#"PRIx64"/%#"PRIx64"/%#"PRIx64"\n",
			    bank,
			    cpu->cpu_ip2_enable[bank],
			    cpu->cpu_ip3_enable[bank],
			    cpu->cpu_ip4_enable[bank]);
		}
	}

	for (bank = 0; bank < NBANKS; bank++) {
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
	}

#ifdef MULTIPROCESSOR
	mips3_sd(cpu->cpu_mbox_clr, __BITS(31,0));
#endif

	for (int i = 0; i < NIRQS; i++) {
		if (octeon_intrnames[i] == NULL)
			octeon_intrnames[i] = kmem_asprintf("irq %d", i);
		evcnt_attach_dynamic(&cpu->cpu_intr_evs[i],
		    EVCNT_TYPE_INTR, NULL, xname, octeon_intrnames[i]);
	}
}

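/*
 * Record the core clock frequency and derive the per-tick cycle count
 * and the microsecond delay divisor from it, then reset the CP0
 * count/compare registers.
 */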
void
octeon_cal_timer(int corefreq)
{
	/* Compute the number of cycles per second. */
	curcpu()->ci_cpu_freq = corefreq;

	/* Compute the number of ticks for hz. */
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;

	/* Compute the delay divisor and reciprocal. */
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + 500000) / 1000000);
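	/*
	 * For example, at a 500 MHz core clock and hz = 100 this yields
	 * ci_cycles_per_hz = 5000000 cycles per tick and
	 * ci_divisor_delay = 500 cycles per microsecond.
	 */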
#if 0
	MIPS_SET_CI_RECIPRICAL(curcpu());
#endif

	mips3_cp0_count_write(0);
	mips3_cp0_compare_write(0);
}

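/*
 * Establish a handler for the given CIU irq at the given IPL (IPL_VM or
 * above) and unmask it in the relevant per-CPU enable registers.
 * Returns an opaque cookie for octeon_intr_disestablish(), or NULL if
 * no memory was available.  Typical use from a driver attach function
 * (an illustrative sketch only; "sc", "sc_irq" and "mydev_intr" are
 * hypothetical names, not part of this file):
 *
 *	sc->sc_ih = octeon_intr_establish(sc->sc_irq, IPL_VM,
 *	    mydev_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(self, "couldn't establish interrupt\n");
 */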
void *
octeon_intr_establish(int irq, int ipl, int (*func)(void *), void *arg)
{
	struct octeon_intrhand *ih;
	struct cpu_softc *cpu;
#ifndef OCTEON_CPU0_INTERRUPTS
	int cpunum;
#endif

	if (irq >= NIRQS)
		panic("octeon_intr_establish: bogus IRQ %d", irq);
	if (ipl < IPL_VM)
		panic("octeon_intr_establish: bogus IPL %d", ipl);

	ih = kmem_zalloc(sizeof(*ih), KM_NOSLEEP);
	if (ih == NULL)
		return (NULL);

	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_irq = irq;
	ih->ih_ipl = ipl;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First, make it known.
	 */
	KASSERTMSG(octciu_intrs[irq] == NULL, "irq %d in use! (%p)",
	    irq, octciu_intrs[irq]);

	atomic_store_release(&octciu_intrs[irq], ih);

	/*
	 * Now enable it.
	 */
	const int bank = irq / 64;
	const uint64_t irq_mask = __BIT(irq % 64);

	switch (ipl) {
	case IPL_VM:
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip2_enable[bank] |= irq_mask;
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		break;

	case IPL_SCHED:
#ifdef OCTEON_CPU0_INTERRUPTS
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip3_enable[bank] |= irq_mask;
		mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
#else	/* OCTEON_CPU0_INTERRUPTS */
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip3_enable[bank] |= irq_mask;
			mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		}
#endif	/* OCTEON_CPU0_INTERRUPTS */
		break;

	case IPL_DDB:
	case IPL_HIGH:
#ifdef OCTEON_CPU0_INTERRUPTS
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip4_enable[bank] |= irq_mask;
		mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
#else	/* OCTEON_CPU0_INTERRUPTS */
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip4_enable[bank] |= irq_mask;
			mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
		}
#endif	/* OCTEON_CPU0_INTERRUPTS */
		break;
	}

	mutex_exit(&octeon_intr_lock);

	return ih;
}

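/*
 * Mask the irq on every CPU that may have enabled it, unregister the
 * handler, and wait (via xc_barrier) for any invocation still running
 * on another CPU to finish before freeing the cookie.
 */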
void
octeon_intr_disestablish(void *cookie)
{
	struct octeon_intrhand * const ih = cookie;
	struct cpu_softc *cpu;
	const int irq = ih->ih_irq & (NIRQS-1);
	const int ipl = ih->ih_ipl;
	int cpunum;

	mutex_enter(&octeon_intr_lock);

	/*
	 * First disable it.
	 */
	const int bank = irq / 64;
	const uint64_t irq_mask = __BIT(irq % 64);

	switch (ipl) {
	case IPL_VM:
		cpu = &octeon_cpu_softc[0];
		cpu->cpu_ip2_enable[bank] &= ~irq_mask;
		mips3_sd(cpu->cpu_ip2_en[bank], cpu->cpu_ip2_enable[bank]);
		break;

	case IPL_SCHED:
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip3_enable[bank] &= ~irq_mask;
			mips3_sd(cpu->cpu_ip3_en[bank], cpu->cpu_ip3_enable[bank]);
		}
		break;

	case IPL_DDB:
	case IPL_HIGH:
		for (cpunum = 0; cpunum < OCTEON_NCPU; cpunum++) {
			cpu = &octeon_cpu_softc[cpunum];
			if (cpu->cpu_ci == NULL)
				break;
			cpu->cpu_ip4_enable[bank] &= ~irq_mask;
			mips3_sd(cpu->cpu_ip4_en[bank], cpu->cpu_ip4_enable[bank]);
		}
		break;
	}

	atomic_store_relaxed(&octciu_intrs[irq], NULL);

	mutex_exit(&octeon_intr_lock);

	/*
	 * Wait until the interrupt handler is no longer running on all
	 * CPUs before freeing ih and returning.
	 */
	xc_barrier(0);
	kmem_free(ih, sizeof(*ih));
}

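/*
 * Hardware interrupt dispatch.  The pending CP0 bit selects which CIU
 * summary/enable pair to consult: IP2 for IPL_VM, IP3 for IPL_SCHED,
 * IP4 for IPL_HIGH.  Each set bit in the masked summary is mapped to
 * its irq number and its handler is run (IPL_VM handlers run with the
 * kernel lock held in the MULTIPROCESSOR case).
 */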
void
octeon_iointr(int ipl, vaddr_t pc, uint32_t ipending)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	int bank;

	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
	KASSERT((ipending & ~MIPS_INT_MASK) == 0);
	KASSERT(ipending & MIPS_HARD_INT_MASK);
	uint64_t hwpend[2] = { 0, 0 };

	const uint64_t sum1 = mips3_ld(cpu->cpu_int_sum1);

	if (ipending & MIPS_INT_MASK_2) {
		hwpend[0] = mips3_ld(cpu->cpu_ip4_sum0)
		    & cpu->cpu_ip4_enable[0];
		hwpend[1] = sum1 & cpu->cpu_ip4_enable[1];
	} else if (ipending & MIPS_INT_MASK_1) {
		hwpend[0] = mips3_ld(cpu->cpu_ip3_sum0)
		    & cpu->cpu_ip3_enable[0];
		hwpend[1] = sum1 & cpu->cpu_ip3_enable[1];
	} else if (ipending & MIPS_INT_MASK_0) {
		hwpend[0] = mips3_ld(cpu->cpu_ip2_sum0)
		    & cpu->cpu_ip2_enable[0];
		hwpend[1] = sum1 & cpu->cpu_ip2_enable[1];
	} else {
		panic("octeon_iointr: unexpected ipending %#x", ipending);
	}
	for (bank = 0; bank <= 1; bank++) {
		while (hwpend[bank] != 0) {
			const int bit = ffs64(hwpend[bank]) - 1;
			const int irq = (bank * 64) + bit;
			hwpend[bank] &= ~__BIT(bit);

			struct octeon_intrhand * const ih =
			    atomic_load_consume(&octciu_intrs[irq]);
			cpu->cpu_intr_evs[irq].ev_count++;
			if (__predict_true(ih != NULL)) {
#ifdef MULTIPROCESSOR
				if (ipl == IPL_VM) {
					KERNEL_LOCK(1, NULL);
#endif
					(*ih->ih_func)(ih->ih_arg);
#ifdef MULTIPROCESSOR
					KERNEL_UNLOCK_ONE(NULL);
				} else {
					(*ih->ih_func)(ih->ih_arg);
				}
#endif
				KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
			}
		}
	}
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}

#ifdef MULTIPROCESSOR
__CTASSERT(NIPIS < 16);

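/*
 * Mailbox interrupt handler.  The argument is the 16-bit window of the
 * mailbox register this handler owns (low half for IPL_HIGH, high half
 * for IPL_SCHED).  Pending bits are acknowledged in the CIU before the
 * requests are handed to ipi_process().
 */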
int
octeon_ipi_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	const uint32_t mbox_mask = (uintptr_t) arg;
	uint32_t ipi_mask = mbox_mask;

	KASSERTMSG((mbox_mask & __BITS(31,16)) == 0 || ci->ci_cpl >= IPL_SCHED,
	    "mbox_mask %#"PRIx32" cpl %d", mbox_mask, ci->ci_cpl);

	ipi_mask &= mips3_ld(cpu->cpu_mbox_set);
	if (ipi_mask == 0)
		return 0;
	membar_acquire();

	mips3_sd(cpu->cpu_mbox_clr, ipi_mask);

	KASSERT(__SHIFTOUT(ipi_mask, mbox_mask) < __BIT(NIPIS));

#if NWDOG > 0
	// Handle WDOG requests ourselves.
	if (ipi_mask & __BIT(IPI_WDOG)) {
		softint_schedule(cpu->cpu_wdog_sih);
		atomic_and_64(&ci->ci_request_ipis, ~__BIT(IPI_WDOG));
		ipi_mask &= ~__BIT(IPI_WDOG);
		ci->ci_evcnt_per_ipi[IPI_WDOG].ev_count++;
		if (__predict_true(ipi_mask == 0))
			return 1;
	}
#endif

	/* if the request is clear, it was previously processed */
	if ((atomic_load_relaxed(&ci->ci_request_ipis) & ipi_mask) == 0)
		return 0;
	membar_acquire();

	atomic_or_64(&ci->ci_active_ipis, ipi_mask);
	atomic_and_64(&ci->ci_request_ipis, ~ipi_mask);

	ipi_process(ci, __SHIFTOUT(ipi_mask, mbox_mask));

	atomic_and_64(&ci->ci_active_ipis, ~ipi_mask);

	return 1;
}

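/*
 * Post an IPI to the given CPU, or to every other CPU when ci is NULL.
 * The request bit is recorded in ci_request_ipis and then set in the
 * target's mailbox, shifted into the high half for IPIs delivered at
 * IPL_SCHED.
 */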
int
octeon_send_ipi(struct cpu_info *ci, int req)
{
	KASSERT(req < NIPIS);
	if (ci == NULL) {
		CPU_INFO_ITERATOR cii;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (ci != curcpu()) {
				octeon_send_ipi(ci, req);
			}
		}
		return 0;
	}
	KASSERT(cold || ci->ci_softc != NULL);
	if (ci->ci_softc == NULL)
		return -1;

	struct cpu_softc * const cpu = ci->ci_softc;
	const u_int ipi_shift = ipi_prio[req] == IPL_SCHED ? 16 : 0;
	const uint32_t ipi_mask = __BIT(req + ipi_shift);

	membar_release();
	atomic_or_64(&ci->ci_request_ipis, ipi_mask);

	membar_release();
	mips3_sd(cpu->cpu_mbox_set, ipi_mask);

	return 0;
}
#endif	/* MULTIPROCESSOR */