xref: /openbsd-src/sys/arch/octeon/dev/octciu.c (revision cb5217fba3727742f326b0c20baed47113c0e1ef)
1 /*	$OpenBSD: octciu.c,v 1.10 2018/02/24 11:42:31 visa Exp $	*/
2 
3 /*
4  * Copyright (c) 2000-2004 Opsycon AB  (www.opsycon.se)
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
16  * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
19  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  *
27  */
28 
29 /*
30  * Driver for OCTEON Central Interrupt Unit (CIU).
31  *
32  * CIU is present at least on CN3xxx, CN5xxx, CN60xx, CN61xx,
33  * CN70xx, and CN71xx.
34  */
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/atomic.h>
39 #include <sys/conf.h>
40 #include <sys/device.h>
41 #include <sys/kernel.h>
42 #include <sys/malloc.h>
43 
44 #include <dev/ofw/fdt.h>
45 #include <dev/ofw/openfirm.h>
46 
47 #include <mips64/mips_cpu.h>
48 
49 #include <machine/autoconf.h>
50 #include <machine/fdt.h>
51 #include <machine/intr.h>
52 #include <machine/octeonreg.h>
53 
#define OCTCIU_NINTS 192	/* NBANKS x BANK_SIZE interrupt sources */

/* CIU interrupts are dispatched just above the clock interrupt. */
#define INTPRI_CIU_0	(INTPRI_CLOCK + 1)
#define INTPRI_CIU_1	(INTPRI_CLOCK + 2)

/*
 * Register addresses and identity of one interrupt bank on one CPU.
 */
struct intrbank {
	uint64_t	en;		/* enable mask register */
	uint64_t	sum;		/* service request register */
	int		id;		/* bank number */
};

#define NBANKS		3
#define BANK_SIZE	64
#define IRQ_TO_BANK(x)	((x) >> 6)	/* irq / BANK_SIZE */
#define IRQ_TO_BIT(x)	((x) & 0x3f)	/* irq % BANK_SIZE */

/* The first 16 sources are the work queue interrupts. */
#define IS_WORKQ_IRQ(x)	((unsigned int)(x) < 16)

/*
 * Per-CPU interrupt state.  Each CPU has its own enable registers
 * and its own set of spl masks.
 */
struct octciu_cpu {
	struct intrbank		 scpu_ibank[NBANKS];	/* bank registers */
	uint64_t		 scpu_intem[NBANKS];	/* enabled sources */
	uint64_t		 scpu_imask[NIPLS][NBANKS]; /* per-IPL masks */
};
77 
/*
 * Driver state for the (single) CIU instance.
 */
struct octciu_softc {
	struct device		 sc_dev;
	bus_space_tag_t		 sc_iot;
	bus_space_handle_t	 sc_ioh;
	struct octciu_cpu	 sc_cpu[MAXCPUS];	/* per-CPU state */
	struct intrhand		*sc_intrhand[OCTCIU_NINTS]; /* handler chains */
	unsigned int		 sc_nbanks;	/* 2, or 3 on OCTEON II/III */

	/* Shared IPI handler; see octciu_ipi_establish(). */
	int			(*sc_ipi_handler)(void *);

	struct intr_controller	 sc_ic;
};
90 
int	 octciu_match(struct device *, void *, void *);
void	 octciu_attach(struct device *, struct device *, void *);

void	 octciu_init(void);
void	 octciu_intr_makemasks(struct octciu_softc *);
uint32_t octciu_intr0(uint32_t, struct trapframe *);
uint32_t octciu_intr2(uint32_t, struct trapframe *);
uint32_t octciu_intr_bank(struct octciu_softc *, struct intrbank *,
	    struct trapframe *);
void	*octciu_intr_establish(int, int, int (*)(void *), void *,
	    const char *);
void	*octciu_intr_establish_fdt_idx(void *, int, int, int,
	    int (*)(void *), void *, const char *);
void	 octciu_intr_disestablish(void *);
void	 octciu_splx(int);

/* Inter-processor (mailbox) interrupt support. */
uint32_t octciu_ipi_intr(uint32_t, struct trapframe *);
int	 octciu_ipi_establish(int (*)(void *), cpuid_t);
void	 octciu_ipi_set(cpuid_t);
void	 octciu_ipi_clear(cpuid_t);

const struct cfattach octciu_ca = {
	sizeof(struct octciu_softc), octciu_match, octciu_attach
};

struct cfdriver octciu_cd = {
	NULL, "octciu", DV_DULL
};

/* There is a single CIU instance in the system. */
struct octciu_softc	*octciu_sc;
121 
122 int
123 octciu_match(struct device *parent, void *match, void *aux)
124 {
125 	struct fdt_attach_args *faa = aux;
126 
127 	return OF_is_compatible(faa->fa_node, "cavium,octeon-3860-ciu");
128 }
129 
/*
 * Map the CIU registers, register the controller with the MIPS
 * interrupt glue, and hook the CPU interrupt vectors.
 */
void
octciu_attach(struct device *parent, struct device *self, void *aux)
{
	struct fdt_attach_args *faa = aux;
	struct octciu_softc *sc = (struct octciu_softc *)self;

	if (faa->fa_nreg != 1) {
		printf(": expected one IO space, got %d\n", faa->fa_nreg);
		return;
	}

	sc->sc_iot = faa->fa_iot;
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr, faa->fa_reg[0].size,
	    0, &sc->sc_ioh)) {
		printf(": could not map IO space\n");
		return;
	}

	/* OCTEON II/III models provide a third interrupt bank. */
	if (octeon_ver == OCTEON_2 || octeon_ver == OCTEON_3)
		sc->sc_nbanks = 3;
	else
		sc->sc_nbanks = 2;

	printf("\n");

	sc->sc_ic.ic_cookie = sc;
	sc->sc_ic.ic_node = faa->fa_node;
	sc->sc_ic.ic_init = octciu_init;
	sc->sc_ic.ic_establish = octciu_intr_establish;
	sc->sc_ic.ic_establish_fdt_idx = octciu_intr_establish_fdt_idx;
	sc->sc_ic.ic_disestablish = octciu_intr_disestablish;
#ifdef MULTIPROCESSOR
	sc->sc_ic.ic_ipi_establish = octciu_ipi_establish;
	sc->sc_ic.ic_ipi_set = octciu_ipi_set;
	sc->sc_ic.ic_ipi_clear = octciu_ipi_clear;
#endif

	octciu_sc = sc;

	/* Banks 0 and 1 use CPU interrupt 0; bank 2 uses interrupt 2. */
	set_intr(INTPRI_CIU_0, CR_INT_0, octciu_intr0);
	if (sc->sc_nbanks == 3)
		set_intr(INTPRI_CIU_1, CR_INT_2, octciu_intr2);
#ifdef MULTIPROCESSOR
	set_intr(INTPRI_IPI, CR_INT_1, octciu_ipi_intr);
#endif

	octciu_init();

	register_splx_handler(octciu_splx);
	octeon_intr_register(&sc->sc_ic);
}
181 
/*
 * Initialize the CIU state of the calling CPU: mask every source,
 * record the per-CPU register addresses and compute the initial
 * spl masks.
 */
void
octciu_init(void)
{
	struct octciu_softc *sc = octciu_sc;
	struct octciu_cpu *scpu;
	int cpuid = cpu_number();
	int s;

	scpu = &sc->sc_cpu[cpuid];

	/* Mask all sources in every bank for this CPU. */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP2_EN0(cpuid), 0);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid), 0);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP2_EN1(cpuid), 0);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN1(cpuid), 0);

	/* Bank 2 exists only when sc_nbanks == 3 (OCTEON II/III). */
	if (sc->sc_nbanks == 3)
		bus_space_write_8(sc->sc_iot, sc->sc_ioh,
		    CIU_IP4_EN2(cpuid), 0);

	/* Record register addresses for the dispatch and splx paths. */
	scpu->scpu_ibank[0].en = CIU_IP2_EN0(cpuid);
	scpu->scpu_ibank[0].sum = CIU_IP2_SUM0(cpuid);
	scpu->scpu_ibank[0].id = 0;
	scpu->scpu_ibank[1].en = CIU_IP2_EN1(cpuid);
	scpu->scpu_ibank[1].sum = CIU_INT32_SUM1;
	scpu->scpu_ibank[1].id = 1;
	scpu->scpu_ibank[2].en = CIU_IP4_EN2(cpuid);
	scpu->scpu_ibank[2].sum = CIU_IP4_SUM2(cpuid);
	scpu->scpu_ibank[2].id = 2;

	s = splhigh();
	octciu_intr_makemasks(sc);
	splx(s);	/* causes hw mask update */
}
215 
/*
 * Register handler ih_fun(ih_arg) for CIU interrupt source irq at the
 * given spl level.  IPL_MPSAFE may be or'ed into the level to skip the
 * kernel lock at dispatch time.  Returns an opaque cookie for
 * octciu_intr_disestablish(), or NULL if memory allocation fails.
 */
void *
octciu_intr_establish(int irq, int level, int (*ih_fun)(void *),
    void *ih_arg, const char *ih_what)
{
	struct octciu_softc *sc = octciu_sc;
	struct intrhand **p, *q, *ih;
	int cpuid = cpu_number();
	int flags;
	int s;

#ifdef DIAGNOSTIC
	if (irq >= sc->sc_nbanks * BANK_SIZE || irq < 0)
		panic("%s: illegal irq %d", __func__, irq);
#endif

#ifdef MULTIPROCESSOR
	/* Span work queue interrupts across CPUs. */
	if (IS_WORKQ_IRQ(irq))
		cpuid = irq % ncpusfound;
#endif

	flags = (level & IPL_MPSAFE) ? IH_MPSAFE : 0;
	level &= ~IPL_MPSAFE;

	ih = malloc(sizeof *ih, M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return NULL;

	ih->ih_next = NULL;
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_level = level;
	ih->ih_flags = flags;
	ih->ih_irq = irq;
	ih->ih_cpuid = cpuid;
	evcount_attach(&ih->ih_count, ih_what, &ih->ih_irq);

	s = splhigh();

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &sc->sc_intrhand[irq]; (q = *p) != NULL; p = &q->ih_next)
		continue;
	*p = ih;

	/* Enable the source on the chosen CPU and refresh the masks. */
	sc->sc_cpu[cpuid].scpu_intem[IRQ_TO_BANK(irq)] |=
	    1UL << IRQ_TO_BIT(irq);
	octciu_intr_makemasks(sc);

	splx(s);	/* causes hw mask update */

	return (ih);
}
272 
273 void *
274 octciu_intr_establish_fdt_idx(void *cookie, int node, int idx, int level,
275     int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
276 {
277 	uint32_t *cells;
278 	int irq, len;
279 
280 	len = OF_getproplen(node, "interrupts");
281 	if (len / (sizeof(uint32_t) * 2) <= idx ||
282 	    len % (sizeof(uint32_t) * 2) != 0)
283 		return NULL;
284 
285 	cells = malloc(len, M_TEMP, M_NOWAIT);
286 	if (cells == NULL)
287 		return NULL;
288 
289 	OF_getpropintarray(node, "interrupts", cells, len);
290 	irq = cells[idx * 2] * BANK_SIZE + cells[idx * 2 + 1];
291 
292 	free(cells, M_TEMP, len);
293 
294 	return octciu_intr_establish(irq, level, ih_fun, ih_arg, ih_what);
295 }
296 
/*
 * Remove a handler registered by octciu_intr_establish().
 * Work queue interrupts cannot be disestablished because they may
 * have been spanned onto another CPU.
 */
void
octciu_intr_disestablish(void *_ih)
{
	struct intrhand *ih = _ih;
	struct intrhand *p;
	struct octciu_softc *sc = octciu_sc;
	unsigned int irq = ih->ih_irq;
	int cpuid = cpu_number();
	int s;

	KASSERT(irq < sc->sc_nbanks * BANK_SIZE);
	KASSERT(!IS_WORKQ_IRQ(irq));

	s = splhigh();

	if (ih == sc->sc_intrhand[irq]) {
		/* Handler is at the head of the chain. */
		sc->sc_intrhand[irq] = ih->ih_next;

		/*
		 * Disable the source when no handlers remain.
		 * NOTE(review): this clears the enable bit of the CPU
		 * running the disestablish, assumed to be the CPU the
		 * handler was established on -- confirm for callers
		 * that may run on another CPU.
		 */
		if (sc->sc_intrhand[irq] == NULL)
			sc->sc_cpu[cpuid].scpu_intem[IRQ_TO_BANK(irq)] &=
			    ~(1UL << IRQ_TO_BIT(irq));
	} else {
		/* Unlink the handler from the middle of the chain. */
		for (p = sc->sc_intrhand[irq]; p != NULL; p = p->ih_next) {
			if (p->ih_next == ih) {
				p->ih_next = ih->ih_next;
				break;
			}
		}
		if (p == NULL)
			panic("%s: intrhand %p has not been registered",
			    __func__, ih);
	}
	free(ih, M_DEVBUF, sizeof(*ih));

	octciu_intr_makemasks(sc);
	splx(s);	/* causes hw mask update */
}
334 
335 /*
336  * Recompute interrupt masks.
337  */
338 void
339 octciu_intr_makemasks(struct octciu_softc *sc)
340 {
341 	cpuid_t cpuid = cpu_number();
342 	struct octciu_cpu *scpu = &sc->sc_cpu[cpuid];
343 	struct intrhand *q;
344 	uint intrlevel[OCTCIU_NINTS];
345 	int irq, level;
346 
347 	/* First, figure out which levels each IRQ uses. */
348 	for (irq = 0; irq < OCTCIU_NINTS; irq++) {
349 		uint levels = 0;
350 		for (q = sc->sc_intrhand[irq]; q != NULL; q = q->ih_next) {
351 			if (q->ih_cpuid == cpuid)
352 				levels |= 1 << q->ih_level;
353 		}
354 		intrlevel[irq] = levels;
355 	}
356 
357 	/*
358 	 * Then figure out which IRQs use each level.
359 	 * Note that we make sure never to overwrite imask[IPL_HIGH], in
360 	 * case an interrupt occurs during intr_disestablish() and causes
361 	 * an unfortunate splx() while we are here recomputing the masks.
362 	 */
363 	for (level = IPL_NONE; level < NIPLS; level++) {
364 		uint64_t mask[NBANKS] = {};
365 		for (irq = 0; irq < OCTCIU_NINTS; irq++)
366 			if (intrlevel[irq] & (1 << level))
367 				mask[IRQ_TO_BANK(irq)] |=
368 				    1UL << IRQ_TO_BIT(irq);
369 		scpu->scpu_imask[level][0] = mask[0];
370 		scpu->scpu_imask[level][1] = mask[1];
371 		scpu->scpu_imask[level][2] = mask[2];
372 	}
373 	/*
374 	 * There are tty, network and disk drivers that use free() at interrupt
375 	 * time, so vm > (tty | net | bio).
376 	 *
377 	 * Enforce a hierarchy that gives slow devices a better chance at not
378 	 * dropping data.
379 	 */
380 #define ADD_MASK(dst, src) do {	\
381 	dst[0] |= src[0];	\
382 	dst[1] |= src[1];	\
383 	dst[2] |= src[2];	\
384 } while (0)
385 	ADD_MASK(scpu->scpu_imask[IPL_NET], scpu->scpu_imask[IPL_BIO]);
386 	ADD_MASK(scpu->scpu_imask[IPL_TTY], scpu->scpu_imask[IPL_NET]);
387 	ADD_MASK(scpu->scpu_imask[IPL_VM], scpu->scpu_imask[IPL_TTY]);
388 	ADD_MASK(scpu->scpu_imask[IPL_CLOCK], scpu->scpu_imask[IPL_VM]);
389 	ADD_MASK(scpu->scpu_imask[IPL_HIGH], scpu->scpu_imask[IPL_CLOCK]);
390 	ADD_MASK(scpu->scpu_imask[IPL_IPI], scpu->scpu_imask[IPL_HIGH]);
391 
392 	/*
393 	 * These are pseudo-levels.
394 	 */
395 	scpu->scpu_imask[IPL_NONE][0] = 0;
396 	scpu->scpu_imask[IPL_NONE][1] = 0;
397 	scpu->scpu_imask[IPL_NONE][2] = 0;
398 }
399 
400 static inline int
401 octciu_next_irq(uint64_t *isr)
402 {
403 	uint64_t irq, tmp = *isr;
404 
405 	if (tmp == 0)
406 		return -1;
407 
408 	asm volatile (
409 	"	.set push\n"
410 	"	.set mips64\n"
411 	"	dclz	%0, %0\n"
412 	"	.set pop\n"
413 	: "=r" (tmp) : "0" (tmp));
414 
415 	irq = 63u - tmp;
416 	*isr &= ~(1u << irq);
417 	return irq;
418 }
419 
420 /*
421  * Dispatch interrupts in given bank.
422  */
423 uint32_t
424 octciu_intr_bank(struct octciu_softc *sc, struct intrbank *bank,
425     struct trapframe *frame)
426 {
427 	struct cpu_info *ci = curcpu();
428 	struct intrhand *ih;
429 	struct octciu_cpu *scpu = &sc->sc_cpu[ci->ci_cpuid];
430 	uint64_t imr, isr, mask;
431 	int handled, ipl, irq;
432 #ifdef MULTIPROCESSOR
433 	register_t sr;
434 	int need_lock;
435 #endif
436 
437 	isr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, bank->sum);
438 	imr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, bank->en);
439 
440 	isr &= imr;
441 	if (isr == 0)
442 		return 0;	/* not for us */
443 
444 	/*
445 	 * Mask all pending interrupts.
446 	 */
447 	bus_space_write_8(sc->sc_iot, sc->sc_ioh, bank->en, imr & ~isr);
448 
449 	/*
450 	 * If interrupts are spl-masked, mask them and wait for splx()
451 	 * to reenable them when necessary.
452 	 */
453 	if ((mask = isr & scpu->scpu_imask[frame->ipl][bank->id])
454 	    != 0) {
455 		isr &= ~mask;
456 		imr &= ~mask;
457 	}
458 	if (isr == 0)
459 		return 1;
460 
461 	/*
462 	 * Now process allowed interrupts.
463 	 */
464 
465 	ipl = ci->ci_ipl;
466 
467 	while ((irq = octciu_next_irq(&isr)) >= 0) {
468 		irq += bank->id * BANK_SIZE;
469 		handled = 0;
470 		for (ih = sc->sc_intrhand[irq]; ih != NULL; ih = ih->ih_next) {
471 			splraise(ih->ih_level);
472 #ifdef MULTIPROCESSOR
473 			if (ih->ih_level < IPL_IPI) {
474 				sr = getsr();
475 				ENABLEIPI();
476 			}
477 			if (ih->ih_flags & IH_MPSAFE)
478 				need_lock = 0;
479 			else
480 				need_lock = 1;
481 			if (need_lock)
482 				__mp_lock(&kernel_lock);
483 #endif
484 			if ((*ih->ih_fun)(ih->ih_arg) != 0) {
485 				handled = 1;
486 				atomic_inc_long(
487 				    (unsigned long *)&ih->ih_count.ec_count);
488 			}
489 #ifdef MULTIPROCESSOR
490 			if (need_lock)
491 				__mp_unlock(&kernel_lock);
492 			if (ih->ih_level < IPL_IPI)
493 				setsr(sr);
494 #endif
495 		}
496 		if (!handled)
497 			printf("spurious interrupt %d\n", irq);
498 	}
499 
500 	ci->ci_ipl = ipl;
501 
502 	/*
503 	 * Reenable interrupts which have been serviced.
504 	 */
505 	bus_space_write_8(sc->sc_iot, sc->sc_ioh, bank->en, imr);
506 
507 	return 1;
508 }
509 
510 uint32_t
511 octciu_intr0(uint32_t hwpend, struct trapframe *frame)
512 {
513 	struct octciu_softc *sc = octciu_sc;
514 	struct octciu_cpu *scpu = &sc->sc_cpu[cpu_number()];
515 	int handled;
516 
517 	handled = octciu_intr_bank(sc, &scpu->scpu_ibank[0], frame);
518 	handled |= octciu_intr_bank(sc, &scpu->scpu_ibank[1], frame);
519 	return handled ? hwpend : 0;
520 }
521 
522 uint32_t
523 octciu_intr2(uint32_t hwpend, struct trapframe *frame)
524 {
525 	struct octciu_softc *sc = octciu_sc;
526 	struct octciu_cpu *scpu = &sc->sc_cpu[cpu_number()];
527 	int handled;
528 
529 	handled = octciu_intr_bank(sc, &scpu->scpu_ibank[2], frame);
530 	return handled ? hwpend : 0;
531 }
532 
/*
 * Set the software IPL of the calling CPU and load the matching
 * hardware enable masks.  Registered as the system splx() handler.
 */
void
octciu_splx(int newipl)
{
	struct cpu_info *ci = curcpu();
	struct octciu_softc *sc = octciu_sc;
	struct octciu_cpu *scpu = &sc->sc_cpu[ci->ci_cpuid];

	ci->ci_ipl = newipl;

	/* Set hardware masks. */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, scpu->scpu_ibank[0].en,
	    scpu->scpu_intem[0] & ~scpu->scpu_imask[newipl][0]);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, scpu->scpu_ibank[1].en,
	    scpu->scpu_intem[1] & ~scpu->scpu_imask[newipl][1]);

	if (sc->sc_nbanks == 3)
		bus_space_write_8(sc->sc_iot, sc->sc_ioh,
		    scpu->scpu_ibank[2].en,
		    scpu->scpu_intem[2] & ~scpu->scpu_imask[newipl][2]);

	/* If we still have softints pending trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}
557 
#ifdef MULTIPROCESSOR
/*
 * Handle a mailbox (IPI) interrupt on the calling CPU.
 */
uint32_t
octciu_ipi_intr(uint32_t hwpend, struct trapframe *frame)
{
	struct octciu_softc *sc = octciu_sc;
	u_long cpuid = cpu_number();

	/*
	 * Mask all pending interrupts.
	 */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid), 0);

	if (sc->sc_ipi_handler == NULL)
		return hwpend;

	sc->sc_ipi_handler((void *)cpuid);

	/*
	 * Reenable interrupts which have been serviced.
	 */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid),
		(1ULL << CIU_INT_MBOX0)|(1ULL << CIU_INT_MBOX1));
	return hwpend;
}
582 
/*
 * Enable mailbox interrupts for the given CPU.  The handler function
 * is shared by all CPUs; it is recorded when CPU 0 is set up.
 */
int
octciu_ipi_establish(int (*func)(void *), cpuid_t cpuid)
{
	struct octciu_softc *sc = octciu_sc;

	if (cpuid == 0)
		sc->sc_ipi_handler = func;

	/* Clear stale mailbox bits, then enable both mailbox sources. */
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_CLR(cpuid),
		0xffffffff);
	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_IP3_EN0(cpuid),
		(1ULL << CIU_INT_MBOX0)|(1ULL << CIU_INT_MBOX1));

	return 0;
}
598 
599 void
600 octciu_ipi_set(cpuid_t cpuid)
601 {
602 	struct octciu_softc *sc = octciu_sc;
603 
604 	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_SET(cpuid), 1);
605 }
606 
607 void
608 octciu_ipi_clear(cpuid_t cpuid)
609 {
610 	struct octciu_softc *sc = octciu_sc;
611 	uint64_t clr;
612 
613 	clr = bus_space_read_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_CLR(cpuid));
614 	bus_space_write_8(sc->sc_iot, sc->sc_ioh, CIU_MBOX_CLR(cpuid), clr);
615 }
616 #endif /* MULTIPROCESSOR */
617