xref: /netbsd-src/sys/arch/powerpc/pic/intr.c (revision 7fa608457b817eca6e0977b37f758ae064f3c99c)
1 /*	$NetBSD: intr.c,v 1.2 2007/10/17 19:56:45 garbled Exp $ */
2 
3 /*-
4  * Copyright (c) 2007 Michael Lorenz
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. Neither the name of The NetBSD Foundation nor the names of its
16  *    contributors may be used to endorse or promote products derived
17  *    from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.2 2007/10/17 19:56:45 garbled Exp $");
34 
35 #include "opt_multiprocessor.h"
36 
37 #include <sys/param.h>
38 #include <sys/malloc.h>
39 #include <sys/kernel.h>
40 
41 #include <uvm/uvm_extern.h>
42 
43 #include <arch/powerpc/pic/picvar.h>
44 #include "opt_pic.h"
45 #include "opt_interrupt.h"
46 #if defined(PIC_I8259) || defined (PIC_PREPIVR)
47 #include <machine/isa_machdep.h>
48 #endif
49 
50 #ifdef MULTIPROCESSOR
51 #include <arch/powerpc/pic/ipivar.h>
52 #endif
53 
#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define NVIRQ		32	/* 32 virtual IRQs */
#define NIRQ		128	/* up to 128 HW IRQs */

/* The top virq numbers are reserved for the soft-interrupt bits. */
#define HWIRQ_MAX	(NVIRQ - 4 - 1)
#define HWIRQ_MASK	0x0fffffff
#define	LEGAL_VIRQ(x)	((x) >= 0 && (x) < NVIRQ)

struct pic_ops *pics[MAX_PICS];	/* all registered PICs */
int num_pics = 0;		/* number of valid entries in pics[] */
int max_base = 0;		/* first unassigned global hw irq number */
uint8_t	virq[NIRQ];		/* hw irq -> virtual irq map; 0 = unmapped */
int	virq_max = 0;		/* highest virtual irq allocated so far */
int	imask[NIPL];		/* per-IPL mask of virq bits to block */
int	primary_pic = 0;	/* pics[] index served by pic_ext_intr() */

static int	fakeintr(void *);
static int	mapirq(uint32_t);
static void	intr_calculatemasks(void);
static struct pic_ops *find_pic_by_irq(int);

/* Per-virtual-irq state: handler chain, trigger type, masks, evcnt. */
static struct intr_source intrsources[NVIRQ];
77 
78 void
79 pic_init(void)
80 {
81 	int i;
82 
83 	for (i = 0; i < NIRQ; i++)
84 		virq[i] = 0;
85 	memset(intrsources, 0, sizeof(intrsources));
86 }
87 
88 int
89 pic_add(struct pic_ops *pic)
90 {
91 
92 	if (num_pics >= MAX_PICS)
93 		return -1;
94 
95 	pics[num_pics] = pic;
96 	pic->pic_intrbase = max_base;
97 	max_base += pic->pic_numintrs;
98 	num_pics++;
99 
100 	return pic->pic_intrbase;
101 }
102 
103 void
104 pic_finish_setup(void)
105 {
106 	struct pic_ops *pic;
107 	int i;
108 
109 	for (i = 0; i < num_pics; i++) {
110 		pic = pics[i];
111 		if (pic->pic_finish_setup != NULL)
112 			pic->pic_finish_setup(pic);
113 	}
114 }
115 
116 static struct pic_ops *
117 find_pic_by_irq(int irq)
118 {
119 	struct pic_ops *current;
120 	int base = 0;
121 
122 	while (base < num_pics) {
123 
124 		current = pics[base];
125 		if ((irq >= current->pic_intrbase) &&
126 		    (irq < (current->pic_intrbase + current->pic_numintrs))) {
127 
128 			return current;
129 		}
130 		base++;
131 	}
132 	return NULL;
133 }
134 
/*
 * Do-nothing handler temporarily installed by intr_establish() so the
 * slot is occupied while the interrupt masks are recalculated.
 */
static int
fakeintr(void *arg)
{

	return 0;
}
141 
/*
 * Register an interrupt handler.
 *
 * "hwirq" is a global hardware interrupt number (each PIC's local
 * numbers offset by its pic_intrbase), "type" the IST_* trigger type,
 * "level" the IPL at which ih_fun(ih_arg) should run.  Returns an
 * opaque cookie for intr_disestablish().
 */
void *
intr_establish(int hwirq, int type, int level, int (*ih_fun)(void *),
    void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct intr_source *is;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int irq, maxlevel = level;

	/* An IPL_NONE request is installed blocking everything. */
	if (maxlevel == IPL_NONE)
		maxlevel = IPL_HIGH;

	if (hwirq >= max_base) {

		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_irq(hwirq);
	if (pic == NULL) {

		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	/* Allocate (or look up) the virtual IRQ for this hardware IRQ. */
	irq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("intr_establish: can't malloc handler info");

	if (!LEGAL_VIRQ(irq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)", irq, type);

	is = &intrsources[irq];

	/*
	 * Sharing is only allowed between handlers of the same trigger
	 * type; a pulsed source can never be shared.
	 */
	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	/* First handler on this source: attach its event counter. */
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {

		maxlevel = max(maxlevel, q->ih_level);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_level = level;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_level = level;
	ih->ih_irq = irq;
	*p = ih;

	/* Let the PIC program trigger type and priority for this line. */
	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxlevel);

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();


	return ih;
}
246 
/*
 * No-op pic_establish_irq hook for PICs that need no per-line setup.
 */
void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}
251 
252 /*
253  * Deregister an interrupt handler.
254  */
255 void
256 intr_disestablish(void *arg)
257 {
258 	struct intrhand *ih = arg;
259 	int irq = ih->ih_irq;
260 	struct intr_source *is = &intrsources[irq];
261 	struct intrhand **p, *q;
262 
263 	if (!LEGAL_VIRQ(irq))
264 		panic("intr_disestablish: bogus irq %d", irq);
265 
266 	/*
267 	 * Remove the handler from the chain.
268 	 * This is O(n^2), too.
269 	 */
270 	for (p = &is->is_hand; (q = *p) != NULL && q != ih; p = &q->ih_next)
271 		;
272 	if (q)
273 		*p = q->ih_next;
274 	else
275 		panic("intr_disestablish: handler not registered");
276 	free((void *)ih, M_DEVBUF);
277 
278 	intr_calculatemasks();
279 
280 	if (is->is_hand == NULL) {
281 		is->is_type = IST_NONE;
282 		evcnt_detach(&is->is_ev);
283 	}
284 }
285 
286 /*
287  * Map max_base irqs into 32 (bits).
288  */
289 static int
290 mapirq(uint32_t irq)
291 {
292 	struct pic_ops *pic;
293 	int v;
294 
295 	if (irq >= max_base)
296 		panic("invalid irq %d", irq);
297 
298 	if ((pic = find_pic_by_irq(irq)) == NULL)
299 		panic("%s: cannot find PIC for IRQ %d", __func__, irq);
300 
301 	if (virq[irq])
302 		return virq[irq];
303 
304 	virq_max++;
305 	v = virq_max;
306 	if (v > HWIRQ_MAX)
307 		panic("virq overflow");
308 
309 	intrsources[v].is_hwirq = irq;
310 	intrsources[v].is_pic = pic;
311 	virq[irq] = v;
312 #ifdef PIC_DEBUG
313 	printf("mapping irq %d to virq %d\n", irq, v);
314 #endif
315 	return v;
316 }
317 
/* Printable names for the IST_* trigger types, indexed by type. */
static const char * const intr_typenames[] = {
   [IST_NONE]  = "none",
   [IST_PULSE] = "pulsed",
   [IST_EDGE]  = "edge-triggered",
   [IST_LEVEL] = "level-triggered",
};

/*
 * Return the human-readable name of an IST_* interrupt type.
 * Asserts (DIAGNOSTIC kernels) that the type is in range.
 */
const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}
332 
/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 *
 * Produces imask[] (virq bits blocked at each IPL) and, per source,
 * is_mask (everything to block while that source's handlers run).
 */
static void
intr_calculatemasks(void)
{
	struct intr_source *is;
	struct intrhand *q;
	struct pic_ops *current;
	int irq, level, i, base;

	/* First, figure out which levels each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		register int levels = 0;
		for (q = is->is_hand; q; q = q->ih_next)
			levels |= 1 << q->ih_level;
		is->is_level = levels;
	}

	/* Then figure out which IRQs use each level. */
	for (level = 0; level < NIPL; level++) {
		register int irqs = 0;
		for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++)
			if (is->is_level & (1 << level))
				irqs |= 1 << irq;
		imask[level] = irqs;
	}

	/*
	 * IPL_CLOCK should mask clock interrupt even if interrupt handler
	 * is not registered.
	 */
	imask[IPL_CLOCK] |= 1 << SPL_CLOCK;

	/*
	 * Initialize soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFTCLOCK] = 1 << SIR_CLOCK;
	imask[IPL_SOFTNET] = 1 << SIR_NET;
	imask[IPL_SOFTSERIAL] = 1 << SIR_SERIAL;

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	imask[IPL_NONE] = 0;

#ifdef SLOPPY_IPLS
	/*
	 * Enforce a sloppy hierarchy as in spl(9)
	 */
	/* everything above softclock must block softclock */
	for (i = IPL_SOFTCLOCK; i < NIPL; i++)
		imask[i] |= imask[IPL_SOFTCLOCK];

	/* everything above softnet must block softnet */
	for (i = IPL_SOFTNET; i < NIPL; i++)
		imask[i] |= imask[IPL_SOFTNET];

	/* IPL_TTY must block softserial */
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

	/* IPL_VM must block net, block IO and tty */
	imask[IPL_VM] |= (imask[IPL_NET] | imask[IPL_BIO] | imask[IPL_TTY]);

	/* IPL_SERIAL must block IPL_TTY */
	imask[IPL_SERIAL] |= imask[IPL_TTY];

	/* IPL_HIGH must block all other priority levels */
	for (i = IPL_NONE; i < IPL_HIGH; i++)
		imask[IPL_HIGH] |= imask[i];
#else	/* !SLOPPY_IPLS */
	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (i = 1; i < NIPL; i++)
		imask[i] |= imask[i - 1];
#endif	/* !SLOPPY_IPLS */

#ifdef DEBUG_IPL
	for (i = 0; i < NIPL; i++) {
		printf("%2d: %08x\n", i, imask[i]);
	}
#endif

	/* And eventually calculate the complete masks. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		register int irqs = 1 << irq;
		for (q = is->is_hand; q; q = q->ih_next)
			irqs |= imask[q->ih_level];
		is->is_mask = irqs;
	}

	/* Lastly, enable IRQs actually in use. */
	/* Disable everything first, then re-enable lines with handlers. */
	for (base = 0; base < num_pics; base++) {
		current = pics[base];
		for (i = 0; i < current->pic_numintrs; i++)
			current->pic_disable_irq(current, i);
	}

	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}
442 
443 void
444 pic_enable_irq(int num)
445 {
446 	struct pic_ops *current;
447 	int type;
448 
449 	current = find_pic_by_irq(num);
450 	if (current == NULL)
451 		panic("%s: bogus IRQ %d", __func__, num);
452 	type = intrsources[virq[num]].is_type;
453 	current->pic_enable_irq(current, num - current->pic_intrbase, type);
454 }
455 
/*
 * Mark a hardware interrupt as pending on the current CPU without
 * running its handlers; pic_do_pending_int() will service it later.
 */
void
pic_mark_pending(int irq)
{
	struct cpu_info * const ci = curcpu();
	int v, msr;

	v = virq[irq];
	if (v == 0)
		printf("IRQ %d maps to 0\n", irq);

	/* Update ci_ipending with external interrupts (PSL_EE) disabled. */
	msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= 1 << v;
	mtmsr(msr);
}
471 
/*
 * Run all pending interrupts (hardware, then soft) that the current
 * IPL no longer masks.  Called when the IPL drops (splx/spllower).
 * Runs with PSL_EE cleared except while actually calling handlers.
 */
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	struct pic_ops *pic;
	int irq;
	int pcpl;
	int hwpend;
	int emsr, dmsr;

	/* Prevent recursion: handlers below may splx() back into here. */
	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;
	emsr = mfmsr();
	KASSERT(emsr & PSL_EE);
	dmsr = emsr & ~PSL_EE;
	mtmsr(dmsr);

	pcpl = ci->ci_cpl;
again:

	/* Do now unmasked pendings */
	while ((hwpend = (ci->ci_ipending & ~pcpl & HWIRQ_MASK)) != 0) {
		/* Service the highest-numbered pending virq first. */
		irq = 31 - cntlzw(hwpend);
		KASSERT(irq <= virq_max);
		ci->ci_ipending &= ~(1 << irq);
		if (irq == 0) {
			/* virq 0 means "unmapped" -- should not happen. */
			printf("VIRQ0");
			continue;
		}
		is = &intrsources[irq];
		pic = is->is_pic;

		/* Run the handler chain at its IPL, interrupts re-enabled. */
		splraise(is->is_mask);
		mtmsr(emsr);
		KERNEL_LOCK(1, NULL);
		ih = is->is_hand;
		while (ih) {
#ifdef DIAGNOSTIC
			if (!ih->ih_fun) {
				printf("NULL interrupt handler!\n");
				panic("irq %02d, hwirq %02d, is %p\n",
					irq, is->is_hwirq, is);
			}
#endif
			(*ih->ih_fun)(ih->ih_arg);
			ih = ih->ih_next;
		}
		KERNEL_UNLOCK_ONE(NULL);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;

		is->is_ev.ev_count++;
		/* Line was masked at the PIC while pending; unmask it. */
		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
	}

	/* Soft interrupts: serial, then net, then clock. */
	if ((ci->ci_ipending & ~pcpl) & (1 << SIR_SERIAL)) {
		ci->ci_ipending &= ~(1 << SIR_SERIAL);
		splsoftserial();
		mtmsr(emsr);
		KERNEL_LOCK(1, NULL);

		softintr__run(IPL_SOFTSERIAL);

		KERNEL_UNLOCK_ONE(NULL);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softserial.ev_count++;
		/* New hardware pendings may have appeared; rescan. */
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1 << SIR_NET)) {

		ci->ci_ipending &= ~(1 << SIR_NET);
		splsoftnet();

		mtmsr(emsr);
		KERNEL_LOCK(1, NULL);

		softintr__run(IPL_SOFTNET);

		KERNEL_UNLOCK_ONE(NULL);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softnet.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1 << SIR_CLOCK)) {
		ci->ci_ipending &= ~(1 << SIR_CLOCK);
		splsoftclock();
		mtmsr(emsr);
		KERNEL_LOCK(1, NULL);

		softintr__run(IPL_SOFTCLOCK);

		KERNEL_UNLOCK_ONE(NULL);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softclock.ev_count++;
		goto again;
	}

	ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */
	ci->ci_iactive = 0;
	mtmsr(emsr);
}
581 
/*
 * Main dispatch loop for one PIC: fetch and service hardware
 * interrupts until the PIC reports none left (255).  Interrupts
 * blocked by the current IPL are marked pending in ci_ipending and
 * their line is masked at the PIC instead of being serviced.
 */
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	int irq, realirq;
	int pcpl, msr, r_imen, bail;

	/* 255 from pic_get_irq means no interrupt pending. */
	realirq = pic->pic_get_irq(pic);
	if (realirq == 255)
		return 0;

	msr = mfmsr();
	pcpl = ci->ci_cpl;

start:

#ifdef MULTIPROCESSOR
	/* THIS IS WRONG XXX */
	while (realirq == ipiops.ppc_ipi_vector) {
		ppcipi_intr(NULL);
		pic->pic_ack_irq(pic, realirq);
		realirq = pic->pic_get_irq(pic);
	}
	if (realirq == 255) {
		return 0;
	}
#endif

	/* Translate the PIC-local number into a virtual IRQ bit. */
	irq = virq[realirq + pic->pic_intrbase];
#ifdef PIC_DEBUG
	if (irq == 0) {
		printf("%s: %d virq 0\n", pic->pic_name, realirq);
		goto boo;
	}
#endif /* PIC_DEBUG */
	KASSERT(realirq < pic->pic_numintrs);
	r_imen = 1 << irq;
	is = &intrsources[irq];

	if ((pcpl & r_imen) != 0) {

		ci->ci_ipending |= r_imen; /* Masked! Mark this as pending */
		pic->pic_disable_irq(pic, realirq);
	} else {

		/* this interrupt is no longer pending */
		ci->ci_ipending &= ~r_imen;

		/* Run the chain at the source's IPL, interrupts enabled. */
		splraise(is->is_mask);
		mtmsr(msr | PSL_EE);
		KERNEL_LOCK(1, NULL);
		ih = is->is_hand;
		bail = 0;
		/* bail < 10 caps chain walks -- presumably guards against
		 * a corrupted/looping list; TODO confirm intent. */
		while ((ih != NULL) && (bail < 10)) {
			if (ih->ih_fun == NULL)
				panic("bogus handler for IRQ %s %d",
				    pic->pic_name, realirq);
			(*ih->ih_fun)(ih->ih_arg);
			ih = ih->ih_next;
			bail++;
		}
		KERNEL_UNLOCK_ONE(NULL);
		mtmsr(msr);
		ci->ci_cpl = pcpl;

		uvmexp.intrs++;
		is->is_ev.ev_count++;
	}
#ifdef PIC_DEBUG
boo:
#endif /* PIC_DEBUG */
	pic->pic_ack_irq(pic, realirq);
	realirq = pic->pic_get_irq(pic);
	if (realirq != 255)
		goto start;

	/* Drop back to the entry IPL; this runs any pendings we queued. */
	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}
667 
668 void
669 pic_ext_intr(void)
670 {
671 
672 	KASSERT(pics[primary_pic] != NULL);
673 	pic_handle_intr(pics[primary_pic]);
674 
675 	return;
676 
677 }
678 
/*
 * Raise the IPL by or'ing the mask bits in "ncpl" into ci_cpl.
 * Returns the previous cpl so the caller can splx() back to it.
 * Note: arguments here are masks (e.g. imask[IPL_FOO]), not indices.
 */
int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__asm volatile("sync; eieio");	/* don't reorder.... */

	ocpl = ci->ci_cpl;
	ci->ci_cpl = ocpl | ncpl;
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}
692 
/*
 * Restore the IPL mask to "ncpl" and service any interrupts that
 * became unmasked by the drop.
 */
void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	if (ci->ci_ipending & ~ncpl)
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
}
704 
/*
 * Lower the IPL mask to "ncpl", servicing newly unmasked pending
 * interrupts.  Returns the previous mask.
 */
int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (ci->ci_ipending & ~ncpl)
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}
719 
720 /* Following code should be implemented with lwarx/stwcx to avoid
721  * the disable/enable. i need to read the manual once more.... */
722 void
723 softintr(int ipl)
724 {
725 	int msrsave;
726 
727 	msrsave = mfmsr();
728 	mtmsr(msrsave & ~PSL_EE);
729 	curcpu()->ci_ipending |= 1 << ipl;
730 	mtmsr(msrsave);
731 }
732 
/*
 * Post-autoconfiguration hook: report the computed interrupt masks
 * and drop the IPL with spl0().
 */
void
genppc_cpu_configure(void)
{
	aprint_normal("biomask %x netmask %x ttymask %x\n",
	    imask[IPL_BIO] & 0x1fffffff,
	    imask[IPL_NET] & 0x1fffffff,
	    imask[IPL_TTY] & 0x1fffffff);

	spl0();
}
743 
744 #if defined(PIC_PREPIVR) || defined(PIC_I8259)
745 /*
746  * isa_intr_alloc needs to be done here, because it needs direct access to
747  * the various interrupt handler structures.
748  */
749 
750 int
751 genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
752     int mask, int type, int *irq_p)
753 {
754 	int irq, vi;
755 	int maybe_irq = -1;
756 	int shared_depth = 0;
757 	struct intr_source *is;
758 
759 	if (pic == NULL)
760 		return 1;
761 
762 	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
763 	     mask >>= 1, irq++) {
764 		if ((mask & 1) == 0)
765 			continue;
766 		vi = virq[irq + pic->pic_intrbase];
767 		if (!vi) {
768 			*irq_p = irq;
769 			return 0;
770 		}
771 		is = &intrsources[vi];
772 		if (is->is_type == IST_NONE) {
773 			*irq_p = irq;
774 			return 0;
775 		}
776 		/* Level interrupts can be shared */
777 		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
778 			struct intrhand *ih = is->is_hand;
779 			int depth;
780 
781 			if (maybe_irq == -1) {
782 				maybe_irq = irq;
783 				continue;
784 			}
785 			for (depth = 0; ih != NULL; ih = ih->ih_next)
786 				depth++;
787 			if (depth < shared_depth) {
788 				maybe_irq = irq;
789 				shared_depth = depth;
790 			}
791 		}
792 	}
793 	if (maybe_irq != -1) {
794 		*irq_p = maybe_irq;
795 		return 0;
796 	}
797 	return 1;
798 }
799 #endif
800