xref: /netbsd-src/sys/arch/powerpc/pic/intr.c (revision 404fbe5fb94ca1e054339640cabb2801ce52dd30)
1 /*	$NetBSD: intr.c,v 1.6 2008/04/29 06:53:02 martin Exp $ */
2 
3 /*-
4  * Copyright (c) 2007 Michael Lorenz
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
17  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
18  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.6 2008/04/29 06:53:02 martin Exp $");
31 
32 #include "opt_multiprocessor.h"
33 
34 #include <sys/param.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/cpu.h>
38 
39 #include <uvm/uvm_extern.h>
40 
41 #include <arch/powerpc/pic/picvar.h>
42 #include "opt_pic.h"
43 #include "opt_interrupt.h"
44 #if defined(PIC_I8259) || defined (PIC_PREPIVR)
45 #include <machine/isa_machdep.h>
46 #endif
47 
48 #ifdef MULTIPROCESSOR
49 #include <arch/powerpc/pic/ipivar.h>
50 #endif
51 
#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define NVIRQ		32	/* 32 virtual IRQs */
#define NIRQ		128	/* up to 128 HW IRQs */

/* Highest virtual IRQ usable for hardware; top 4 bits reserved (soft IRQs). */
#define HWIRQ_MAX	(NVIRQ - 4 - 1)
#define HWIRQ_MASK	0x0fffffff	/* mask of hardware virq bits in ipending */
#define	LEGAL_VIRQ(x)	((x) >= 0 && (x) < NVIRQ)

struct pic_ops *pics[MAX_PICS];		/* all registered PICs, in pic_add() order */
int num_pics = 0;			/* number of valid entries in pics[] */
int max_base = 0;			/* total HW IRQs registered across all PICs */
uint8_t	virq[NIRQ];			/* HW IRQ -> virtual IRQ; 0 means unmapped */
int	virq_max = 0;			/* highest virtual IRQ handed out so far */
int	imask[NIPL];			/* per-IPL mask of blocked virtual IRQs */
int	primary_pic = 0;		/* pics[] index used by pic_ext_intr() */

static int	fakeintr(void *);
static int	mapirq(uint32_t);
static void	intr_calculatemasks(void);
static struct pic_ops *find_pic_by_irq(int);

/* Per-virtual-IRQ state (handlers, type, mask, evcnt), indexed by virq. */
static struct intr_source intrsources[NVIRQ];
75 
76 void
77 pic_init(void)
78 {
79 	int i;
80 
81 	for (i = 0; i < NIRQ; i++)
82 		virq[i] = 0;
83 	memset(intrsources, 0, sizeof(intrsources));
84 }
85 
86 int
87 pic_add(struct pic_ops *pic)
88 {
89 
90 	if (num_pics >= MAX_PICS)
91 		return -1;
92 
93 	pics[num_pics] = pic;
94 	pic->pic_intrbase = max_base;
95 	max_base += pic->pic_numintrs;
96 	num_pics++;
97 
98 	return pic->pic_intrbase;
99 }
100 
101 void
102 pic_finish_setup(void)
103 {
104 	struct pic_ops *pic;
105 	int i;
106 
107 	for (i = 0; i < num_pics; i++) {
108 		pic = pics[i];
109 		if (pic->pic_finish_setup != NULL)
110 			pic->pic_finish_setup(pic);
111 	}
112 }
113 
114 static struct pic_ops *
115 find_pic_by_irq(int irq)
116 {
117 	struct pic_ops *current;
118 	int base = 0;
119 
120 	while (base < num_pics) {
121 
122 		current = pics[base];
123 		if ((irq >= current->pic_intrbase) &&
124 		    (irq < (current->pic_intrbase + current->pic_numintrs))) {
125 
126 			return current;
127 		}
128 		base++;
129 	}
130 	return NULL;
131 }
132 
/*
 * Placeholder handler installed while the real handler is being hooked
 * up and the masks recomputed; claims nothing and does nothing.
 */
static int
fakeintr(void *cookie)
{

	return 0;
}
139 
/*
 * Register an interrupt handler.
 *
 * hwirq:  global hardware IRQ number (pic_intrbase-relative numbering
 *         spanning all registered PICs)
 * type:   trigger type, IST_EDGE/IST_LEVEL/IST_PULSE
 * level:  IPL_* priority the handler runs at
 * ih_fun/ih_arg: handler function and its argument
 *
 * Returns an opaque cookie for intr_disestablish().  Panics on any
 * invalid argument or allocation failure.
 */
void *
intr_establish(int hwirq, int type, int level, int (*ih_fun)(void *),
    void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct intr_source *is;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int irq, maxlevel = level;

	/* IPL_NONE handlers are masked at the highest priority. */
	if (maxlevel == IPL_NONE)
		maxlevel = IPL_HIGH;

	if (hwirq >= max_base) {

		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_irq(hwirq);
	if (pic == NULL) {

		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	/* Allocate (or look up) the virtual IRQ for this hardware IRQ. */
	irq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("intr_establish: can't malloc handler info");

	if (!LEGAL_VIRQ(irq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)", irq, type);

	is = &intrsources[irq];

	/* Verify the requested type is compatible with existing sharers. */
	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH - incompatible share attempt panics below */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	/* First handler on this source: attach its event counter. */
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {

		maxlevel = max(maxlevel, q->ih_level);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_level = level;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_level = level;
	ih->ih_irq = irq;
	*p = ih;

	/* Let the PIC program trigger type / priority for this pin. */
	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxlevel);

	/*
	 * now that the handler is established we're actually ready to
	 * calculate the masks
	 */
	intr_calculatemasks();


	return ih;
}
244 
/*
 * No-op pic_establish_irq hook for PICs that need no per-pin setup.
 */
void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}
249 
250 /*
251  * Deregister an interrupt handler.
252  */
253 void
254 intr_disestablish(void *arg)
255 {
256 	struct intrhand *ih = arg;
257 	int irq = ih->ih_irq;
258 	struct intr_source *is = &intrsources[irq];
259 	struct intrhand **p, *q;
260 
261 	if (!LEGAL_VIRQ(irq))
262 		panic("intr_disestablish: bogus irq %d", irq);
263 
264 	/*
265 	 * Remove the handler from the chain.
266 	 * This is O(n^2), too.
267 	 */
268 	for (p = &is->is_hand; (q = *p) != NULL && q != ih; p = &q->ih_next)
269 		;
270 	if (q)
271 		*p = q->ih_next;
272 	else
273 		panic("intr_disestablish: handler not registered");
274 	free((void *)ih, M_DEVBUF);
275 
276 	intr_calculatemasks();
277 
278 	if (is->is_hand == NULL) {
279 		is->is_type = IST_NONE;
280 		evcnt_detach(&is->is_ev);
281 	}
282 }
283 
/*
 * Map max_base irqs into 32 (bits).
 *
 * Return the virtual IRQ (bit number in ci_ipending/imask) for hardware
 * IRQ "irq", allocating a new one on first use.  Virtual IRQ 0 is never
 * handed out, so virq[i] == 0 means "not mapped yet".
 */
static int
mapirq(uint32_t irq)
{
	struct pic_ops *pic;
	int v;

	/*
	 * NOTE(review): unsigned irq compared against signed max_base;
	 * relies on max_base being non-negative, which pic_add() ensures.
	 */
	if (irq >= max_base)
		panic("invalid irq %d", irq);

	if ((pic = find_pic_by_irq(irq)) == NULL)
		panic("%s: cannot find PIC for IRQ %d", __func__, irq);

	/* Already mapped: return the existing virtual IRQ. */
	if (virq[irq])
		return virq[irq];

	/* Allocate the next virtual IRQ; 0 is reserved as "unmapped". */
	virq_max++;
	v = virq_max;
	if (v > HWIRQ_MAX)
		panic("virq overflow");

	intrsources[v].is_hwirq = irq;
	intrsources[v].is_pic = pic;
	virq[irq] = v;
#ifdef PIC_DEBUG
	printf("mapping irq %d to virq %d\n", irq, v);
#endif
	return v;
}
315 
/* Printable names for the IST_* interrupt trigger types, indexed by type. */
static const char * const intr_typenames[] = {
   [IST_NONE]  = "none",
   [IST_PULSE] = "pulsed",
   [IST_EDGE]  = "edge-triggered",
   [IST_LEVEL] = "level-triggered",
};
322 
/*
 * Return a printable name for interrupt trigger type "type" (IST_*).
 * Panics (via KASSERT) on an out-of-range or unnamed type.
 */
const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}
330 
/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 *
 * Phases: (1) record which IPLs each virq's handlers use, (2) build the
 * per-IPL blocked-virq masks in imask[], (3) fold in the clock and soft
 * interrupt bits and enforce the IPL hierarchy, (4) compute each source's
 * complete run-time mask, (5) disable every pin and re-enable only those
 * with handlers.
 */
static void
intr_calculatemasks(void)
{
	struct intr_source *is;
	struct intrhand *q;
	struct pic_ops *current;
	int irq, level, i, base;

	/* First, figure out which levels each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		register int levels = 0;
		for (q = is->is_hand; q; q = q->ih_next)
			levels |= 1 << q->ih_level;
		is->is_level = levels;
	}

	/* Then figure out which IRQs use each level. */
	for (level = 0; level < NIPL; level++) {
		register int irqs = 0;
		for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++)
			if (is->is_level & (1 << level))
				irqs |= 1 << irq;
		imask[level] = irqs;
	}

	/*
	 * IPL_CLOCK should mask clock interrupt even if interrupt handler
	 * is not registered.
	 */
	imask[IPL_CLOCK] |= 1 << SPL_CLOCK;

	/*
	 * Initialize soft interrupt masks to block themselves.
	 */
	imask[IPL_SOFTCLOCK] = 1 << SIR_CLOCK;
	imask[IPL_SOFTNET] = 1 << SIR_NET;
	imask[IPL_SOFTSERIAL] = 1 << SIR_SERIAL;

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	imask[IPL_NONE] = 0;

#ifdef SLOPPY_IPLS
	/*
	 * Enforce a sloppy hierarchy as in spl(9)
	 */
	/* everything above softclock must block softclock */
	for (i = IPL_SOFTCLOCK; i < NIPL; i++)
		imask[i] |= imask[IPL_SOFTCLOCK];

	/* everything above softnet must block softnet */
	for (i = IPL_SOFTNET; i < NIPL; i++)
		imask[i] |= imask[IPL_SOFTNET];

	/* IPL_TTY must block softserial */
	imask[IPL_TTY] |= imask[IPL_SOFTSERIAL];

	/* IPL_VM must block net, block IO and tty */
	imask[IPL_VM] |= (imask[IPL_NET] | imask[IPL_BIO] | imask[IPL_TTY]);

	/* IPL_SERIAL must block IPL_TTY */
	imask[IPL_SERIAL] |= imask[IPL_TTY];

	/* IPL_HIGH must block all other priority levels */
	for (i = IPL_NONE; i < IPL_HIGH; i++)
		imask[IPL_HIGH] |= imask[i];
#else	/* !SLOPPY_IPLS */
	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (i = 1; i < NIPL; i++)
		imask[i] |= imask[i - 1];
#endif	/* !SLOPPY_IPLS */

#ifdef DEBUG_IPL
	for (i = 0; i < NIPL; i++) {
		printf("%2d: %08x\n", i, imask[i]);
	}
#endif

	/* And eventually calculate the complete masks. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		register int irqs = 1 << irq;
		for (q = is->is_hand; q; q = q->ih_next)
			irqs |= imask[q->ih_level];
		is->is_mask = irqs;
	}

	/* Lastly, enable IRQs actually in use. */
	/* Disable every pin first, then re-enable sources with handlers. */
	for (base = 0; base < num_pics; base++) {
		current = pics[base];
		for (i = 0; i < current->pic_numintrs; i++)
			current->pic_disable_irq(current, i);
	}

	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}
440 
441 void
442 pic_enable_irq(int num)
443 {
444 	struct pic_ops *current;
445 	int type;
446 
447 	current = find_pic_by_irq(num);
448 	if (current == NULL)
449 		panic("%s: bogus IRQ %d", __func__, num);
450 	type = intrsources[virq[num]].is_type;
451 	current->pic_enable_irq(current, num - current->pic_intrbase, type);
452 }
453 
454 void
455 pic_mark_pending(int irq)
456 {
457 	struct cpu_info * const ci = curcpu();
458 	int v, msr;
459 
460 	v = virq[irq];
461 	if (v == 0)
462 		printf("IRQ %d maps to 0\n", irq);
463 
464 	msr = mfmsr();
465 	mtmsr(msr & ~PSL_EE);
466 	ci->ci_ipending |= 1 << v;
467 	mtmsr(msr);
468 }
469 
/*
 * Run interrupts that were marked pending and are no longer masked at
 * the current IPL.  Called from splx()/spllower() with PSL_EE set;
 * external interrupts are disabled while ci_ipending is manipulated and
 * re-enabled around each handler chain.
 */
void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	struct pic_ops *pic;
	int irq;
	int pcpl;
	int hwpend;
	int emsr, dmsr;

	/* Guard against recursion via splx() inside the handlers below. */
	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;
	emsr = mfmsr();
	KASSERT(emsr & PSL_EE);
	dmsr = emsr & ~PSL_EE;
	mtmsr(dmsr);

	pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Do now unmasked pendings */
	ci->ci_idepth++;
	while ((hwpend = (ci->ci_ipending & ~pcpl & HWIRQ_MASK)) != 0) {
		/* Service the highest-numbered pending virtual IRQ first. */
		irq = 31 - cntlzw(hwpend);
		KASSERT(irq <= virq_max);
		ci->ci_ipending &= ~(1 << irq);
		if (irq == 0) {
			/* virq 0 is never allocated; stale bit, skip it. */
			printf("VIRQ0");
			continue;
		}
		is = &intrsources[irq];
		pic = is->is_pic;

		/* Run the handler chain at the source's IPL, EE enabled. */
		splraise(is->is_mask);
		mtmsr(emsr);
		ih = is->is_hand;
		while (ih) {
#ifdef DIAGNOSTIC
			if (!ih->ih_fun) {
				printf("NULL interrupt handler!\n");
				panic("irq %02d, hwirq %02d, is %p\n",
					irq, is->is_hwirq, is);
			}
#endif
			/* IPL_VM handlers run under the big kernel lock. */
			if (ih->ih_level == IPL_VM) {
				KERNEL_LOCK(1, NULL);
			}
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_level == IPL_VM) {
				KERNEL_UNLOCK_ONE(NULL);
			}
			ih = ih->ih_next;
		}
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;

		is->is_ev.ev_count++;
		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
	}
	ci->ci_idepth--;

#ifdef __HAVE_FAST_SOFTINTS
	/* Soft interrupts, highest priority first; rescan HW after each. */
	if ((ci->ci_ipending & ~pcpl) & (1 << SIR_SERIAL)) {
		ci->ci_ipending &= ~(1 << SIR_SERIAL);
		splsoftserial();
		mtmsr(emsr);
		softintr__run(IPL_SOFTSERIAL);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softserial.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1 << SIR_NET)) {
		ci->ci_ipending &= ~(1 << SIR_NET);
		splsoftnet();
		mtmsr(emsr);
		softintr__run(IPL_SOFTNET);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softnet.ev_count++;
		goto again;
	}
	if ((ci->ci_ipending & ~pcpl) & (1 << SIR_CLOCK)) {
		ci->ci_ipending &= ~(1 << SIR_CLOCK);
		splsoftclock();
		mtmsr(emsr);
		softintr__run(IPL_SOFTCLOCK);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		ci->ci_ev_softclock.ev_count++;
		goto again;
	}
#endif

	ci->ci_cpl = pcpl;	/* Don't use splx... we are here already! */
	ci->ci_iactive = 0;
	mtmsr(emsr);
}
575 
/*
 * Main dispatcher for one PIC: drain every IRQ the PIC reports, running
 * unmasked handler chains and marking masked IRQs pending.  "cookie" is
 * the struct pic_ops registered with pic_add().  Always returns 0.
 */
int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	struct intr_source *is;
	struct intrhand *ih;
	int irq, realirq;
	int pcpl, msr, r_imen, bail;

	/* 255 is the PIC's "no interrupt pending" sentinel. */
	realirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (realirq == 255)
		return 0;

	msr = mfmsr();
	pcpl = ci->ci_cpl;

start:

#ifdef MULTIPROCESSOR
	/* THIS IS WRONG XXX */
	while (realirq == ipiops.ppc_ipi_vector) {
		ppcipi_intr(NULL);
		pic->pic_ack_irq(pic, realirq);
		realirq = pic->pic_get_irq(pic, PIC_GET_RECHECK);
	}
	if (realirq == 255) {
		return 0;
	}
#endif

	irq = virq[realirq + pic->pic_intrbase];
#ifdef PIC_DEBUG
	if (irq == 0) {
		printf("%s: %d virq 0\n", pic->pic_name, realirq);
		goto boo;
	}
#endif /* PIC_DEBUG */
	KASSERT(realirq < pic->pic_numintrs);
	r_imen = 1 << irq;
	is = &intrsources[irq];

	if ((pcpl & r_imen) != 0) {

		ci->ci_ipending |= r_imen; /* Masked! Mark this as pending */
		pic->pic_disable_irq(pic, realirq);
	} else {

		/* this interrupt is no longer pending */
		ci->ci_ipending &= ~r_imen;
		ci->ci_idepth++;

		/* Run the handler chain at the source's IPL, EE enabled. */
		splraise(is->is_mask);
		mtmsr(msr | PSL_EE);
		ih = is->is_hand;
		bail = 0;
		/*
		 * NOTE(review): "bail" caps each pass at 10 handlers per
		 * chain; any handlers beyond that are silently skipped on
		 * this pass - confirm this limit is intentional.
		 */
		while ((ih != NULL) && (bail < 10)) {
			if (ih->ih_fun == NULL)
				panic("bogus handler for IRQ %s %d",
				    pic->pic_name, realirq);
			/* IPL_VM handlers run under the big kernel lock. */
			if (ih->ih_level == IPL_VM) {
				KERNEL_LOCK(1, NULL);
			}
			(*ih->ih_fun)(ih->ih_arg);
			if (ih->ih_level == IPL_VM) {
				KERNEL_UNLOCK_ONE(NULL);
			}
			ih = ih->ih_next;
			bail++;
		}
		mtmsr(msr);
		ci->ci_cpl = pcpl;

		uvmexp.intrs++;
		is->is_ev.ev_count++;
		ci->ci_idepth--;
	}
#ifdef PIC_DEBUG
boo:
#endif /* PIC_DEBUG */
	pic->pic_ack_irq(pic, realirq);
	realirq = pic->pic_get_irq(pic, PIC_GET_RECHECK);
	if (realirq != 255)
		goto start;

	/* Run anything that became pending, then restore the entry MSR. */
	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}
667 
668 void
669 pic_ext_intr(void)
670 {
671 
672 	KASSERT(pics[primary_pic] != NULL);
673 	pic_handle_intr(pics[primary_pic]);
674 
675 	return;
676 
677 }
678 
679 int
680 splraise(int ncpl)
681 {
682 	struct cpu_info *ci = curcpu();
683 	int ocpl;
684 
685 	__asm volatile("sync; eieio");	/* don't reorder.... */
686 
687 	ocpl = ci->ci_cpl;
688 	ci->ci_cpl = ocpl | ncpl;
689 	__asm volatile("sync; eieio");	/* reorder protect */
690 	return ocpl;
691 }
692 
693 void
694 splx(int ncpl)
695 {
696 	struct cpu_info *ci = curcpu();
697 
698 	__asm volatile("sync; eieio");	/* reorder protect */
699 	ci->ci_cpl = ncpl;
700 	if (ci->ci_ipending & ~ncpl)
701 		pic_do_pending_int();
702 	__asm volatile("sync; eieio");	/* reorder protect */
703 }
704 
705 int
706 spllower(int ncpl)
707 {
708 	struct cpu_info *ci = curcpu();
709 	int ocpl;
710 
711 	__asm volatile("sync; eieio");	/* reorder protect */
712 	ocpl = ci->ci_cpl;
713 	ci->ci_cpl = ncpl;
714 	if (ci->ci_ipending & ~ncpl)
715 		pic_do_pending_int();
716 	__asm volatile("sync; eieio");	/* reorder protect */
717 	return ocpl;
718 }
719 
720 /* Following code should be implemented with lwarx/stwcx to avoid
721  * the disable/enable. i need to read the manual once more.... */
722 void
723 softintr(int ipl)
724 {
725 	int msrsave;
726 
727 	msrsave = mfmsr();
728 	mtmsr(msrsave & ~PSL_EE);
729 	curcpu()->ci_ipending |= 1 << ipl;
730 	mtmsr(msrsave);
731 }
732 
/*
 * End-of-autoconfiguration hook: report the computed interrupt masks
 * and drop to IPL 0, enabling interrupts for the first time.
 */
void
genppc_cpu_configure(void)
{
	aprint_normal("biomask %x netmask %x ttymask %x\n",
	    imask[IPL_BIO] & 0x1fffffff,
	    imask[IPL_NET] & 0x1fffffff,
	    imask[IPL_TTY] & 0x1fffffff);

	spl0();
}
743 
744 #if defined(PIC_PREPIVR) || defined(PIC_I8259)
745 /*
746  * isa_intr_alloc needs to be done here, because it needs direct access to
747  * the various interrupt handler structures.
748  */
749 
750 int
751 genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
752     int mask, int type, int *irq_p)
753 {
754 	int irq, vi;
755 	int maybe_irq = -1;
756 	int shared_depth = 0;
757 	struct intr_source *is;
758 
759 	if (pic == NULL)
760 		return 1;
761 
762 	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
763 	     mask >>= 1, irq++) {
764 		if ((mask & 1) == 0)
765 			continue;
766 		vi = virq[irq + pic->pic_intrbase];
767 		if (!vi) {
768 			*irq_p = irq;
769 			return 0;
770 		}
771 		is = &intrsources[vi];
772 		if (is->is_type == IST_NONE) {
773 			*irq_p = irq;
774 			return 0;
775 		}
776 		/* Level interrupts can be shared */
777 		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
778 			struct intrhand *ih = is->is_hand;
779 			int depth;
780 
781 			if (maybe_irq == -1) {
782 				maybe_irq = irq;
783 				continue;
784 			}
785 			for (depth = 0; ih != NULL; ih = ih->ih_next)
786 				depth++;
787 			if (depth < shared_depth) {
788 				maybe_irq = irq;
789 				shared_depth = depth;
790 			}
791 		}
792 	}
793 	if (maybe_irq != -1) {
794 		*irq_p = maybe_irq;
795 		return 0;
796 	}
797 	return 1;
798 }
799 #endif
800