/*	$NetBSD: intr.c,v 1.15 2011/06/17 23:36:18 matt Exp $ */

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.15 2011/06/17 23:36:18 matt Exp $");

#include "opt_multiprocessor.h"

#define __INTR_PRIVATE

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/cpu.h>

#include <arch/powerpc/pic/picvar.h>
#include "opt_pic.h"
#include "opt_interrupt.h"
#if defined(PIC_I8259) || defined(PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <arch/powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define	PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)

struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t	virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
imask_t	imask[NIPL];
int	primary_pic = 0;
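
/*
 * A quick sketch of the bookkeeping above (the exact bit layout is
 * owned by the PIC_VIRQ_* macros in the machine headers, so treat the
 * details as illustrative):
 *
 *	virq_map[hwirq]	the virtual IRQ assigned to a hardware IRQ,
 *			or 0 if that hardware IRQ is unmapped (virtual
 *			IRQ 0 is reserved as the "unmapped" marker).
 *	virq_mask	one bit per still-free virtual IRQ; mapirq()
 *			below carves allocations out of it.
 *	imask[ipl]	the set of virtual IRQs blocked while running
 *			at 'ipl'; rebuilt by intr_calculatemasks().
 */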

static int	fakeintr(void *);
static int	mapirq(int);
static void	intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	/* everything is in bss, no reason to zero it. */
}

int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}
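
#if 0
/*
 * Hypothetical sketch (not compiled): how a PIC driver hands itself to
 * this framework.  The "myopic_*" names are invented for illustration;
 * the callbacks shown are the ones this file invokes through
 * struct pic_ops.  pic_add() reserves the global hwirq range
 * [pic_intrbase, pic_intrbase + pic_numintrs) and returns pic_intrbase,
 * or -1 once MAX_PICS PICs are registered.
 */
static struct pic_ops myopic_pic = {
	.pic_name	   = "myopic",
	.pic_numintrs	   = 16,
	.pic_enable_irq	   = myopic_enable_irq,
	.pic_reenable_irq  = myopic_enable_irq,
	.pic_disable_irq   = myopic_disable_irq,
	.pic_get_irq	   = myopic_get_irq,
	.pic_ack_irq	   = myopic_ack_irq,
	.pic_establish_irq = dummy_pic_establish_intr,
};

static void
myopic_attach(void)
{
	if (pic_add(&myopic_pic) == -1)
		panic("myopic: pic_add failed");
}
#endif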

void
pic_finish_setup(void)
{
	for (size_t i = 0; i < num_pics; i++) {
		struct pic_ops * const pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		if (pic->pic_intrbase <= hwirq
		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
			return pic;
		}
	}
	return NULL;
}

static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL) {
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);
	}

	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("intr_establish: can't malloc handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE:
	case IST_LEVEL:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = max(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by the handlers on this source.
	 */
	is->is_ipl = maxipl;

	/*
	 * Now that the handler is established we're actually ready to
	 * calculate the masks.
	 */
	intr_calculatemasks();

	return ih;
}
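
#if 0
/*
 * Hypothetical usage sketch (not compiled): attach a level-triggered
 * handler at IPL_BIO.  "mydev_intr" and "sc" are invented names; hwirq
 * is a global interrupt number, i.e. the owning PIC's pic_intrbase plus
 * the pin number on that PIC.  Note that intr_establish() panics on
 * failure rather than returning NULL.
 */
	void *ih;

	ih = intr_establish(hwirq, IST_LEVEL, IPL_BIO, mydev_intr, sc);

	/* ... and the matching teardown on detach: */
	intr_disestablish(ih);
#endif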

void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			maxipl = max(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	free((void *)ih, M_DEVBUF);

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}

/*
 * Map one of the max_base hardware IRQs to one of the 32 bits
 * (virtual IRQs) of an imask_t.
 */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	if (virq_map[hwirq])
		return virq_map[hwirq];

	if (virq_mask == 0)
		panic("virq overflow");

	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}
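
#if 0
/*
 * Illustration only (not compiled).  mapirq() always takes the most
 * significant free bit of virq_mask, and HWIRQ_MASK starts out with the
 * virtual IRQ 0 bit clear, so - assuming PIC_VIRQ_TO_MASK(v) counts
 * bits from the most significant end - virtual IRQs are handed out in
 * increasing order, and mapping the same hwirq twice is idempotent:
 */
	int a = mapirq(16);	/* first mapping  -> virq 1 */
	int b = mapirq(23);	/* second mapping -> virq 2 */
	int c = mapirq(16);	/* already mapped -> virq 1 again */
#endif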

static const char * const intr_typenames[] = {
	[IST_NONE]  = "none",
	[IST_PULSE] = "pulsed",
	[IST_EDGE]  = "edge-triggered",
	[IST_LEVEL] = "level-triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	/* The designated initializer already zeroes every level. */
	imask_t newmask[NIPL] = { [IPL_NONE ... IPL_HIGH] = 0 };
	struct intr_source *is;
	int irq;

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		newmask[is->is_ipl] |= PIC_VIRQ_TO_MASK(irq);
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * Strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL.
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef DEBUG_IPL
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}
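
/*
 * Worked example of the passes above, with three sources: virq 1 at
 * IPL_BIO, virq 2 at IPL_NET and virq 3 at IPL_TTY.  After the first
 * pass each level holds only its own bit; the hierarchy pass then makes
 * every level also block everything below it:
 *
 *	imask[IPL_BIO]  = {1}
 *	imask[IPL_NET]  = {1,2}		(IPL_NET also blocks IPL_BIO)
 *	imask[IPL_TTY]  = {1,2,3}
 *	imask[IPL_HIGH] = {1,2,3}	(blocks everything)
 */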

void
pic_enable_irq(int hwirq)
{
	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: bogus IRQ %d", __func__, hwirq);
	const int type = intrsources[virq_map[hwirq]].is_type;
	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}

static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    ("%s: irq %d, hwirq %d, is %p ih %p: "
		     "NULL interrupt handler!\n", __func__,
		     virq, is->is_hwirq, is, ih));
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}

void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Deliver any pending interrupts that are now unmasked. */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		splraise(is->is_ipl);
		mtmsr(emsr);
		intr_deliver(is, virq);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl; /* Don't use splx... we are here already! */

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints =
	    (ci->ci_data.cpu_softints << pcpl) & IPL_SOFTMASK;

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}

int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255)
		return 0;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	do {
#ifdef MULTIPROCESSOR
		/* THIS IS WRONG XXX */
		if (picirq == ipiops.ppc_ipi_vector) {
			ci->ci_cpl = IPL_HIGH;
			ipi_intr(NULL);
			ci->ci_cpl = pcpl;
			pic->pic_ack_irq(pic, picirq);
			continue;
		}
#endif

		const int virq = virq_map[picirq + pic->pic_intrbase];
		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			ci->ci_ipending |= v_imen; /* Masked! Mark this as pending */
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			splraise(is->is_ipl);
			mtmsr(msr | PSL_EE);
			intr_deliver(is, virq);
			mtmsr(msr);
			ci->ci_cpl = pcpl;

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}
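
/*
 * How the two paths above cooperate: when pic_handle_intr() fields an
 * interrupt whose virq is blocked at the current IPL, it only marks the
 * bit in ci_ipending and disables the line at the PIC.  The splx(pcpl)
 * on the way out (or any later splx()/spllower() call) then spots bits
 * in ci_ipending that the new level no longer masks and replays them
 * through pic_do_pending_int(), which also re-enables the line.
 */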

void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);
}

int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl)
		return ncpl;
	__asm volatile("sync; eieio");	/* don't reorder.... */
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = max(ncpl, ocpl);
	__asm volatile("sync; eieio");	/* reorder protect */
	__insn_barrier();
	return ocpl;
}

static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if ((ci->ci_data.cpu_softints << ncpl) & IPL_SOFTMASK)
		return true;
#endif
	return false;
}

void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	__asm volatile("sync; eieio");	/* reorder protect */
}

int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}
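
#if 0
/*
 * Hypothetical usage sketch (not compiled) of the spl primitives above:
 * raise the IPL around a critical section shared with an interrupt
 * handler, then restore it; splx() delivers anything held off meanwhile.
 */
	int s;

	s = splraise(IPL_TTY);		/* block IPL_TTY and below */
	/* ... touch state shared with a tty-level handler ... */
	splx(s);			/* restore, run pending ints */
#endif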

void
genppc_cpu_configure(void)
{
	aprint_normal("biomask %x netmask %x ttymask %x\n",
	    (u_int)imask[IPL_BIO] & 0x1fffffff,
	    (u_int)imask[IPL_NET] & 0x1fffffff,
	    (u_int)imask[IPL_TTY] & 0x1fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/*
		 * Level interrupts can be shared; keep the candidate
		 * with the fewest handlers already attached.
		 */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			if (maybe_irq == -1 || depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
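
#if 0
/*
 * Hypothetical usage sketch (not compiled): let the allocator pick a
 * free - or, failing that, the least-shared level-triggered - IRQ out
 * of a bitmask of acceptable ISA lines (bit n = IRQ n).
 */
	int irq;

	if (genppc_isa_intr_alloc(ic, pic, 0x0cb8 /* IRQs 3-5,7,10,11 */,
	    IST_LEVEL, &irq) == 0)
		printf("using ISA irq %d\n", irq);
#endif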
#endif
718