/*	$NetBSD: intr.c,v 1.24 2016/05/26 17:38:05 macallan Exp $ */

/*-
 * Copyright (c) 2007 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.24 2016/05/26 17:38:05 macallan Exp $");

#include "opt_interrupt.h"
#include "opt_multiprocessor.h"
#include "opt_pic.h"

#define __INTR_PRIVATE

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/kmem.h>

#include <powerpc/psl.h>
#include <powerpc/pic/picvar.h>

#if defined(PIC_I8259) || defined(PIC_PREPIVR)
#include <machine/isa_machdep.h>
#endif

#ifdef MULTIPROCESSOR
#include <powerpc/pic/ipivar.h>
#endif

#ifdef __HAVE_FAST_SOFTINTS
#include <powerpc/softint.h>
#endif

#define MAX_PICS	8	/* 8 PICs ought to be enough for everyone */

#define	PIC_VIRQ_LEGAL_P(x)	((u_int)(x) < NVIRQ)

struct pic_ops *pics[MAX_PICS];
int num_pics = 0;
int max_base = 0;
uint8_t	virq_map[NIRQ];
imask_t virq_mask = HWIRQ_MASK;
imask_t	imask[NIPL];
int	primary_pic = 0;

static int	fakeintr(void *);
static int	mapirq(int);
static void	intr_calculatemasks(void);
static struct pic_ops *find_pic_by_hwirq(int);

static struct intr_source intrsources[NVIRQ];

void
pic_init(void)
{
	/* everything is in bss, no reason to zero it. */
}

int
pic_add(struct pic_ops *pic)
{

	if (num_pics >= MAX_PICS)
		return -1;

	pics[num_pics] = pic;
	pic->pic_intrbase = max_base;
	max_base += pic->pic_numintrs;
	num_pics++;

	return pic->pic_intrbase;
}
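
/*
 * A sketch of how a port might register its PIC at attach time; the
 * "myport" names and callback functions are hypothetical, only pic_add()
 * and the pic_ops members used below come from this code:
 *
 *	static struct pic_ops myport_pic = {
 *		.pic_name	  = "myport",
 *		.pic_numintrs	  = 64,
 *		.pic_enable_irq	  = myport_enable_irq,
 *		.pic_reenable_irq = myport_enable_irq,
 *		.pic_disable_irq  = myport_disable_irq,
 *		.pic_get_irq	  = myport_get_irq,
 *		.pic_ack_irq	  = myport_ack_irq,
 *	};
 *
 *	void
 *	myport_init_interrupts(void)
 *	{
 *		int intrbase = pic_add(&myport_pic);
 *
 *		if (intrbase < 0)
 *			panic("myport: too many PICs");
 *	}
 *
 * After this, hwirqs intrbase .. intrbase + 63 are routed to myport_pic.
 */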

void
pic_finish_setup(void)
{
	for (size_t i = 0; i < num_pics; i++) {
		struct pic_ops * const pic = pics[i];
		if (pic->pic_finish_setup != NULL)
			pic->pic_finish_setup(pic);
	}
}

static struct pic_ops *
find_pic_by_hwirq(int hwirq)
{
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		if (pic->pic_intrbase <= hwirq
		    && hwirq < pic->pic_intrbase + pic->pic_numintrs) {
			return pic;
		}
	}
	return NULL;
}

static int
fakeintr(void *arg)
{

	return 0;
}

/*
 * Register an interrupt handler.
 */
void *
intr_establish(int hwirq, int type, int ipl, int (*ih_fun)(void *),
    void *ih_arg)
{
	struct intrhand **p, *q, *ih;
	struct pic_ops *pic;
	static struct intrhand fakehand;
	int maxipl = ipl;

	if (maxipl == IPL_NONE)
		maxipl = IPL_HIGH;

	if (hwirq >= max_base) {
		panic("%s: bogus IRQ %d, max is %d", __func__, hwirq,
		    max_base - 1);
	}

	pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: cannot find a pic for IRQ %d", __func__, hwirq);

	const int virq = mapirq(hwirq);

	/* no point in sleeping unless someone can free memory. */
	ih = kmem_intr_alloc(sizeof(*ih), cold ? KM_NOSLEEP : KM_SLEEP);
	if (ih == NULL)
		panic("intr_establish: can't allocate handler info");

	if (!PIC_VIRQ_LEGAL_P(virq) || type == IST_NONE)
		panic("intr_establish: bogus irq (%d) or type (%d)",
		    hwirq, type);

	struct intr_source * const is = &intrsources[virq];

	switch (is->is_type) {
	case IST_NONE:
		is->is_type = type;
		break;
	case IST_EDGE_FALLING:
	case IST_EDGE_RISING:
	case IST_LEVEL_LOW:
	case IST_LEVEL_HIGH:
		if (type == is->is_type)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE)
			panic("intr_establish: can't share %s with %s",
			    intr_typename(is->is_type),
			    intr_typename(type));
		break;
	}
	if (is->is_hand == NULL) {
		snprintf(is->is_source, sizeof(is->is_source), "irq %d",
		    is->is_hwirq);
		evcnt_attach_dynamic(&is->is_ev, EVCNT_TYPE_INTR, NULL,
		    pic->pic_name, is->is_source);
	}

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &is->is_hand; (q = *p) != NULL; p = &q->ih_next) {
		maxipl = max(maxipl, q->ih_ipl);
	}

	/*
	 * Actually install a fake handler momentarily, since we might be doing
	 * this with interrupts enabled and don't want the real routine called
	 * until masking is set up.
	 */
	fakehand.ih_ipl = ipl;
	fakehand.ih_fun = fakeintr;
	*p = &fakehand;

	/*
	 * Poke the real handler in now.
	 */
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_next = NULL;
	ih->ih_ipl = ipl;
	ih->ih_virq = virq;
	*p = ih;

	if (pic->pic_establish_irq != NULL)
		pic->pic_establish_irq(pic, hwirq - pic->pic_intrbase,
		    is->is_type, maxipl);

	/*
	 * Remember the highest IPL used by this handler.
	 */
	is->is_ipl = maxipl;

	/*
	 * Now that the handler is established, we're ready to
	 * recalculate the masks.
	 */
	intr_calculatemasks();

	return ih;
}
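
/*
 * Typical use from a driver attach routine (a sketch; the "xx" driver,
 * its softc and xx_intr() are hypothetical, only intr_establish() and
 * the IST_/IPL_ constants are real):
 *
 *	sc->sc_ih = intr_establish(hwirq, IST_LEVEL, IPL_BIO, xx_intr, sc);
 *
 * The handler then runs at IPL_BIO; handlers registered at IPL_VM are
 * additionally run holding the kernel lock (see intr_deliver() below).
 */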

void
dummy_pic_establish_intr(struct pic_ops *pic, int irq, int type, int pri)
{
}

/*
 * Deregister an interrupt handler.
 */
void
intr_disestablish(void *arg)
{
	struct intrhand * const ih = arg;
	const int virq = ih->ih_virq;
	struct intr_source * const is = &intrsources[virq];
	struct intrhand **p, **q;
	int maxipl = IPL_NONE;

	if (!PIC_VIRQ_LEGAL_P(virq))
		panic("intr_disestablish: bogus virq %d", virq);

	/*
	 * Remove the handler from the chain.
	 * This is O(n^2), too.
	 */
	for (p = &is->is_hand, q = NULL; (*p) != NULL; p = &(*p)->ih_next) {
		struct intrhand * const tmp_ih = *p;
		if (tmp_ih == ih) {
			q = p;
		} else {
			maxipl = max(maxipl, tmp_ih->ih_ipl);
		}
	}
	if (q)
		*q = ih->ih_next;
	else
		panic("intr_disestablish: handler not registered");
	kmem_intr_free((void *)ih, sizeof(*ih));

	/*
	 * Reset the IPL for this source now that we've removed a handler.
	 */
	is->is_ipl = maxipl;

	intr_calculatemasks();

	if (is->is_hand == NULL) {
		is->is_type = IST_NONE;
		evcnt_detach(&is->is_ev);
		/*
		 * Make the virtual IRQ available again.
		 */
		virq_map[virq] = 0;
		virq_mask |= PIC_VIRQ_TO_MASK(virq);
	}
}
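
/*
 * The matching teardown for the intr_establish() sketch above:
 *
 *	intr_disestablish(sc->sc_ih);
 *	sc->sc_ih = NULL;
 *
 * When the last handler on a source is removed, the source reverts to
 * IST_NONE and its virtual IRQ is returned to the pool.
 */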

/*
 * Map the (up to max_base) hardware IRQs into the 32-bit virtual IRQ
 * space.
 */
static int
mapirq(int hwirq)
{
	struct pic_ops *pic;

	if (hwirq >= max_base)
		panic("invalid irq %d", hwirq);

	if ((pic = find_pic_by_hwirq(hwirq)) == NULL)
		panic("%s: cannot find PIC for HWIRQ %d", __func__, hwirq);

	if (virq_map[hwirq])
		return virq_map[hwirq];

	if (virq_mask == 0)
		panic("virq overflow");

	const int virq = PIC_VIRQ_MS_PENDING(virq_mask);
	struct intr_source * const is = intrsources + virq;

	virq_mask &= ~PIC_VIRQ_TO_MASK(virq);

	is->is_hwirq = hwirq;
	is->is_pic = pic;
	virq_map[hwirq] = virq;
#ifdef PIC_DEBUG
	printf("mapping hwirq %d to virq %d\n", hwirq, virq);
#endif
	return virq;
}
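
/*
 * A worked example of the mapping above (virq numbers are illustrative;
 * the actual bit layout is defined by PIC_VIRQ_TO_MASK() and
 * PIC_VIRQ_MS_PENDING()): virq 0 is reserved, so virq_mask starts out
 * as HWIRQ_MASK with NVIRQ - 1 free slots.  The first mapirq(17) takes
 * the free virq whose mask bit is most significant, clears that bit in
 * virq_mask, and records virq_map[17] = virq as well as
 * intrsources[virq].is_hwirq = 17.  A later mapirq(17) just returns the
 * cached virq_map[17].  Once every slot is taken, virq_mask is 0 and
 * the next unmapped hwirq panics with "virq overflow".
 */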

static const char * const intr_typenames[] = {
   [IST_NONE]  = "none",
   [IST_PULSE] = "pulsed",
   [IST_EDGE_FALLING]  = "falling edge triggered",
   [IST_EDGE_RISING]  = "rising edge triggered",
   [IST_LEVEL_LOW] = "low level triggered",
   [IST_LEVEL_HIGH] = "high level triggered",
};

const char *
intr_typename(int type)
{
	KASSERT((unsigned int) type < __arraycount(intr_typenames));
	KASSERT(intr_typenames[type] != NULL);
	return intr_typenames[type];
}

/*
 * Recalculate the interrupt masks from scratch.
 * We could code special registry and deregistry versions of this function that
 * would be faster, but the code would be nastier, and we don't expect this to
 * happen very much anyway.
 */
static void
intr_calculatemasks(void)
{
	imask_t newmask[NIPL];
	struct intr_source *is;
	struct intrhand *ih;
	int irq;

	for (u_int ipl = IPL_NONE; ipl < NIPL; ipl++) {
		newmask[ipl] = 0;
	}

	/* First, figure out which ipl each IRQ uses. */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		for (ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
			newmask[ih->ih_ipl] |= PIC_VIRQ_TO_MASK(irq);
		}
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	newmask[IPL_NONE] = 0;

	/*
	 * strict hierarchy - all IPLs block everything blocked by any lower
	 * IPL
	 */
	for (u_int ipl = 1; ipl < NIPL; ipl++) {
		newmask[ipl] |= newmask[ipl - 1];
	}

#ifdef PIC_DEBUG
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		printf("%u: %08x -> %08x\n", ipl, imask[ipl], newmask[ipl]);
	}
#endif

	/*
	 * Disable all interrupts.
	 */
	for (u_int base = 0; base < num_pics; base++) {
		struct pic_ops * const pic = pics[base];
		for (u_int i = 0; i < pic->pic_numintrs; i++) {
			pic->pic_disable_irq(pic, i);
		}
	}

	/*
	 * Now that all interrupts are disabled, update the ipl masks.
	 */
	for (u_int ipl = 0; ipl < NIPL; ipl++) {
		imask[ipl] = newmask[ipl];
	}

	/*
	 * Lastly, enable IRQs actually in use.
	 */
	for (irq = 0, is = intrsources; irq < NVIRQ; irq++, is++) {
		if (is->is_hand)
			pic_enable_irq(is->is_hwirq);
	}
}
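
/*
 * A worked example of the calculation above (IRQ and IPL choices are
 * illustrative): with one handler on virq 3 at IPL_VM and one on virq 5
 * at IPL_SCHED, the first loop produces
 *
 *	newmask[IPL_VM]    = PIC_VIRQ_TO_MASK(3)
 *	newmask[IPL_SCHED] = PIC_VIRQ_TO_MASK(5)
 *
 * and the strict-hierarchy pass then folds lower levels into higher ones:
 *
 *	newmask[IPL_SCHED] = PIC_VIRQ_TO_MASK(3) | PIC_VIRQ_TO_MASK(5)
 *	newmask[IPL_HIGH]  = newmask[IPL_SCHED]
 *
 * so running at IPL_SCHED blocks both sources, while IPL_VM blocks only
 * virq 3.
 */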

void
pic_enable_irq(int hwirq)
{
	struct pic_ops * const pic = find_pic_by_hwirq(hwirq);
	if (pic == NULL)
		panic("%s: bogus IRQ %d", __func__, hwirq);
	const int type = intrsources[virq_map[hwirq]].is_type;
	(*pic->pic_enable_irq)(pic, hwirq - pic->pic_intrbase, type);
}

void
pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();

	const int virq = virq_map[hwirq];
	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t msr = mfmsr();
	mtmsr(msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(msr);
}
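
/*
 * A sketch of pic_mark_pending() use, for glue that fields an interrupt
 * outside pic_handle_intr() (the function and MYPORT_HWIRQ are
 * hypothetical):
 *
 *	void
 *	myport_oddball_intr(void)
 *	{
 *		pic_mark_pending(MYPORT_HWIRQ);
 *	}
 *
 * The handler itself then runs from pic_do_pending_int() once the
 * current IPL drops below the source's IPL, e.g. via splx().
 */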

static void
intr_deliver(struct intr_source *is, int virq)
{
	bool locked = false;
	for (struct intrhand *ih = is->is_hand; ih != NULL; ih = ih->ih_next) {
		KASSERTMSG(ih->ih_fun != NULL,
		    "%s: irq %d, hwirq %d, is %p ih %p: "
		     "NULL interrupt handler!\n", __func__,
		     virq, is->is_hwirq, is, ih);
		if (ih->ih_ipl == IPL_VM) {
			if (!locked) {
				KERNEL_LOCK(1, NULL);
				locked = true;
			}
		} else if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}
		(*ih->ih_fun)(ih->ih_arg);
	}
	if (locked) {
		KERNEL_UNLOCK_ONE(NULL);
	}
	is->is_ev.ev_count++;
}

void
pic_do_pending_int(void)
{
	struct cpu_info * const ci = curcpu();
	imask_t vpend;

	if (ci->ci_iactive)
		return;

	ci->ci_iactive = 1;

	const register_t emsr = mfmsr();
	const register_t dmsr = emsr & ~PSL_EE;

	KASSERT(emsr & PSL_EE);
	mtmsr(dmsr);

	const int pcpl = ci->ci_cpl;
#ifdef __HAVE_FAST_SOFTINTS
again:
#endif

	/* Deliver any pending interrupts that are now unmasked. */
	while ((vpend = (ci->ci_ipending & ~imask[pcpl])) != 0) {
		ci->ci_idepth++;
		KASSERT((PIC_VIRQ_TO_MASK(0) & ci->ci_ipending) == 0);

		/* Get most significant pending bit */
		const int virq = PIC_VIRQ_MS_PENDING(vpend);
		ci->ci_ipending &= ~PIC_VIRQ_TO_MASK(virq);

		struct intr_source * const is = &intrsources[virq];
		struct pic_ops * const pic = is->is_pic;

		splraise(is->is_ipl);
		mtmsr(emsr);
		intr_deliver(is, virq);
		mtmsr(dmsr);
		ci->ci_cpl = pcpl; /* Don't use splx... we are here already! */

		pic->pic_reenable_irq(pic, is->is_hwirq - pic->pic_intrbase,
		    is->is_type);
		ci->ci_idepth--;
	}

#ifdef __HAVE_FAST_SOFTINTS
	const u_int softints = ci->ci_data.cpu_softints &
				 (IPL_SOFTMASK << pcpl);

	/* make sure no stray bits can break the mask computation above */
	KASSERT((ci->ci_data.cpu_softints & ~IPL_SOFTMASK) == 0);

	if (__predict_false(softints != 0)) {
		ci->ci_cpl = IPL_HIGH;
		mtmsr(emsr);
		powerpc_softint(ci, pcpl,
		    (vaddr_t)__builtin_return_address(0));
		mtmsr(dmsr);
		ci->ci_cpl = pcpl;
		if (__predict_false(ci->ci_ipending & ~imask[pcpl]))
			goto again;
	}
#endif

	ci->ci_iactive = 0;
	mtmsr(emsr);
}

int
pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255)
		return 0;

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;

	do {
		const int virq = virq_map[picirq + pic->pic_intrbase];

		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			ci->ci_ipending |= v_imen; /* Masked! Mark this as pending */
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;

			splraise(is->is_ipl);
			mtmsr(msr | PSL_EE);
			intr_deliver(is, virq);
			mtmsr(msr);
			ci->ci_cpl = pcpl;

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}

void
pic_ext_intr(void)
{

	KASSERT(pics[primary_pic] != NULL);
	pic_handle_intr(pics[primary_pic]);
}

int
splraise(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	if (ncpl == ci->ci_cpl)
		return ncpl;

	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	KASSERT(ncpl < NIPL);
	ci->ci_cpl = max(ncpl, ocpl);
	__asm volatile("sync; eieio");	/* reorder protect */
	__insn_barrier();
	return ocpl;
}

static inline bool
have_pending_intr_p(struct cpu_info *ci, int ncpl)
{
	if (ci->ci_ipending & ~imask[ncpl])
		return true;
#ifdef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints & (IPL_SOFTMASK << ncpl))
		return true;
#endif
	return false;
}

void
splx(int ncpl)
{
	struct cpu_info *ci = curcpu();

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();

	__asm volatile("sync; eieio");	/* reorder protect */
}

int
spllower(int ncpl)
{
	struct cpu_info *ci = curcpu();
	int ocpl;

	__insn_barrier();
	__asm volatile("sync; eieio");	/* reorder protect */
	ocpl = ci->ci_cpl;
	ci->ci_cpl = ncpl;
	if (have_pending_intr_p(ci, ncpl))
		pic_do_pending_int();
	__asm volatile("sync; eieio");	/* reorder protect */
	return ocpl;
}
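
/*
 * The usual pattern for the priority routines above (a sketch; the
 * softc and flag are hypothetical):
 *
 *	int s = splraise(IPL_VM);
 *	sc->sc_flags |= XX_BUSY;	(data shared with an IPL_VM handler)
 *	splx(s);
 *
 * splx() restores ci_cpl and, via have_pending_intr_p(), delivers any
 * interrupt that was marked pending while the level was raised.
 */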

void
genppc_cpu_configure(void)
{
	aprint_normal("vmmask %x schedmask %x highmask %x\n",
	    (u_int)imask[IPL_VM] & 0x7fffffff,
	    (u_int)imask[IPL_SCHED] & 0x7fffffff,
	    (u_int)imask[IPL_HIGH] & 0x7fffffff);

	spl0();
}

#if defined(PIC_PREPIVR) || defined(PIC_I8259)
/*
 * isa_intr_alloc needs to be done here, because it needs direct access to
 * the various interrupt handler structures.
 */

int
genppc_isa_intr_alloc(isa_chipset_tag_t ic, struct pic_ops *pic,
    int mask, int type, int *irq_p)
{
	int irq, vi;
	int maybe_irq = -1;
	int shared_depth = 0;
	struct intr_source *is;

	if (pic == NULL)
		return 1;

	for (irq = 0; (mask != 0 && irq < pic->pic_numintrs);
	     mask >>= 1, irq++) {
		if ((mask & 1) == 0)
			continue;
		vi = virq_map[irq + pic->pic_intrbase];
		if (!vi) {
			*irq_p = irq;
			return 0;
		}
		is = &intrsources[vi];
		if (is->is_type == IST_NONE) {
			*irq_p = irq;
			return 0;
		}
		/* Level interrupts can be shared */
		if (type == IST_LEVEL && is->is_type == IST_LEVEL) {
			struct intrhand *ih = is->is_hand;
			int depth;

			for (depth = 0; ih != NULL; ih = ih->ih_next)
				depth++;
			/* prefer the line with the fewest handlers */
			if (maybe_irq == -1 || depth < shared_depth) {
				maybe_irq = irq;
				shared_depth = depth;
			}
		}
	}
	if (maybe_irq != -1) {
		*irq_p = maybe_irq;
		return 0;
	}
	return 1;
}
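
/*
 * Example use (a sketch; the attach context and xx_intr() are
 * hypothetical): place a device that can use ISA IRQ 3, 4 or 5,
 * preferring a free line and falling back on sharing a level-triggered
 * one:
 *
 *	int irq;
 *
 *	if (genppc_isa_intr_alloc(ic, pic, 0x38, IST_LEVEL, &irq) != 0)
 *		return;
 *	(void)intr_establish(pic->pic_intrbase + irq, IST_LEVEL, IPL_TTY,
 *	    xx_intr, sc);
 *
 * 0x38 has bits 3-5 set, one bit per acceptable IRQ line.
 */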
#endif