xref: /netbsd-src/sys/arch/hppa/hppa/intr.c (revision 82d56013d7b633d116a93943de88e08335357a7c)
/*	$NetBSD: intr.c,v 1.3 2019/05/04 13:04:36 skrll Exp $	*/
/*	$OpenBSD: intr.c,v 1.27 2009/12/31 12:52:35 jsing Exp $	*/

/*
 * Copyright (c) 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Interrupt handling for NetBSD/hppa.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.3 2019/05/04 13:04:36 skrll Exp $");

#define __MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

#include <machine/autoconf.h>
#include <machine/cpufunc.h>
#include <machine/intr.h>
#include <machine/reg.h>

#include <hppa/hppa/machdep.h>

#include <machine/mutex.h>

#if defined(_KERNEL_OPT)
#include "opt_lockdebug.h"
#endif

static int hppa_intr_ipl_next(struct cpu_info *);
void hppa_intr_calculatemasks(struct cpu_info *);
int hppa_intr_ipending(struct hppa_interrupt_register *, int);
void hppa_intr_dispatch(int, int, struct trapframe *);

/* The list of all interrupt registers. */
struct hppa_interrupt_register *hppa_interrupt_registers[HPPA_INTERRUPT_BITS];
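/*
 * Nested (cascaded) registers are referred to by their index in this
 * table: hppa_intr_establish() records IR_BIT_REG(index) in the parent
 * register's bits map, and hppa_intr_ipending() uses that index to look
 * the child register up again.
 */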


/*
 * This establishes a new interrupt register.
 */
void
hppa_interrupt_register_establish(struct cpu_info *ci,
    struct hppa_interrupt_register *ir)
{
	int idx;

	/* Initialize the register structure. */
	memset(ir, 0, sizeof(*ir));
	ir->ir_ci = ci;

	for (idx = 0; idx < HPPA_INTERRUPT_BITS; idx++)
		ir->ir_bits_map[idx] = IR_BIT_UNUSED;

	ir->ir_bits = ~0;
	/* Add this structure to the list. */
	for (idx = 0; idx < HPPA_INTERRUPT_BITS; idx++)
		if (hppa_interrupt_registers[idx] == NULL)
			break;
	if (idx == HPPA_INTERRUPT_BITS)
		panic("%s: too many regs", __func__);
	hppa_interrupt_registers[idx] = ir;
}

/*
 * This initialises interrupts for a CPU.
 */
void
hppa_intr_initialise(struct cpu_info *ci)
{
	int i;

	/* Initialize all priority level masks to mask everything. */
	for (i = 0; i < NIPL; i++)
		ci->ci_imask[i] = -1;

	/* We are now at the highest priority level. */
	ci->ci_cpl = -1;

	/* There are no pending interrupts. */
	ci->ci_ipending = 0;

	/* We are not running an interrupt handler. */
	ci->ci_intr_depth = 0;

	/* There are no interrupt handlers. */
	memset(ci->ci_ib, 0, sizeof(ci->ci_ib));

	/* There are no interrupt registers. */
	memset(hppa_interrupt_registers, 0, sizeof(hppa_interrupt_registers));
}

/*
 * This establishes a new interrupt handler.
 */
void *
hppa_intr_establish(int ipl, int (*handler)(void *), void *arg,
    struct hppa_interrupt_register *ir, int bit_pos)
{
	struct hppa_interrupt_bit *ib;
	struct cpu_info *ci = ir->ir_ci;
	int idx;

	/* Panic on a bad interrupt bit. */
	if (bit_pos < 0 || bit_pos >= HPPA_INTERRUPT_BITS)
		panic("%s: bad interrupt bit %d", __func__, bit_pos);

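	/*
	 * Note: ir_bits_map[] is indexed as "31 ^ bit_pos" throughout
	 * this file, i.e. map slot i corresponds to shift position
	 * 31 - i, matching the PA-RISC convention of numbering bits
	 * from the most significant end.
	 */
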
	/*
	 * Panic if this interrupt bit is already handled, but allow
	 * shared interrupts for cascaded registers, e.g. dino and gsc
	 * XXX This could be improved.
	 */
	if (handler != NULL) {
		if (IR_BIT_USED_P(ir->ir_bits_map[31 ^ bit_pos]))
			panic("%s: interrupt already handled", __func__);
	}

	/*
	 * If this interrupt bit leads us to another interrupt register,
	 * simply note that in the mapping for the bit.
	 */
	if (handler == NULL) {
		for (idx = 1; idx < HPPA_INTERRUPT_BITS; idx++)
			if (hppa_interrupt_registers[idx] == arg)
				break;
		if (idx == HPPA_INTERRUPT_BITS)
			panic("%s: unknown int reg", __func__);

		ir->ir_bits_map[31 ^ bit_pos] = IR_BIT_REG(idx);

		return NULL;
	}

	/*
	 * Otherwise, allocate a new bit in the spl.
	 */
	idx = hppa_intr_ipl_next(ir->ir_ci);

	ir->ir_bits &= ~(1 << bit_pos);
	ir->ir_rbits &= ~(1 << bit_pos);
	if (!IR_BIT_USED_P(ir->ir_bits_map[31 ^ bit_pos])) {
		ir->ir_bits_map[31 ^ bit_pos] = 1 << idx;
	} else {
		int j;

		ir->ir_bits_map[31 ^ bit_pos] |= 1 << idx;
		j = (ir - hppa_interrupt_registers[0]);
		ci->ci_ishared |= (1 << j);
	}
	ib = &ci->ci_ib[idx];

	/* Fill this interrupt bit. */
	ib->ib_reg = ir;
	ib->ib_ipl = ipl;
	ib->ib_spl = (1 << idx);
	snprintf(ib->ib_name, sizeof(ib->ib_name), "irq %d", bit_pos);

	evcnt_attach_dynamic(&ib->ib_evcnt, EVCNT_TYPE_INTR, NULL, ir->ir_name,
	     ib->ib_name);
	ib->ib_handler = handler;
	ib->ib_arg = arg;

	hppa_intr_calculatemasks(ci);

	return ib;
}
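
/*
 * Usage sketch (hypothetical names, not taken from this file): a bus
 * driver that owns its own request register might first register it,
 * hook it into its parent register as a cascade, and then establish a
 * real handler on one of its bits, roughly:
 *
 *	hppa_interrupt_register_establish(ci, &sc->sc_ir);
 *	hppa_intr_establish(ipl, NULL, &sc->sc_ir, parent_ir, parent_bit);
 *	hppa_intr_establish(IPL_VM, xxx_intr, sc, &sc->sc_ir, bit_pos);
 *
 * The NULL-handler call only records the cascade in the parent's bits
 * map; the final call allocates an spl bit and attaches the handler.
 */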

/*
 * This allocates an interrupt bit within an interrupt register.  If irq
 * is -1 the highest-numbered free bit is chosen; otherwise the specific
 * bit requested by irq is used if it is still available.  It returns the
 * bit position, or -1 if no suitable bit was available.
 */
int
hppa_intr_allocate_bit(struct hppa_interrupt_register *ir, int irq)
{
	int bit_pos;
	int last_bit;
	u_int mask;
	int *bits;

	if (irq == -1) {
		bit_pos = 31;
		last_bit = 0;
		bits = &ir->ir_bits;
	} else {
		bit_pos = irq;
		last_bit = irq;
		bits = &ir->ir_rbits;
	}
	for (mask = (1 << bit_pos); bit_pos >= last_bit; bit_pos--) {
		if (*bits & mask)
			break;
		mask >>= 1;
	}
	if (bit_pos >= last_bit) {
		*bits &= ~mask;
		return bit_pos;
	}

	return -1;
}

/*
 * This returns the next available spl bit.
 */
static int
hppa_intr_ipl_next(struct cpu_info *ci)
{
	int idx;

	for (idx = 0; idx < HPPA_INTERRUPT_BITS; idx++)
		if (ci->ci_ib[idx].ib_reg == NULL)
			break;
	if (idx == HPPA_INTERRUPT_BITS)
		panic("%s: too many devices", __func__);
	return idx;
}

/*
 * This computes the per-IPL interrupt masks and loads the hardware
 * interrupt mask registers.
 */
void
hppa_intr_calculatemasks(struct cpu_info *ci)
{
	struct hppa_interrupt_bit *ib;
	struct hppa_interrupt_register *ir;
	int idx, bit_pos;
	int mask;
	int ipl;

	/*
	 * Put together the initial imask for each level.
	 */
	memset(ci->ci_imask, 0, sizeof(ci->ci_imask));
	for (bit_pos = 0; bit_pos < HPPA_INTERRUPT_BITS; bit_pos++) {
		ib = &ci->ci_ib[bit_pos];
		if (ib->ib_reg == NULL)
			continue;
		ci->ci_imask[ib->ib_ipl] |= ib->ib_spl;
	}

	/*
	 * IPL_NONE is used for hardware interrupts that are never blocked,
	 * and do not block anything else.
	 */
	ci->ci_imask[IPL_NONE] = 0;

	/*
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data.
	 */
	for (ipl = NIPL - 1; ipl > 0; ipl--)
		ci->ci_imask[ipl - 1] |= ci->ci_imask[ipl];
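
	/*
	 * After this loop each level's mask also includes the masks of
	 * all higher-numbered levels; hppa_intr_dispatch() ORs
	 * ci_imask[ib->ib_ipl] into cpl before it runs a handler at
	 * that level.
	 */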

	/*
	 * Load all mask registers, loading %eiem last.  This will finally
	 * enable interrupts, but since cpl and ipending should be -1 and 0,
	 * respectively, no interrupts will get dispatched until the priority
	 * level is lowered.
	 */
	KASSERT(ci->ci_cpl == -1);
	KASSERT(ci->ci_ipending == 0);

	for (idx = 0; idx < HPPA_INTERRUPT_BITS; idx++) {
		ir = hppa_interrupt_registers[idx];
		if (ir == NULL || ir->ir_ci != ci)
			continue;
		mask = 0;
		for (bit_pos = 0; bit_pos < HPPA_INTERRUPT_BITS; bit_pos++) {
			if (IR_BIT_USED_P(ir->ir_bits_map[31 ^ bit_pos]))
				mask |= (1 << bit_pos);
		}
		if (ir->ir_iscpu)
			ir->ir_ci->ci_eiem = mask;
		else if (ir->ir_mask != NULL)
			*ir->ir_mask = mask;
	}
}

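/*
 * This enables interrupt delivery on the current CPU: load its %eiem
 * value, set PSW_I in ci_psw and turn external interrupts on.
 */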
void
hppa_intr_enable(void)
{
	struct cpu_info *ci = curcpu();

	mtctl(ci->ci_eiem, CR_EIEM);
	ci->ci_psw |= PSW_I;
	hppa_enable_irq();
}


/*
 * Service interrupts.  This doesn't necessarily dispatch them.  This is
 * called with %eiem loaded with zero.  It's named hppa_intr because
 * trap.c calls it by that name.
 */
void
hppa_intr(struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	int eirr;
	int i;

#ifndef LOCKDEBUG
	extern char mutex_enter_crit_start[];
	extern char mutex_enter_crit_end[];

#ifndef	MULTIPROCESSOR
	extern char _lock_cas_ras_start[];
	extern char _lock_cas_ras_end[];

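	/*
	 * If we interrupted the restartable atomic sequence used by
	 * _lock_cas(), roll the PC back to its start so that the whole
	 * sequence reruns when we return.
	 */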
	if (frame->tf_iisq_head == HPPA_SID_KERNEL &&
	    frame->tf_iioq_head >= (u_int)_lock_cas_ras_start &&
	    frame->tf_iioq_head <= (u_int)_lock_cas_ras_end) {
		frame->tf_iioq_head = (u_int)_lock_cas_ras_start;
		frame->tf_iioq_tail = (u_int)_lock_cas_ras_start + 4;
	}
#endif

	/*
	 * If we interrupted in the middle of mutex_enter(), we must patch up
	 * the lock owner value quickly if we got the interlock.  If any of the
	 * interrupt handlers need to acquire the mutex, they could deadlock if
	 * the owner value is left unset.
	 */
	if (frame->tf_iisq_head == HPPA_SID_KERNEL &&
	    frame->tf_iioq_head >= (u_int)mutex_enter_crit_start &&
	    frame->tf_iioq_head <= (u_int)mutex_enter_crit_end &&
	    frame->tf_ret0 != 0)
		((kmutex_t *)frame->tf_arg0)->mtx_owner = (uintptr_t)curlwp;
#endif

	/*
	 * Read the CPU interrupt register and acknowledge all interrupts.
	 * Starting with this value, get our set of new pending interrupts and
	 * add these new bits to ipending.
	 */
	mfctl(CR_EIRR, eirr);
	mtctl(eirr, CR_EIRR);

	ci->ci_ipending |= hppa_intr_ipending(&ci->ci_ir, eirr);

	i = 0;
	/* If we have interrupts to dispatch, do so. */
	while (ci->ci_ipending & ~ci->ci_cpl) {
		int shared;

		hppa_intr_dispatch(ci->ci_cpl, frame->tf_eiem, frame);

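		/*
		 * Re-read the live interrupt state (*ir_level) of every
		 * register that carries shared bits and fold any still
		 * pending sources back into ipending before deciding
		 * whether to dispatch again.
		 */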
		shared = ci->ci_ishared;
		while (shared) {
			struct hppa_interrupt_register *sir;
			int sbit, lvl;

			sbit = ffs(shared) - 1;
			sir = hppa_interrupt_registers[sbit];
			lvl = *sir->ir_level;

			ci->ci_ipending |= hppa_intr_ipending(sir, lvl);
			shared &= ~(1 << sbit);
		}
		i++;
		KASSERTMSG(i <= 2,
		    "%s: ci->ci_ipending %08x ci->ci_cpl %08x shared %08x\n",
		    __func__, ci->ci_ipending, ci->ci_cpl, shared);
	}
}

/*
 * Dispatch interrupts.  This dispatches at least one interrupt.
 * This is called with %eiem loaded with zero.
 */
void
hppa_intr_dispatch(int ncpl, int eiem, struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct hppa_interrupt_bit *ib;
	struct clockframe clkframe;
	int ipending_run;
	int bit_pos;
	void *arg;
	int handled __unused;
	bool locked = false;

	/*
	 * Increment our depth
	 */
	ci->ci_intr_depth++;

	/* Loop while we have interrupts to dispatch. */
	for (;;) {

		/* Read ipending and mask it with ncpl. */
		ipending_run = (ci->ci_ipending & ~ncpl);
		if (ipending_run == 0)
			break;

		/* Choose one of the resulting bits to dispatch. */
		bit_pos = ffs(ipending_run) - 1;

		/*
		 * If this interrupt handler takes the clockframe
		 * as an argument, conjure one up.
		 */
		ib = &ci->ci_ib[bit_pos];
		ib->ib_evcnt.ev_count++;
		arg = ib->ib_arg;
		if (arg == NULL) {
			clkframe.cf_flags = (ci->ci_intr_depth ?
			    TFF_INTR : 0);
			clkframe.cf_spl = ncpl;
			if (frame != NULL) {
				clkframe.cf_flags |= frame->tf_flags;
				clkframe.cf_pc = frame->tf_iioq_head;
			}
			arg = &clkframe;
		}

		/*
		 * Remove this bit from ipending, raise spl to
		 * the level required to run this interrupt,
		 * and reenable interrupts.
		 */
		ci->ci_ipending &= ~(1 << bit_pos);
		ci->ci_cpl = ncpl | ci->ci_imask[ib->ib_ipl];
		mtctl(eiem, CR_EIEM);

		if (ib->ib_ipl == IPL_VM) {
			KERNEL_LOCK(1, NULL);
			locked = true;
		}

		/* Count and dispatch the interrupt. */
		ci->ci_data.cpu_nintr++;
		handled = (*ib->ib_handler)(arg);
#if 0
		if (!handled)
			printf("%s: can't handle interrupt\n",
				ib->ib_evcnt.ev_name);
#endif
		if (locked) {
			KERNEL_UNLOCK_ONE(NULL);
			locked = false;
		}

		/* Disable interrupts and loop. */
		mtctl(0, CR_EIEM);
	}

	/* Interrupts are disabled again, restore cpl and the depth. */
	ci->ci_cpl = ncpl;
	ci->ci_intr_depth--;
}


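/*
 * This converts the raw pending bits read from an interrupt register
 * into spl bits using the register's bits map, recursing into any
 * nested (cascaded) registers that the map points at.
 */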
int
hppa_intr_ipending(struct hppa_interrupt_register *ir, int eirr)
{
	int pending = 0;
	int idx;

	for (idx = 31; idx >= 0; idx--) {
		if ((eirr & (1 << idx)) == 0)
			continue;
		if (IR_BIT_NESTED_P(ir->ir_bits_map[31 ^ idx])) {
			struct hppa_interrupt_register *nir;
			int reg = ir->ir_bits_map[31 ^ idx] & ~IR_BIT_MASK;

			nir = hppa_interrupt_registers[reg];
			pending |= hppa_intr_ipending(nir, *(nir->ir_req));
		} else {
			pending |= ir->ir_bits_map[31 ^ idx];
		}
	}

	return pending;
}

bool
cpu_intr_p(void)
{
	struct cpu_info *ci = curcpu();

#ifdef __HAVE_FAST_SOFTINTS
#error this should not count fast soft interrupts
#else
	return ci->ci_intr_depth != 0;
#endif
}
519