/* $NetBSD: sb1250_icu.c,v 1.4 2023/12/05 19:16:48 andvar Exp $ */

/*
 * Copyright 2000, 2001
 * Broadcom Corporation. All rights reserved.
 *
 * This software is furnished under license and may be used and copied only
 * in accordance with the following terms and conditions.  Subject to these
 * conditions, you may download, copy, install, use, modify and distribute
 * modified or unmodified copies of this software in source and/or binary
 * form.  No title or ownership is transferred hereby.
 *
 * 1) Any source code used, modified or distributed must reproduce and
 *    retain this copyright notice and list of conditions as they appear in
 *    the source file.
 *
 * 2) No right is granted to use any trade name, trademark, or logo of
 *    Broadcom Corporation.  The "Broadcom Corporation" name may not be
 *    used to endorse or promote products derived from this software
 *    without the prior written permission of Broadcom Corporation.
 *
 * 3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR IMPLIED
 *    WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF
 *    MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
 *    NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM BE LIABLE
 *    FOR ANY DAMAGES WHATSOEVER, AND IN PARTICULAR, BROADCOM SHALL NOT BE
 *    LIABLE FOR DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *    CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *    SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *    BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 *    WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 *    OR OTHERWISE), EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sb1250_icu.c,v 1.4 2023/12/05 19:16:48 andvar Exp $");

#define __INTR_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>

/* XXX for uvmexp */
#include <uvm/uvm_extern.h>

#include <mips/locore.h>

#include <evbmips/sbmips/cpuvar.h>
#include <evbmips/sbmips/systemsw.h>

#include <mips/sibyte/include/sb1250_regs.h>
#include <mips/sibyte/include/sb1250_int.h>
#include <mips/sibyte/include/sb1250_scd.h>
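/*
 * CP0 Status interrupt-mask bits kept blocked at each IPL.  Interrupt 5
 * (the CP0 count/compare timer) stays masked even at IPL_NONE; if it ever
 * fires, sb1250_cpu_intr() below simply disarms it.
 */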
static const struct ipl_sr_map sb1250_ipl_sr_map = {
    .sr_bits = {
	[IPL_NONE] = MIPS_INT_MASK_5,
	[IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0 | MIPS_INT_MASK_5,
	[IPL_SOFTBIO] = MIPS_SOFT_INT_MASK_0 | MIPS_INT_MASK_5,
	[IPL_SOFTNET] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_5,
	[IPL_SOFTSERIAL] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_5,
	[IPL_VM] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
	    | MIPS_INT_MASK_5,
	[IPL_SCHED] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
	    | MIPS_INT_MASK_1 | MIPS_INT_MASK_5,
	[IPL_DDB] = MIPS_SOFT_INT_MASK | MIPS_INT_MASK_0
	    | MIPS_INT_MASK_1 | MIPS_INT_MASK_4
	    | MIPS_INT_MASK_5,
	[IPL_HIGH] = MIPS_INT_MASK,
    },
};

/* interrupt-source (imr) bits to be dispatched at each IPL */
static uint64_t ints_for_ipl[_IPL_N];

struct sb1250_ihand {
	void	(*ih_fun)(void *, uint32_t, vaddr_t);
	void	*ih_arg;
	int	ih_ipl;
};

static struct sb1250_ihand sb1250_ihands[K_INT_SOURCES];

#ifdef MULTIPROCESSOR
static void sb1250_ipi_intr(void *, uint32_t, vaddr_t);
#endif
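
/*
 * Each interrupt source has its own 8-byte map register in the interrupt
 * mapper, selecting which CPU interrupt line (or NMI) the source is
 * routed to.
 */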
#define SB1250_I_MAP(x)		(R_IMR_INTERRUPT_MAP_BASE + (x) * 8)

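/* The interrupt mapper registers are 64 bits wide; always access them as such. */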
#define READ_REG(rp)		mips3_ld((register_t)(rp))
#define WRITE_REG(rp, val)	mips3_sd((register_t)(rp), (val))

static void sb1250_cpu_intr(int, vaddr_t, uint32_t);
static void *sb1250_intr_establish(u_int, u_int,
    void (*fun)(void *, uint32_t, vaddr_t), void *);

static const char sb1250_intr_names[K_INT_SOURCES][16] = {
	[K_INT_WATCHDOG_TIMER_0] = "wdog0",
	[K_INT_WATCHDOG_TIMER_1] = "wdog1",
	[K_INT_TIMER_0] = "timer0",
	[K_INT_TIMER_1] = "timer1",
	[K_INT_TIMER_2] = "timer2",
	[K_INT_TIMER_3] = "timer3",
	[K_INT_SMB_0] = "smb0",
	[K_INT_SMB_1] = "smb1",
	[K_INT_UART_0] = "uart0",
	[K_INT_UART_1] = "uart1",
	[K_INT_SER_0] = "syncser0",
	[K_INT_SER_1] = "syncser1",
	[K_INT_PCMCIA] = "pcmcia",
	[K_INT_ADDR_TRAP] = "addrtrap",
	[K_INT_PERF_CNT] = "perfcnt",
	[K_INT_TRACE_FREEZE] = "tracefreeze",
	[K_INT_BAD_ECC] = "bad ECC",
	[K_INT_COR_ECC] = "corrected ECC",
	[K_INT_IO_BUS] = "iobus",
	[K_INT_MAC_0] = "mac0",
	[K_INT_MAC_1] = "mac1",
	[K_INT_MAC_2] = "mac2",
	[K_INT_DM_CH_0] = "dmover0",
	[K_INT_DM_CH_1] = "dmover1",
	[K_INT_DM_CH_2] = "dmover2",
	[K_INT_DM_CH_3] = "dmover3",
	[K_INT_MBOX_0] = "mbox0",
	[K_INT_MBOX_1] = "mbox1",
	[K_INT_MBOX_2] = "mbox2",
	[K_INT_MBOX_3] = "mbox3",
	[K_INT_CYCLE_CP0_INT] = "zbccp0",
	[K_INT_CYCLE_CP1_INT] = "zbccp1",
	[K_INT_GPIO_0] = "gpio0",
	[K_INT_GPIO_1] = "gpio1",
	[K_INT_GPIO_2] = "gpio2",
	[K_INT_GPIO_3] = "gpio3",
	[K_INT_GPIO_4] = "gpio4",
	[K_INT_GPIO_5] = "gpio5",
	[K_INT_GPIO_6] = "gpio6",
	[K_INT_GPIO_7] = "gpio7",
	[K_INT_GPIO_8] = "gpio8",
	[K_INT_GPIO_9] = "gpio9",
	[K_INT_GPIO_10] = "gpio10",
	[K_INT_GPIO_11] = "gpio11",
	[K_INT_GPIO_12] = "gpio12",
	[K_INT_GPIO_13] = "gpio13",
	[K_INT_GPIO_14] = "gpio14",
	[K_INT_GPIO_15] = "gpio15",
	[K_INT_LDT_FATAL] = "ldt fatal",
	[K_INT_LDT_NONFATAL] = "ldt nonfatal",
	[K_INT_LDT_SMI] = "ldt smi",
	[K_INT_LDT_NMI] = "ldt nmi",
	[K_INT_LDT_INIT] = "ldt init",
	[K_INT_LDT_STARTUP] = "ldt startup",
	[K_INT_LDT_EXT] = "ldt ext",
	[K_INT_PCI_ERROR] = "pci error",
	[K_INT_PCI_INTA] = "pci inta",
	[K_INT_PCI_INTB] = "pci intb",
	[K_INT_PCI_INTC] = "pci intc",
	[K_INT_PCI_INTD] = "pci intd",
	[K_INT_SPARE_2] = "spare2",
	[K_INT_MAC_0_CH1] = "mac0 ch1",
	[K_INT_MAC_1_CH1] = "mac1 ch1",
	[K_INT_MAC_2_CH1] = "mac2 ch1",
};

#ifdef MULTIPROCESSOR
static void
sb1250_lsw_cpu_init(struct cpu_info *ci)
{
	struct cpu_softc * const cpu = ci->ci_softc;

	WRITE_REG(cpu->sb1cpu_imr_base + R_IMR_INTERRUPT_MASK, cpu->sb1cpu_imr_all);
}

static int
sb1250_lsw_send_ipi(struct cpu_info *ci, int tag)
{
	struct cpu_softc * const cpu = ci->ci_softc;
	const uint64_t mbox_mask = 1LLU << tag;

	if (kcpuset_isset(cpus_running, cpu_index(ci)))
		WRITE_REG(cpu->sb1cpu_imr_base + R_IMR_MAILBOX_SET_CPU, mbox_mask);

	return 0;
}

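/*
 * IPIs travel via the per-CPU mailbox register: the sender sets bits in the
 * target CPU's mailbox (raising a mailbox interrupt there), and this handler
 * reads the pending bits, clears them, and passes them to ipi_process().
 */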
static void
sb1250_ipi_intr(void *arg, uint32_t status, vaddr_t pc)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	uint64_t mbox_mask;

	ci->ci_data.cpu_nintr++;

	mbox_mask = READ_REG(cpu->sb1cpu_imr_base + R_IMR_MAILBOX_CPU);
	WRITE_REG(cpu->sb1cpu_imr_base + R_IMR_MAILBOX_CLR_CPU, mbox_mask);

	ipi_process(ci, mbox_mask);
}
#endif /* MULTIPROCESSOR */

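/*
 * Per-CPU interrupt mapper setup: route every source to interrupt line 0,
 * attach an event counter per source, and load the initial mask, which
 * leaves only the watchdog (and, on MULTIPROCESSOR kernels, mailbox)
 * sources unmasked.
 */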
void
sb1250_cpu_init(struct cpu_softc *cpu)
{
	const char * const xname = device_xname(cpu->sb1cpu_dev);
	struct evcnt * evcnts = cpu->sb1cpu_intr_evcnts;

	cpu->sb1cpu_imr_base =
	    MIPS_PHYS_TO_KSEG1(A_IMR_MAPPER(cpu->sb1cpu_ci->ci_cpuid));
#ifdef MULTIPROCESSOR
	cpu->sb1cpu_imr_all =
	    ~(M_INT_MBOX_0|M_INT_MBOX_1|M_INT_MBOX_2|M_INT_MBOX_3
	      |M_INT_WATCHDOG_TIMER_0|M_INT_WATCHDOG_TIMER_1);
#else
	cpu->sb1cpu_imr_all = ~(M_INT_WATCHDOG_TIMER_0|M_INT_WATCHDOG_TIMER_1);
#endif

	for (u_int i = 0; i < K_INT_SOURCES; i++, evcnts++) {
		WRITE_REG(cpu->sb1cpu_imr_base + SB1250_I_MAP(i), K_INT_MAP_I0);
		evcnt_attach_dynamic(evcnts, EVCNT_TYPE_INTR, NULL,
		    xname, sb1250_intr_names[i]);
	}
#if 0
	WRITE_REG(cpu->sb1cpu_imr_base + SB1250_I_MAP(K_INT_WATCHDOG_TIMER_0), K_INT_MAP_NMI);
	WRITE_REG(cpu->sb1cpu_imr_base + SB1250_I_MAP(K_INT_WATCHDOG_TIMER_1), K_INT_MAP_NMI);
#endif

	WRITE_REG(cpu->sb1cpu_imr_base + R_IMR_INTERRUPT_MASK, cpu->sb1cpu_imr_all);
#ifdef MULTIPROCESSOR
	if (sb1250_ihands[K_INT_MBOX_0].ih_fun == NULL) {
		/*
		 * For now, deliver all IPIs at IPL_SCHED.  Eventually
		 * some will be at IPL_VM.
		 */
		for (int irq = K_INT_MBOX_0; irq <= K_INT_MBOX_3; irq++)
			sb1250_intr_establish(irq, IPL_SCHED,
			    sb1250_ipi_intr, NULL);
	}
#endif /* MULTIPROCESSOR */
}

void
sb1250_ipl_map_init(void)
{
	ipl_sr_map = sb1250_ipl_sr_map;
}

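/*
 * One-time initialization on the boot CPU: hook the system interrupt
 * switch, clear the dispatch tables, and mask all interrupts on every
 * CPU's mapper until handlers are established.
 */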
void
sb1250_icu_init(void)
{
	const uint64_t imr_all = 0xffffffffffffffffULL;

	KASSERT(memcmp((const void *)&ipl_sr_map, (const void *)&sb1250_ipl_sr_map, sizeof(ipl_sr_map)) == 0);

	/* zero out the list of used interrupts/lines */
	memset(ints_for_ipl, 0, sizeof ints_for_ipl);
	memset(sb1250_ihands, 0, sizeof sb1250_ihands);

	systemsw.s_cpu_intr = sb1250_cpu_intr;
	systemsw.s_intr_establish = sb1250_intr_establish;

#ifdef MULTIPROCESSOR
	/*
	 * Bits 27:24 (11:8 of G_SYS_PART) encode the number of CPUs present.
	 */
	u_int sys_part = G_SYS_PART(READ_REG(MIPS_PHYS_TO_KSEG1(A_SCD_SYSTEM_REVISION)));
	const u_int cpus = (sys_part >> 8) & 0xf;

	/*
	 * Mask all interrupts in each secondary CPU's interrupt mapper;
	 * sb1250_cpu_init() unmasks sources later as handlers are
	 * established.
	 */
	vaddr_t imr = MIPS_PHYS_TO_KSEG1(A_IMR_CPU0_BASE + R_IMR_INTERRUPT_MASK);
	for (u_int i = 1; imr += IMR_REGISTER_SPACING, i < cpus; i++) {
		WRITE_REG(imr, imr_all);
	}
#endif /* MULTIPROCESSOR */
	WRITE_REG(MIPS_PHYS_TO_KSEG1(A_IMR_CPU0_BASE + R_IMR_INTERRUPT_MASK),
	    imr_all);

#ifdef MULTIPROCESSOR
	mips_locoresw.lsw_send_ipi = sb1250_lsw_send_ipi;
	mips_locoresw.lsw_cpu_init = sb1250_lsw_cpu_init;
#endif /* MULTIPROCESSOR */
}

static void
sb1250_cpu_intr(int ppl, vaddr_t pc, uint32_t status)
{
	struct cpu_info * const ci = curcpu();
	struct cpu_softc * const cpu = ci->ci_softc;
	const vaddr_t imr_base = cpu->sb1cpu_imr_base;
	struct evcnt * const evcnts = cpu->sb1cpu_intr_evcnts;
	uint32_t pending;
	int ipl;

	ci->ci_data.cpu_nintr++;

	while (ppl < (ipl = splintr(&pending))) {
		splx(ipl);

		/* XXX do something if 5? */
		if (pending & MIPS_INT_MASK_5) {
			uint32_t cycles = mips3_cp0_count_read();
			mips3_cp0_compare_write(cycles - 1);
			/* just leave the bugger disabled */
		}

		uint64_t sstatus = ints_for_ipl[ipl];
		sstatus &= READ_REG(imr_base + R_IMR_INTERRUPT_SOURCE_STATUS);
		while (sstatus != 0) {
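			/*
			 * Find the highest-numbered pending source with a
			 * 64-bit count-leading-zeros: use dclz directly on
			 * 64-bit ABIs, or synthesize it from two 32-bit
			 * __builtin_clz() calls on O32.
			 */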
#ifndef __mips_o32
			u_int n;
			__asm("dclz %0,%1" : "=r"(n) : "r"(sstatus));
#else
			u_int n = (sstatus >> 32)
			    ? 0 + __builtin_clz(sstatus >> 32)
			    : 32 + __builtin_clz((uint32_t)sstatus);
#endif
			u_int j = 63 - n;
			KASSERT(sstatus & (1ULL << j));
			sstatus ^= (1ULL << j);
			struct sb1250_ihand *ihp = &sb1250_ihands[j];
			KASSERT(ihp->ih_fun);
			(*ihp->ih_fun)(ihp->ih_arg, status, pc);
			evcnts[j].ev_count++;
		}
		(void) splhigh();
	}
}

static void *
sb1250_intr_establish(u_int num, u_int ipl,
    void (*fun)(void *, uint32_t, vaddr_t), void *arg)
{
	struct cpu_softc * const cpu = curcpu()->ci_softc;
	struct sb1250_ihand * const ih = &sb1250_ihands[num];
	const int s = splhigh();

	/*
	 * XXX simonb
	 * The swarm wedges hard on the first serial interrupt when
	 * we try to map IPL_SERIAL at a higher priority than
	 * other device interrupts.  For now, just force all
	 * devices to interrupt at IPL_VM.
	 */
	ipl = IPL_VM;		/* XXX */

	if (num >= K_INT_SOURCES)
		panic("%s: invalid interrupt number (0x%x)", __func__, num);
	if (ipl >= _IPL_N || ipl < IPL_VM)
		panic("%s: invalid ipl %d", __func__, ipl);
	if (ih->ih_fun != NULL)
		panic("%s: cannot share sb1250 interrupts", __func__);

	ints_for_ipl[ipl] |= (1ULL << num);
	cpu->sb1cpu_imr_all &= ~(1ULL << num);

	ih->ih_fun = fun;
	ih->ih_arg = arg;
	ih->ih_ipl = ipl;

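	/*
	 * Watchdog sources are routed to interrupt line 4; anything above
	 * IPL_VM would go to line 1 (currently unreachable since ipl is
	 * forced to IPL_VM above).  Everything else stays on line 0 as set
	 * up in sb1250_cpu_init().
	 */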
	if (num <= K_INT_WATCHDOG_TIMER_1)
		WRITE_REG(cpu->sb1cpu_imr_base + SB1250_I_MAP(num), K_INT_MAP_I4);
	else if (ipl > IPL_VM)
		WRITE_REG(cpu->sb1cpu_imr_base + SB1250_I_MAP(num), K_INT_MAP_I1);

	WRITE_REG(cpu->sb1cpu_imr_base + R_IMR_INTERRUPT_MASK, cpu->sb1cpu_imr_all);

	splx(s);

	return ih;
}