/* $NetBSD: intr.c,v 1.13 2017/05/21 06:49:12 skrll Exp $ */

/*-
 * Copyright (c) 2014 Michael Lorenz
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intr.c,v 1.13 2017/05/21 06:49:12 skrll Exp $");

#define __INTR_PRIVATE

#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/timetc.h>
#include <sys/bitops.h>

#include <mips/locore.h>
#include <machine/intr.h>

#include <mips/ingenic/ingenic_var.h>
#include <mips/ingenic/ingenic_regs.h>
#include <mips/ingenic/ingenic_coreregs.h>

#include "opt_ingenic.h"

#ifdef INGENIC_INTR_DEBUG
#define DPRINTF printf
#else
#define DPRINTF while (0) printf
#endif

extern void ingenic_clockintr(struct clockframe *);
extern void ingenic_puts(const char *);

/*
 * This is a mask of bits to clear in the SR when we go to a
 * given hardware interrupt priority level.
 */
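/*
 * Concretely, this table means that at IPL_VM the two soft interrupt
 * bits and hardware INT0, INT3, INT4 and INT5 are blocked, while INT1
 * (the core mailbox / IPI, see below) and INT2 (the core timer) stay
 * open; from IPL_SCHED upwards everything is blocked.
 */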
static const struct ipl_sr_map ingenic_ipl_sr_map = {
    .sr_bits = {
	[IPL_NONE] = 0,
	[IPL_SOFTCLOCK] = MIPS_SOFT_INT_MASK_0,
	[IPL_SOFTNET] = MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1,
	[IPL_VM] =
	    MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1 |
	    MIPS_INT_MASK_0 |
	    MIPS_INT_MASK_3 |
	    MIPS_INT_MASK_4 |
	    MIPS_INT_MASK_5,
	[IPL_SCHED] =
	    MIPS_SOFT_INT_MASK_0 | MIPS_SOFT_INT_MASK_1 |
	    MIPS_INT_MASK_0 |
	    MIPS_INT_MASK_1 |
	    MIPS_INT_MASK_2 |
	    MIPS_INT_MASK_3 |
	    MIPS_INT_MASK_4 |
	    MIPS_INT_MASK_5,
	[IPL_DDB] = MIPS_INT_MASK,
	[IPL_HIGH] = MIPS_INT_MASK,
    },
};

#define NINTR 64

/* some timer channels share interrupts, couldn't find any others */
struct intrhand {
	struct evcnt ih_count;
	char ih_name[16];
	int (*ih_func)(void *);
	void *ih_arg;
	int ih_ipl;
};

struct intrhand intrs[NINTR];
struct evcnt clockintrs;

void ingenic_irq(int);

void
evbmips_intr_init(void)
{
	uint32_t reg;
	int i;

	ipl_sr_map = ingenic_ipl_sr_map;

	evcnt_attach_dynamic(&clockintrs,
	    EVCNT_TYPE_INTR, NULL, "timer", "intr");

	/* zero all handlers */
	for (i = 0; i < NINTR; i++) {
		intrs[i].ih_func = NULL;
		intrs[i].ih_arg = NULL;
		snprintf(intrs[i].ih_name, sizeof(intrs[i].ih_name),
		    "irq %d", i);
		evcnt_attach_dynamic(&intrs[i].ih_count, EVCNT_TYPE_INTR,
		    NULL, "INTC", intrs[i].ih_name);
	}

	/* mask all peripheral IRQs */
	writereg(JZ_ICMR0, 0xffffffff);
	writereg(JZ_ICMR1, 0xffffffff);

	/* allow peripheral interrupts to core 0 only */
	reg = mips_cp0_corereim_read();
	reg &= 0xffff0000;
	reg |= REIM_IRQ0_M | REIM_MIRQ0_M;
#ifdef MULTIPROCESSOR
	reg |= REIM_MIRQ1_M;
#endif
	mips_cp0_corereim_write(reg);

	mips_cp0_corembox_write(1, 0);	/* ping the 2nd core */
	DPRINTF("%s %08x\n", __func__, reg);
}

void
evbmips_iointr(int ipl, uint32_t ipending, struct clockframe *cf)
{
	uint32_t id;
#ifdef INGENIC_INTR_DEBUG
	char buffer[256];

#if 0
	snprintf(buffer, 256, "pending: %08x CR %08x\n", ipending,
	    mipsNN_cp0_cause_read());
	ingenic_puts(buffer);
#endif
#endif
	/* see which core we're on */
	id = mipsNN_cp0_ebase_read() & 7;

	/*
	 * XXX
	 * the manual counts the softint bits as INT0 and INT1, our headers
	 * don't so everything here looks off by two
	 */
	if (ipending & MIPS_INT_MASK_1) {
		/*
		 * this is a mailbox interrupt / IPI
		 */
		uint32_t reg;
		int s = splsched();

		/* read pending IPIs */
		reg = mips_cp0_corestatus_read();
		if (id == 0) {
			if (reg & CS_MIRQ0_P) {
#ifdef MULTIPROCESSOR
				uint32_t tag;
				tag = mips_cp0_corembox_read(id);

				ipi_process(curcpu(), tag);
#ifdef INGENIC_INTR_DEBUG
				snprintf(buffer, 256,
				    "IPI for core 0, msg %08x\n", tag);
				ingenic_puts(buffer);
#endif
#endif
				reg &= (~CS_MIRQ0_P);
				/* clear it */
				mips_cp0_corestatus_write(reg);
			}
		} else if (id == 1) {
			if (reg & CS_MIRQ1_P) {
#ifdef MULTIPROCESSOR
				uint32_t tag;
				tag = mips_cp0_corembox_read(id);
				ingenic_puts("1");
				if (tag & 0x400)
					hardclock(cf);
				//ipi_process(curcpu(), tag);
#ifdef INGENIC_INTR_DEBUG
				snprintf(buffer, 256,
				    "IPI for core 1, msg %08x\n", tag);
				ingenic_puts(buffer);
#endif
#endif
				reg &= (~CS_MIRQ1_P);
				/* clear it */
				mips_cp0_corestatus_write(reg);
			}
		}
		splx(s);
	}
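	/*
	 * The remaining sources handled here: INT2 carries the core
	 * timer and INT0 the on-chip interrupt controller (peripherals),
	 * both checked below.
	 */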
	if (ipending & MIPS_INT_MASK_2) {
		/* this is a timer interrupt */
		ingenic_clockintr(cf);
		clockintrs.ev_count++;
		ingenic_puts("INT2\n");
	}
	if (ipending & MIPS_INT_MASK_0) {
		uint32_t mask;
		/* peripheral interrupt */

		/*
		 * XXX
		 * OS timer interrupts are supposed to show up as INT2 as well
		 * but I haven't seen them there so for now we just weed them
		 * out right here.
		 * The idea is to allow peripheral interrupts on both cores but
		 * block INT0 on core1 so it would see only timer interrupts
		 * and IPIs. If that doesn't work we'll have to send an IPI to
		 * core1 for each timer tick.
		 */
		mask = readreg(JZ_ICPR0);
		if (mask & 0x0c000000) {
			writereg(JZ_ICMSR0, 0x0c000000);
			ingenic_clockintr(cf);
			writereg(JZ_ICMCR0, 0x0c000000);
			clockintrs.ev_count++;
		}
		ingenic_irq(ipl);
		KASSERT(id == 0);
	}
}

void
ingenic_irq(int ipl)
{
	uint32_t irql, irqh, mask, ll, hh;
	int bit, idx, bail;
#ifdef INGENIC_INTR_DEBUG
	char buffer[16];
#endif

	irql = readreg(JZ_ICPR0);
	irqh = readreg(JZ_ICPR1);
#ifdef INGENIC_INTR_DEBUG
	if (irql != 0) {
		snprintf(buffer, 16, " il%08x", irql);
		ingenic_puts(buffer);
	}
#endif
	bail = 32;
	ll = irql;
	hh = irqh;
	writereg(JZ_ICMSR0, ll);
	writereg(JZ_ICMSR1, hh);
	bit = ffs32(irql);
	while (bit != 0) {
		idx = bit - 1;
		mask = 1 << idx;
		intrs[idx].ih_count.ev_count++;
		if (intrs[idx].ih_func != NULL) {
			if (intrs[idx].ih_ipl == IPL_VM)
				KERNEL_LOCK(1, NULL);
			intrs[idx].ih_func(intrs[idx].ih_arg);
			if (intrs[idx].ih_ipl == IPL_VM)
				KERNEL_UNLOCK_ONE(NULL);
		} else {
			/* spurious interrupt, mask it */
			writereg(JZ_ICMSR0, mask);
		}
		irql &= ~mask;
		bit = ffs32(irql);
		bail--;
		KASSERT(bail > 0);
	}

#ifdef INGENIC_INTR_DEBUG
	if (irqh != 0) {
		snprintf(buffer, 16, " ih%08x", irqh);
		ingenic_puts(buffer);
	}
#endif
	bit = ffs32(irqh);
	while (bit != 0) {
		idx = bit - 1;
		mask = 1 << idx;
		idx += 32;
		intrs[idx].ih_count.ev_count++;
		if (intrs[idx].ih_func != NULL) {
			if (intrs[idx].ih_ipl == IPL_VM)
				KERNEL_LOCK(1, NULL);
			intrs[idx].ih_func(intrs[idx].ih_arg);
			if (intrs[idx].ih_ipl == IPL_VM)
				KERNEL_UNLOCK_ONE(NULL);
		} else {
			/* spurious interrupt, mask it */
			writereg(JZ_ICMSR1, mask);
		}
		irqh &= ~mask;
		bit = ffs32(irqh);
	}
	writereg(JZ_ICMCR0, ll);
	writereg(JZ_ICMCR1, hh);
}

void *
evbmips_intr_establish(int irq, int (*func)(void *), void *arg)
{
	int s;

	if ((irq < 0) || (irq >= NINTR)) {
		aprint_error("%s: invalid irq %d\n", __func__, irq);
		return NULL;
	}

	s = splhigh();	/* XXX probably needs a mutex */
	intrs[irq].ih_func = func;
	intrs[irq].ih_arg = arg;
	intrs[irq].ih_ipl = IPL_VM;

	/* now enable the IRQ */
	if (irq >= 32) {
		writereg(JZ_ICMCR1, 1 << (irq - 32));
	} else
		writereg(JZ_ICMCR0, 1 << irq);

	splx(s);

	return ((void *)(irq + 1));
}
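
/*
 * The cookie returned by evbmips_intr_establish() is simply irq + 1, so
 * a successful registration is never NULL.  A hypothetical caller
 * (sketch only, none of the names below come from this file) would do
 * roughly:
 *
 *	sc->sc_ih = evbmips_intr_establish(JZ_IRQ_FOO, mydrv_intr, sc);
 *	if (sc->sc_ih == NULL)
 *		aprint_error_dev(sc->sc_dev, "couldn't establish interrupt\n");
 *	...
 *	evbmips_intr_disestablish(sc->sc_ih);
 *
 * Handlers registered this way are marked IPL_VM and are called from
 * ingenic_irq() with the kernel lock held.
 */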

void
evbmips_intr_disestablish(void *cookie)
{
	int irq = ((int)cookie) - 1;
	int s;

	if ((irq < 0) || (irq >= NINTR)) {
		aprint_error("%s: invalid irq %d\n", __func__, irq);
		return;
	}

	s = splhigh();

	/* disable the IRQ */
	if (irq >= 32) {
		writereg(JZ_ICMSR1, 1 << (irq - 32));
	} else
		writereg(JZ_ICMSR0, 1 << irq);

	intrs[irq].ih_func = NULL;
	intrs[irq].ih_arg = NULL;
	intrs[irq].ih_ipl = 0;

	splx(s);
}