/*	$OpenBSD: octeon_intr.c,v 1.13 2016/07/16 10:41:53 visa Exp $	*/

/*
 * Copyright (c) 2000-2004 Opsycon AB (www.opsycon.se)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Interrupt support for Octeon Processor.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <mips64/mips_cpu.h>

#include <machine/autoconf.h>
#include <machine/intr.h>
#include <machine/octeonreg.h>

#include <octeon/dev/iobusvar.h>

extern bus_space_handle_t iobus_h;

#define OCTEON_NINTS 64

void	octeon_intr_makemasks(void);
void	octeon_splx(int);
uint32_t octeon_iointr(uint32_t, struct trapframe *);
uint32_t octeon_aux(uint32_t, struct trapframe *);
int	octeon_iointr_skip(struct intrhand *, uint64_t, uint64_t);
void	octeon_setintrmask(int);

struct intrhand *octeon_intrhand[OCTEON_NINTS];

#define	INTPRI_CIU_0	(INTPRI_CLOCK + 1)

uint64_t octeon_intem[MAXCPUS];
uint64_t octeon_imask[MAXCPUS][NIPLS];

void
octeon_intr_init(void)
{
	int cpuid = cpu_number();
	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP2_EN0(cpuid), 0);
	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP3_EN0(cpuid), 0);
	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP2_EN1(cpuid), 0);
	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP3_EN1(cpuid), 0);

	set_intr(INTPRI_CIU_0, CR_INT_0, octeon_iointr);
	register_splx_handler(octeon_splx);
}

/*
 * Establish an interrupt handler called from the dispatcher.
 * The interrupt function established should return zero if there was nothing
 * to serve (no int) and non-zero when an interrupt was serviced.
 *
 * Interrupts are numbered from 1 and up where 1 maps to HW int 0.
 * XXX There is no reason to keep this... except for hardcoded interrupts
 * XXX in kernel configuration files...
 */
void *
octeon_intr_establish(int irq, int level,
    int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
{
	int cpuid = cpu_number();
	struct intrhand **p, *q, *ih;
	int flags;
	int s;

#ifdef DIAGNOSTIC
	if (irq >= OCTEON_NINTS || irq < 0)
		panic("intr_establish: illegal irq %d", irq);
#endif

	flags = (level & IPL_MPSAFE) ? IH_MPSAFE : 0;
	level &= ~IPL_MPSAFE;

	ih = malloc(sizeof *ih, M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return NULL;

	ih->ih_next = NULL;
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_level = level;
	ih->ih_flags = flags;
	ih->ih_irq = irq;
	evcount_attach(&ih->ih_count, ih_what, (void *)&ih->ih_irq);

	s = splhigh();

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &octeon_intrhand[irq]; (q = *p) != NULL;
	    p = (struct intrhand **)&q->ih_next)
		;
	*p = ih;

	octeon_intem[cpuid] |= 1UL << irq;
	octeon_intr_makemasks();

	splx(s);	/* causes hw mask update */

	return (ih);
}
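/*
 * Usage sketch for octeon_intr_establish(): the driver softc, handler
 * and names below are hypothetical and only illustrate the calling
 * convention described above.
 *
 *	sc->sc_ih = octeon_intr_establish(irq, IPL_NET | IPL_MPSAFE,
 *	    mydev_intr, sc, sc->sc_dev.dv_xname);
 *	if (sc->sc_ih == NULL)
 *		printf("%s: could not establish interrupt\n",
 *		    sc->sc_dev.dv_xname);
 *
 * mydev_intr() is called with sc as its argument and must return zero
 * when its device did not interrupt and nonzero when it serviced one,
 * so that the dispatcher can account the event and detect spurious
 * interrupts.
 */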
void
octeon_intr_disestablish(void *ih)
{
	/* XXX */
	panic("%s not implemented", __func__);
}

void
octeon_splx(int newipl)
{
	struct cpu_info *ci = curcpu();

	/* Update masks to new ipl. Order highly important! */
	__asm__ (".set noreorder\n");
	ci->ci_ipl = newipl;
	mips_sync();
	__asm__ (".set reorder\n");
	octeon_setintrmask(newipl);

	/* If we still have softints pending trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}

/*
 * Recompute interrupt masks.
 */
void
octeon_intr_makemasks()
{
	int cpuid = cpu_number();
	int irq, level;
	struct intrhand *q;
	uint intrlevel[OCTEON_NINTS];

	/* First, figure out which levels each IRQ uses. */
	for (irq = 0; irq < OCTEON_NINTS; irq++) {
		uint levels = 0;
		for (q = (struct intrhand *)octeon_intrhand[irq]; q != NULL;
		    q = q->ih_next)
			levels |= 1 << q->ih_level;
		intrlevel[irq] = levels;
	}

	/*
	 * Then figure out which IRQs use each level.
	 * Note that we make sure never to overwrite imask[IPL_HIGH], in
	 * case an interrupt occurs during intr_disestablish() and causes
	 * an unfortunate splx() while we are here recomputing the masks.
	 */
	for (level = IPL_NONE; level < NIPLS; level++) {
		uint64_t irqs = 0;
		for (irq = 0; irq < OCTEON_NINTS; irq++)
			if (intrlevel[irq] & (1 << level))
				irqs |= 1UL << irq;
		octeon_imask[cpuid][level] = irqs;
	}
	/*
	 * There are tty, network and disk drivers that use free() at interrupt
	 * time, so vm > (tty | net | bio).
	 *
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data.
	 */
	octeon_imask[cpuid][IPL_NET] |= octeon_imask[cpuid][IPL_BIO];
	octeon_imask[cpuid][IPL_TTY] |= octeon_imask[cpuid][IPL_NET];
	octeon_imask[cpuid][IPL_VM] |= octeon_imask[cpuid][IPL_TTY];
	octeon_imask[cpuid][IPL_CLOCK] |= octeon_imask[cpuid][IPL_VM];
	octeon_imask[cpuid][IPL_HIGH] |= octeon_imask[cpuid][IPL_CLOCK];
	octeon_imask[cpuid][IPL_IPI] |= octeon_imask[cpuid][IPL_HIGH];

	/*
	 * These are pseudo-levels.
	 */
	octeon_imask[cpuid][IPL_NONE] = 0;
}
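/*
 * Worked example of the cumulation above (hypothetical IRQ numbers):
 * with a disk handler at IPL_BIO on CIU bit 3 and a network handler at
 * IPL_NET on CIU bit 5, the first loop yields imask[IPL_BIO] = bit 3
 * and imask[IPL_NET] = bit 5.  After the |= chain, imask[IPL_NET]
 * contains bits 3 and 5, so raising spl to IPL_NET or higher blocks
 * both sources, while splbio() still lets the network interrupt
 * through.
 */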
/*
 * Interrupt dispatcher.
 */
uint32_t
octeon_iointr(uint32_t hwpend, struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	int cpuid = cpu_number();
	uint64_t imr, isr, mask;
	int ipl;
	int bit;
	struct intrhand *ih;
	int rc;
	uint64_t sum0 = CIU_IP2_SUM0(cpuid);
	uint64_t en0 = CIU_IP2_EN0(cpuid);

	isr = bus_space_read_8(&iobus_tag, iobus_h, sum0);
	imr = bus_space_read_8(&iobus_tag, iobus_h, en0);
	bit = 63;

	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	bus_space_write_8(&iobus_tag, iobus_h, en0, imr & ~isr);

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & octeon_imask[cpuid][frame->ipl]) != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}

	/*
	 * Now process allowed interrupts.
	 */
	if (isr != 0) {
		int lvl, bitno;
		uint64_t tmpisr;

		__asm__ (".set noreorder\n");
		ipl = ci->ci_ipl;
		mips_sync();
		__asm__ (".set reorder\n");

		/* Service higher level interrupts first */
		for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) {
			tmpisr = isr & (octeon_imask[cpuid][lvl] ^
			    octeon_imask[cpuid][lvl - 1]);
			if (tmpisr == 0)
				continue;
			for (bitno = bit, mask = 1UL << bitno; mask != 0;
			    bitno--, mask >>= 1) {
				if ((tmpisr & mask) == 0)
					continue;

				rc = 0;
				for (ih = (struct intrhand *)octeon_intrhand[bitno];
				    ih != NULL;
				    ih = ih->ih_next) {
#ifdef MULTIPROCESSOR
					register_t sr;
					int need_lock;
#endif
					splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						sr = getsr();
						ENABLEIPI();
					}
					if (ih->ih_flags & IH_MPSAFE)
						need_lock = 0;
					else
						need_lock =
						    ih->ih_level < IPL_CLOCK;
					if (need_lock)
						__mp_lock(&kernel_lock);
#endif
					if ((*ih->ih_fun)(ih->ih_arg) != 0) {
						rc = 1;
						atomic_inc_long((unsigned long *)
						    &ih->ih_count.ec_count);
					}
#ifdef MULTIPROCESSOR
					if (need_lock)
						__mp_unlock(&kernel_lock);
					if (ih->ih_level < IPL_IPI)
						setsr(sr);
#endif
					__asm__ (".set noreorder\n");
					ci->ci_ipl = ipl;
					mips_sync();
					__asm__ (".set reorder\n");
				}
				if (rc == 0)
					printf("spurious interrupt %d\n", bitno);

				isr ^= mask;
				if ((tmpisr ^= mask) == 0)
					break;
			}
		}

		/*
		 * Reenable interrupts which have been serviced.
		 */
		bus_space_write_8(&iobus_tag, iobus_h, en0, imr);
	}

	return hwpend;
}

void
octeon_setintrmask(int level)
{
	int cpuid = cpu_number();

	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP2_EN0(cpuid),
	    octeon_intem[cpuid] & ~octeon_imask[cpuid][level]);
}
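/*
 * Note: octeon_setintrmask() recomputes CIU_IP2_EN0 from scratch as
 * "established sources minus the sources blocked at this IPL", so a
 * later splx() to a lower level also re-enables any source that
 * octeon_iointr() left masked because it arrived while spl-blocked.
 */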