1 /* 2 * Copyright (c) 2000-2004 Opsycon AB (www.opsycon.se) 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS 14 * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 */ 26 27 /* 28 * Interrupt support for Octeon Processor. 
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <mips64/mips_cpu.h>

#include <machine/autoconf.h>
#include <machine/intr.h>
#include <machine/octeonreg.h>

#include <octeon/dev/iobusvar.h>

/* I/O bus space handle used for all CIU register accesses below. */
extern bus_space_handle_t iobus_h;

/* Number of CIU interrupt sources handled here (one 64-bit summary word). */
#define OCTEON_NINTS 64

void octeon_intr_makemasks(void);
void octeon_splx(int);
uint32_t octeon_iointr(uint32_t, struct trap_frame *);
uint32_t octeon_aux(uint32_t, struct trap_frame *);
int octeon_iointr_skip(struct intrhand *, uint64_t, uint64_t);
void octeon_setintrmask(int);

/* Per-irq chains of established handlers, in establishment order. */
struct intrhand *octeon_intrhand[OCTEON_NINTS];

/* Dispatch priority slot for the CIU, just above the clock interrupt. */
#define INTPRI_CIU_0 (INTPRI_CLOCK + 1)

/* Per-cpu bitmask of irqs with at least one established handler. */
uint64_t octeon_intem[MAXCPUS];
/* Per-cpu, per-IPL bitmask of irqs to be blocked at that level. */
uint64_t octeon_imask[MAXCPUS][NIPLS];

/*
 * Initialize CIU interrupt handling for the current cpu: disable all
 * sources on the IP2/IP3 enable registers, then hook the dispatcher
 * (octeon_iointr) on hardware interrupt 0 and install our splx routine.
 */
void
octeon_intr_init(void)
{
	int cpuid = cpu_number();

	/* Start with everything masked; handlers enable bits as they attach. */
	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP2_EN0(cpuid), 0);
	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP3_EN0(cpuid), 0);
	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP2_EN1(cpuid), 0);
	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP3_EN1(cpuid), 0);

	set_intr(INTPRI_CIU_0, CR_INT_0, octeon_iointr);
	register_splx_handler(octeon_splx);
}

/*
 * Establish an interrupt handler called from the dispatcher.
 * The interrupt function established should return zero if there was nothing
 * to serve (no int) and non-zero when an interrupt was serviced.
 *
 * Interrupts are numbered from 1 and up where 1 maps to HW int 0.
 * XXX There is no reason to keep this... except for hardcoded interrupts
 * XXX in kernel configuration files...
 */
void *
octeon_intr_establish(int irq, int level,
    int (*ih_fun)(void *), void *ih_arg, const char *ih_what)
{
	int cpuid = cpu_number();
	struct intrhand **p, *q, *ih;
	int s;

#ifdef DIAGNOSTIC
	if (irq >= OCTEON_NINTS || irq < 0)
		panic("intr_establish: illegal irq %d", irq);
#endif

	/* M_NOWAIT: may be called from autoconf context; caller gets NULL. */
	ih = malloc(sizeof *ih, M_DEVBUF, M_NOWAIT);
	if (ih == NULL)
		return NULL;

	ih->ih_next = NULL;
	ih->ih_fun = ih_fun;
	ih->ih_arg = ih_arg;
	ih->ih_level = level;
	ih->ih_irq = irq;
	evcount_attach(&ih->ih_count, ih_what, (void *)&ih->ih_irq);

	/* Block all interrupts while the chain and masks are inconsistent. */
	s = splhigh();

	/*
	 * Figure out where to put the handler.
	 * This is O(N^2), but we want to preserve the order, and N is
	 * generally small.
	 */
	for (p = &octeon_intrhand[irq]; (q = *p) != NULL;
	    p = (struct intrhand **)&q->ih_next)
		;
	*p = ih;

	/* Mark the source established and recompute the per-IPL masks. */
	octeon_intem[cpuid] |= 1UL << irq;
	octeon_intr_makemasks();

	splx(s);	/* causes hw mask update */

	return (ih);
}

/*
 * Tear down a handler previously returned by octeon_intr_establish().
 * Not implemented; any caller will panic.
 */
void
octeon_intr_disestablish(void *ih)
{
	/* XXX */
	panic("%s not implemented", __func__);
}

/*
 * splx() back-end: lower (or restore) the current cpu's IPL, push the
 * corresponding mask to the CIU, and kick soft interrupt processing if
 * any soft ints became deliverable at the new level.
 */
void
octeon_splx(int newipl)
{
	struct cpu_info *ci = curcpu();

	/*
	 * Update masks to new ipl. Order highly important!
	 * NOTE(review): the .set noreorder/.set reorder pair only affects
	 * the assembler; presumably the mips_sync() barrier is what
	 * actually orders the ci_ipl store here — confirm.
	 */
	__asm__ (".set noreorder\n");
	ci->ci_ipl = newipl;
	mips_sync();
	__asm__ (".set reorder\n");
	/* Only the primary cpu's CIU mask is maintained here. */
	if (CPU_IS_PRIMARY(ci))
		octeon_setintrmask(newipl);
	/* If we still have softints pending trigger processing. */
	if (ci->ci_softpending != 0 && newipl < IPL_SOFTINT)
		setsoftintr0();
}

/*
 * Recompute interrupt masks.
 *
 * Rebuilds octeon_imask[cpuid][] from the currently established handler
 * chains: for each IPL, the set of irqs that must be blocked when running
 * at that level, then folds in the fixed IPL hierarchy so that higher
 * levels always block at least what lower levels do.
 */
void
octeon_intr_makemasks()
{
	int cpuid = cpu_number();
	int irq, level;
	struct intrhand *q;
	uint intrlevel[OCTEON_NINTS];

	/* First, figure out which levels each IRQ uses. */
	for (irq = 0; irq < OCTEON_NINTS; irq++) {
		uint levels = 0;
		for (q = (struct intrhand *)octeon_intrhand[irq]; q != NULL;
		    q = q->ih_next)
			levels |= 1 << q->ih_level;
		intrlevel[irq] = levels;
	}

	/*
	 * Then figure out which IRQs use each level.
	 * Note that we make sure never to overwrite imask[IPL_HIGH], in
	 * case an interrupt occurs during intr_disestablish() and causes
	 * an unfortunate splx() while we are here recomputing the masks.
	 */
	for (level = IPL_NONE; level < NIPLS; level++) {
		uint64_t irqs = 0;
		for (irq = 0; irq < OCTEON_NINTS; irq++)
			if (intrlevel[irq] & (1 << level))
				irqs |= 1UL << irq;
		octeon_imask[cpuid][level] = irqs;
	}
	/*
	 * There are tty, network and disk drivers that use free() at interrupt
	 * time, so vm > (tty | net | bio).
	 *
	 * Enforce a hierarchy that gives slow devices a better chance at not
	 * dropping data.
	 */
	octeon_imask[cpuid][IPL_NET] |= octeon_imask[cpuid][IPL_BIO];
	octeon_imask[cpuid][IPL_TTY] |= octeon_imask[cpuid][IPL_NET];
	octeon_imask[cpuid][IPL_VM] |= octeon_imask[cpuid][IPL_TTY];
	octeon_imask[cpuid][IPL_CLOCK] |= octeon_imask[cpuid][IPL_VM];
	octeon_imask[cpuid][IPL_HIGH] |= octeon_imask[cpuid][IPL_CLOCK];
	octeon_imask[cpuid][IPL_IPI] |= octeon_imask[cpuid][IPL_HIGH];

	/*
	 * These are pseudo-levels.
	 */
	octeon_imask[cpuid][IPL_NONE] = 0;
}

/*
 * Interrupt dispatcher.
 */
uint32_t
octeon_iointr(uint32_t hwpend, struct trap_frame *frame)
{
	struct cpu_info *ci = curcpu();
	int cpuid = cpu_number();
	uint64_t imr, isr, mask;
	int ipl;
	int bit;
	struct intrhand *ih;
	int rc;
	uint64_t sum0 = CIU_IP2_SUM0(cpuid);
	uint64_t en0 = CIU_IP2_EN0(cpuid);

	/* Pending sources and the current hardware enable mask. */
	isr = bus_space_read_8(&iobus_tag, iobus_h, sum0);
	imr = bus_space_read_8(&iobus_tag, iobus_h, en0);
	bit = 63;

	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	bus_space_write_8(&iobus_tag, iobus_h, en0, imr & ~isr);

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 * Dropping them from imr too keeps them disabled when imr is
	 * written back below.
	 */
	if ((mask = isr & octeon_imask[cpuid][frame->ipl]) != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}

	/*
	 * Now process allowed interrupts.
	 */
	if (isr != 0) {
		int lvl, bitno;
		uint64_t tmpisr;

		/* Snapshot the entry IPL; restored after each handler. */
		__asm__ (".set noreorder\n");
		ipl = ci->ci_ipl;
		mips_sync();
		__asm__ (".set reorder\n");

		/* Service higher level interrupts first */
		for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) {
			/* Sources belonging to exactly this level: the xor
			 * isolates bits masked at lvl but not at lvl - 1. */
			tmpisr = isr & (octeon_imask[cpuid][lvl] ^ octeon_imask[cpuid][lvl - 1]);
			if (tmpisr == 0)
				continue;
			/* Walk pending bits from 63 down to 0. */
			for (bitno = bit, mask = 1UL << bitno; mask != 0;
			    bitno--, mask >>= 1) {
				if ((tmpisr & mask) == 0)
					continue;

				/* rc records whether any handler claimed it. */
				rc = 0;
				for (ih = (struct intrhand *)octeon_intrhand[bitno];
				    ih != NULL;
				    ih = ih->ih_next) {
#ifdef MULTIPROCESSOR
					register_t sr;
#endif
					/* Run the handler at its own IPL. */
					splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						sr = getsr();
						ENABLEIPI();
						if (ipl < IPL_SCHED)
							__mp_lock(&kernel_lock);
					}
#endif
					if ((*ih->ih_fun)(ih->ih_arg) != 0) {
						rc = 1;
						atomic_inc_long((unsigned long *)
						    &ih->ih_count.ec_count);
					}
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						if (ipl < IPL_SCHED)
							__mp_unlock(&kernel_lock);
						setsr(sr);
					}
#endif
					/* Restore the entry IPL (barriered). */
					__asm__ (".set noreorder\n");
					ci->ci_ipl = ipl;
					mips_sync();
					__asm__ (".set reorder\n");
				}
				if (rc == 0)
					printf("spurious interrupt %d\n", bitno);

				/* Clear the serviced bit; stop early once
				 * this level has no pending bits left. */
				isr ^= mask;
				if ((tmpisr ^= mask) == 0)
					break;
			}
		}

		/*
		 * Reenable interrupts which have been serviced.
		 */
		bus_space_write_8(&iobus_tag, iobus_h, en0, imr);
	}

	return hwpend;
}

/*
 * Program the CIU enable register for the given IPL: enable every
 * established source that is not blocked at that level.
 */
void
octeon_setintrmask(int level)
{
	int cpuid = cpu_number();

	bus_space_write_8(&iobus_tag, iobus_h, CIU_IP2_EN0(cpuid),
	    octeon_intem[cpuid] & ~octeon_imask[cpuid][level]);
}