/*	$OpenBSD: plic.c,v 1.13 2024/09/04 07:54:51 mglocker Exp $	*/

/*
 * Copyright (c) 2020, Mars Li <mengshi.li.mars@gmail.com>
 * Copyright (c) 2020, Brian Bamsch <bbamsch@google.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/evcount.h>

#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/cpu.h>
#include <machine/sbi.h>
#include "riscv64/dev/riscv_cpu_intc.h"

#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *	https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number of interrupt sources supported by devices marked as
 * 'sifive,plic-1.0.0' is 1024, of which device 0 is defined as non-existent
 * by the RISC-V Privileged Spec.
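 *
 * The register layout assumed by the macros below (offsets from the PLIC
 * base; this is a summary of the defines that follow, not of the spec):
 *
 *	0x000000  per-source priority registers, 4 bytes per source
 *	0x002000  per-context enable bitmaps, 0x80 bytes per context
 *	0x200000  per-context threshold (+0x0) and claim/complete (+0x4)
 *		  registers, 0x1000 bytes per context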
 */

#define	PLIC_MAX_IRQS		1024

#define	PLIC_PRIORITY_BASE	0x000000U

#define	PLIC_ENABLE_BASE	0x002000U
#define	PLIC_ENABLE_STRIDE	0x80U
#define	IRQ_ENABLE		1
#define	IRQ_DISABLE		0

#define	PLIC_CONTEXT_BASE	0x200000U
#define	PLIC_CONTEXT_STRIDE	0x1000U
#define	PLIC_CONTEXT_THRESHOLD	0x0U
#define	PLIC_CONTEXT_CLAIM	0x4U

#define	PLIC_PRIORITY(n)	(PLIC_PRIORITY_BASE + (n) * sizeof(uint32_t))
#define	PLIC_ENABLE(sc, n, h)						\
	(sc->sc_contexts[h].enable_offset + ((n) / 32) * sizeof(uint32_t))
#define	PLIC_THRESHOLD(sc, h)						\
	(sc->sc_contexts[h].context_offset + PLIC_CONTEXT_THRESHOLD)
#define	PLIC_CLAIM(sc, h)						\
	(sc->sc_contexts[h].context_offset + PLIC_CONTEXT_CLAIM)


struct plic_intrhand {
	TAILQ_ENTRY(plic_intrhand) ih_list;	/* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_flags;
	int ih_irq;			/* IRQ number */
	struct evcount ih_count;
	char *ih_name;
	struct cpu_info *ih_ci;
};

/*
 * One interrupt source may have multiple handlers attached, and each
 * handler may have a different priority level, so we track the maximum
 * and minimum priority levels.
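 *
 * For example (a sketch of how the fields below are used, not a hardware
 * requirement): with one handler at IPL_BIO and another at IPL_NET on the
 * same source, is_irq_min ends up as IPL_BIO and is_irq_max as IPL_NET;
 * plic_irq_dispatch() raises the spl to is_irq_max before walking the
 * handler list.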
 */
struct plic_irqsrc {
	TAILQ_HEAD(, plic_intrhand) is_list;	/* handler list */
	int	is_irq_max;	/* IPL to raise to while handling */
	int	is_irq_min;	/* lowest handler IPL when shared */
};

struct plic_context {
	bus_size_t enable_offset;
	bus_size_t context_offset;
};

struct plic_softc {
	struct device		sc_dev;
	int			sc_node;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	struct plic_irqsrc	*sc_isrcs;
	struct plic_context	sc_contexts[MAXCPUS];
	int			sc_ndev;
	struct interrupt_controller sc_intc;
};
struct plic_softc *plic = NULL;

int	plic_match(struct device *, void *, void *);
void	plic_attach(struct device *, struct device *, void *);
int	plic_irq_handler(void *);
int	plic_irq_dispatch(uint32_t, void *);
void	*plic_intr_establish(int, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	*plic_intr_establish_fdt(void *, int *, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	plic_intr_disestablish(void *);
void	plic_intr_route(void *, int, struct cpu_info *);
void	plic_intr_barrier(void *);

void	plic_splx(int);
int	plic_spllower(int);
int	plic_splraise(int);
void	plic_setipl(int);
void	plic_calc_mask(void);

/* helper functions */
int	plic_get_cpuid(int);
void	plic_set_priority(int, uint32_t);
void	plic_set_threshold(int, uint32_t);
void	plic_intr_route_grid(int, int, int);
void	plic_intr_enable_with_pri(int, uint32_t, int);
void	plic_intr_disable(int, int);


const struct cfattach plic_ca = {
	sizeof(struct plic_softc), plic_match, plic_attach,
};

struct cfdriver plic_cd = {
	NULL, "plic", DV_DULL
};

int plic_attached = 0;

int
plic_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;

	if (plic_attached)
		return 0;	// Only expect one instance of PLIC

	return (OF_is_compatible(faa->fa_node, "riscv,plic0") ||
	    OF_is_compatible(faa->fa_node, "sifive,plic-1.0.0") ||
	    OF_is_compatible(faa->fa_node, "thead,c900-plic"));
}

void
plic_attach(struct device *parent, struct device *dev, void *aux)
{
	struct plic_softc *sc;
	struct fdt_attach_args *faa;
	uint32_t *cells;
	uint32_t irq;
	int cpu;
	int node;
	int len;
	int ncell;
	int context;
	int i;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	if (plic_attached)
		return;

	plic = sc = (struct plic_softc *)dev;
	faa = (struct fdt_attach_args *)aux;

	if (faa->fa_nreg < 1)
		return;

	sc->sc_node = node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;

	/* determine number of devices sending intr to this ic */
	sc->sc_ndev = OF_getpropint(faa->fa_node, "riscv,ndev", -1);
	if (sc->sc_ndev < 0) {
		printf(": unable to resolve number of devices\n");
		return;
	}

	if (sc->sc_ndev >= PLIC_MAX_IRQS) {
		printf(": invalid ndev (%d)\n", sc->sc_ndev);
		return;
	}

	/* map interrupt controller to va space */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!", __func__);

	sc->sc_isrcs = mallocarray(PLIC_MAX_IRQS, sizeof(struct plic_irqsrc),
	    M_DEVBUF, M_ZERO | M_NOWAIT);

	for (irq = 1; irq <= sc->sc_ndev; irq++) {
		TAILQ_INIT(&sc->sc_isrcs[irq].is_list);
		plic_set_priority(irq, 0);	// Mask interrupt
	}

	/*
	 * Calculate the per-cpu enable and context register offsets.
	 *
	 * This is tricky for a few reasons. The PLIC divides the interrupt
	 * enable, threshold, and claim bits by "context".
	 *
	 * The tricky part is that the PLIC spec imposes no restrictions on
	 * how these contexts are laid out. So for example, there is no
	 * guarantee that each CPU will have both a machine mode and a
	 * supervisor context, or that different PLIC implementations will
	 * organize the context registers in the same way. On top of this,
	 * we must handle the fact that cpuid != hartid, as they may have
	 * been renumbered during boot. We perform the following steps:
	 *
	 * 1. Examine the PLIC's "interrupts-extended" property and skip any
	 *    entries that are not for supervisor external interrupts.
	 *
	 * 2. Walk up the device tree to find the corresponding CPU, using
	 *    the node property to identify the cpuid.
	 *
	 * 3. Calculate the register offsets based on the context number.
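	 *
	 * As an illustration (an assumed layout, typical of what QEMU's
	 * "virt" machine generates; the spec does not guarantee it), a
	 * two-hart system might carry:
	 *
	 *	interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
	 *			      <&cpu1_intc 11>, <&cpu1_intc 9>;
	 *
	 * i.e. context 0 is hart 0 M-mode, context 1 is hart 0 S-mode,
	 * context 2 is hart 1 M-mode, context 3 is hart 1 S-mode; only the
	 * supervisor entries (cause 9) are kept by the loop below.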
	 */
	len = OF_getproplen(node, "interrupts-extended");
	if (len <= 0) {
		printf(": could not find interrupts-extended\n");
		return;
	}

	cells = malloc(len, M_TEMP, M_WAITOK);
	ncell = len / sizeof(*cells);
	if (OF_getpropintarray(node, "interrupts-extended", cells, len) < 0) {
		printf(": failed to read interrupts-extended\n");
		free(cells, M_TEMP, len);
		return;
	}

	for (i = 0, context = 0; i < ncell; i += 2, context++) {
		/* Skip M-mode external interrupts */
		if (cells[i + 1] != IRQ_EXTERNAL_SUPERVISOR)
			continue;

		/* Get the corresponding cpuid. */
		cpu = plic_get_cpuid(OF_getnodebyphandle(cells[i]));
		if (cpu < 0)
			continue;

		/*
		 * Set the enable and context register offsets for the CPU.
		 *
		 * We assume the S-mode handler always comes later than the
		 * M-mode handler, but this might be a little fragile.
		 *
		 * XXX
		 * The SiFive spec doesn't list hart0 S-mode enable/contexts
		 * in its memory map, but QEMU emulates hart0 S-mode
		 * enable/contexts; otherwise the following offset calculation
		 * would point at hart1 M-mode enable/contexts.
		 */
		sc->sc_contexts[cpu].enable_offset = PLIC_ENABLE_BASE +
		    context * PLIC_ENABLE_STRIDE;
		sc->sc_contexts[cpu].context_offset = PLIC_CONTEXT_BASE +
		    context * PLIC_CONTEXT_STRIDE;
	}

	free(cells, M_TEMP, len);

	/* Set CPU interrupt priority thresholds to minimum */
	CPU_INFO_FOREACH(cii, ci) {
		plic_set_threshold(ci->ci_cpuid, 0);
	}

	plic_setipl(IPL_HIGH);	/* XXX ??? */
	plic_calc_mask();

	/*
	 * Insert ourselves into the external interrupt handler entry of
	 * the global interrupt handler vector.
	 */
	riscv_intc_intr_establish(IRQ_EXTERNAL_SUPERVISOR, 0,
	    plic_irq_handler, NULL, "plic0");

	/*
	 * From now on spl updates must be enforced through the PLIC, so
	 * the spl* routines are switched over.
	 */
	riscv_set_intr_func(plic_splraise, plic_spllower,
	    plic_splx, plic_setipl);

	plic_attached = 1;

	/* enable external interrupt */
	csr_set(sie, SIE_SEIE);

	sc->sc_intc.ic_node = faa->fa_node;
	sc->sc_intc.ic_cookie = sc;
	sc->sc_intc.ic_establish = plic_intr_establish_fdt;
	sc->sc_intc.ic_disestablish = plic_intr_disestablish;
	sc->sc_intc.ic_route = plic_intr_route;
	// sc->sc_intc.ic_cpu_enable = XXX Per-CPU Initialization?
	sc->sc_intc.ic_barrier = plic_intr_barrier;

	riscv_intr_register_fdt(&sc->sc_intc);

	printf("\n");
}

int
plic_irq_handler(void *frame)
{
	struct plic_softc *sc;
	uint32_t pending;
	uint32_t cpu;
	int handled = 0;

	sc = plic;
	cpu = cpu_number();

	pending = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_CLAIM(sc, cpu));

	if (pending >= sc->sc_ndev) {
		printf("plic0: pending %x\n", pending);
		return 0;
	}

	if (pending) {
		handled = plic_irq_dispatch(pending, frame);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    PLIC_CLAIM(sc, cpu), pending);

//#define DEBUG_INTC
#ifdef DEBUG_INTC
		if (handled == 0) {
			printf("plic handled == 0 on pending %d\n", pending);
		}
#endif /* DEBUG_INTC */
	}

	return handled;
}

int
plic_irq_dispatch(uint32_t irq, void *frame)
{
	int pri, s;
	int handled = 0;
	struct plic_softc *sc;
	struct plic_intrhand *ih;
	void *arg;

#ifdef DEBUG_INTC
	printf("plic irq %d fired\n", irq);
#endif

	sc = plic;
	pri = sc->sc_isrcs[irq].is_irq_max;
	s = plic_splraise(pri);
	TAILQ_FOREACH(ih, &sc->sc_isrcs[irq].is_list, ih_list) {
#ifdef MULTIPROCESSOR
		int need_lock;

		if (ih->ih_flags & IPL_MPSAFE)
			need_lock = 0;
		else
			need_lock = s < IPL_SCHED;

		if (need_lock)
			KERNEL_LOCK();
#endif

		if (ih->ih_arg)
			arg = ih->ih_arg;
		else
			arg = frame;

		intr_enable();
		handled = ih->ih_func(arg);
		intr_disable();
		if (handled)
			ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
		if (need_lock)
			KERNEL_UNLOCK();
#endif
	}

	plic_splx(s);
	return handled;
}

void *
plic_intr_establish(int irqno, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	struct plic_softc *sc = plic;
	struct plic_intrhand *ih;
	u_long sie;

	if (irqno < 0 || irqno >= PLIC_MAX_IRQS)
		panic("plic_intr_establish: bogus irqnumber %d: %s",
		    irqno, name);

	if (ci == NULL)
		ci = &cpu_info_primary;

	ih = malloc(sizeof *ih, M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;
	ih->ih_ci = ci;

	sie = intr_disable();

	TAILQ_INSERT_TAIL(&sc->sc_isrcs[irqno].is_list, ih, ih_list);

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_INTC
	printf("%s irq %d level %d [%s]\n", __func__, irqno, level,
	    name);
#endif

	plic_calc_mask();

	intr_restore(sie);
	return (ih);
}

void *
plic_intr_establish_fdt(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	return plic_intr_establish(cell[0], level, ci, func, arg, name);
}

void
plic_intr_disestablish(void *cookie)
{
	struct plic_softc *sc = plic;
	struct plic_intrhand *ih = cookie;
	int irqno = ih->ih_irq;
	u_long sie;

	sie = intr_disable();

	TAILQ_REMOVE(&sc->sc_isrcs[irqno].is_list, ih, ih_list);
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);

	intr_restore(sie);

	free(ih, M_DEVBUF, 0);
}

void
plic_intr_route(void *cookie, int enable, struct cpu_info *ci)
{
	struct plic_softc *sc = plic;
	struct plic_intrhand *ih = cookie;

	int irq = ih->ih_irq;
	int cpu = ci->ci_cpuid;
	uint32_t min_pri = sc->sc_isrcs[irq].is_irq_min;

	if (enable == IRQ_ENABLE) {
		plic_intr_enable_with_pri(irq, min_pri, cpu);
	} else {
		plic_intr_route_grid(irq, IRQ_DISABLE, cpu);
	}
}

void
plic_intr_barrier(void *cookie)
{
	struct plic_intrhand *ih = cookie;

	sched_barrier(ih->ih_ci);
}

void
plic_splx(int new)
{
	/* XXX
	 * How to handle pending external interrupts?
	 * After setting the new threshold, any pending external
	 * interrupts whose priority is now greater than the threshold
	 * will be passed through the PLIC to the CPU and trigger a new
	 * claim/complete cycle, so there is no need to handle pending
	 * external interrupts here.
	 */
	struct cpu_info *ci = curcpu();

	/* Pending soft interrupts are handled here */
	if (ci->ci_ipending & riscv_smask[new])
		riscv_do_pending_intr(new);

	plic_setipl(new);
}

int
plic_spllower(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;
	plic_splx(new);
	return (old);
}

int
plic_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old;
	old = ci->ci_cpl;

	/*
	 * setipl must always be called because there is a race window
	 * where the variable is updated before the mask is set; if an
	 * interrupt occurs in that window, without the mask always being
	 * set the hardware might not get updated on the next splraise,
	 * completely messing up spl protection.
	 */
	if (old > new)
		new = old;

	plic_setipl(new);

	return (old);
}

void
plic_setipl(int new)
{
	struct cpu_info *ci = curcpu();
	u_long sie;

	/* disable here is only to keep hardware in sync with ci->ci_cpl */
	sie = intr_disable();
	ci->ci_cpl = new;

	/* higher values are higher priority */
	plic_set_threshold(ci->ci_cpuid, new);

	/* trigger deferred timer interrupt if cpl is now low enough */
	if (ci->ci_timer_deferred && new < IPL_CLOCK)
		sbi_set_timer(0);

	intr_restore(sie);
}

/*
 * Update the max/min priority for an interrupt source and enforce the
 * updated priority on the PLIC. This should be called whenever a new
 * handler is attached.
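 *
 * (Behaviour sketch, derived from the code below: a source that still has
 * at least one handler attached is (re)enabled, using its minimum IPL to
 * derive the PLIC priority, while a source whose handler list has become
 * empty is disabled again; the current spl is then re-asserted via
 * plic_setipl().)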
 */
void
plic_calc_mask(void)
{
	struct cpu_info *ci = curcpu();
	struct plic_softc *sc = plic;
	struct plic_intrhand *ih;
	int irq;

	/* PLIC irq 0 is reserved, thus we start from 1 */
	for (irq = 1; irq <= sc->sc_ndev; irq++) {
		int max = IPL_NONE;
		int min = IPL_HIGH;
		TAILQ_FOREACH(ih, &sc->sc_isrcs[irq].is_list, ih_list) {
			if (ih->ih_ipl > max)
				max = ih->ih_ipl;

			if (ih->ih_ipl < min)
				min = ih->ih_ipl;
		}

		if (max == IPL_NONE)
			min = IPL_NONE;

		if (sc->sc_isrcs[irq].is_irq_max == max &&
		    sc->sc_isrcs[irq].is_irq_min == min)
			continue;

		sc->sc_isrcs[irq].is_irq_max = max;
		sc->sc_isrcs[irq].is_irq_min = min;

		/* Enable interrupts at lower levels, clear -> enable */
		/* Set interrupt priority/enable */
		if (min != IPL_NONE) {
			plic_intr_enable_with_pri(irq, min, ci->ci_cpuid);
		} else {
			plic_intr_disable(irq, ci->ci_cpuid);
		}
	}

	plic_setipl(ci->ci_cpl);
}

/***************** helper functions *****************/

/*
 * OpenBSD saves the CPU node in the ci struct, so we can look up the
 * cpuid by matching nodes.
 */
int
plic_get_cpuid(int intc)
{
	uint32_t hart;
	int parent_node;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	/* Check the interrupt controller layout. */
	if (OF_getpropintarray(intc, "#interrupt-cells", &hart,
	    sizeof(hart)) < 0) {
		printf(": could not find #interrupt-cells for phandle %u\n",
		    intc);
		return (-1);
	}

	/*
	 * The parent of the interrupt-controller is the CPU we are
	 * interested in, so search for its OF node index.
	 */
	parent_node = OF_parent(intc);
	CPU_INFO_FOREACH(cii, ci) {
		if (ci->ci_node == parent_node)
			return ci->ci_cpuid;
	}
	return -1;
}

/* update priority for intr source 'irq' */
void
plic_set_priority(int irq, uint32_t pri)
{
	struct plic_softc *sc = plic;
	uint32_t prival;

	/*
	 * The SiFive PLIC only has priority levels 0 - 7, yet OpenBSD
	 * defines priority levels 0 - 12 (levels 1 - 4 are for SOFT*
	 * and level 12 is for IPIs); those should NEVER be passed to
	 * the PLIC.
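	 *
	 * (A worked example, assuming the usual OpenBSD numbering where
	 * the lowest hardware IPL is 5: IPL 5 maps to PLIC priority 1,
	 * IPL 11 maps to PLIC priority 7, and anything outside 5 - 11
	 * maps to 0, which masks the source entirely.)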
	 * So we calculate the PLIC priority in the following way:
	 */
	if (pri <= 4 || pri >= 12)	// invalid input
		prival = 0;		// effectively disable this intr source
	else
		prival = pri - 4;

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_PRIORITY(irq), prival);
}

/* update threshold for 'cpu' */
void
plic_set_threshold(int cpu, uint32_t threshold)
{
	struct plic_softc *sc = plic;
	uint32_t prival;

	if (threshold < 4)	// enable everything (as far as the PLIC is concerned)
		prival = 0;
	else if (threshold >= 12)	// invalid priority level ?
		prival = IPL_HIGH - 4;	// XXX Device-specific high threshold
	else			// everything else
		prival = threshold - 4;	// XXX Device-specific threshold offset

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_THRESHOLD(sc, cpu), prival);
}

/*
 * Turn the route from intr source 'irq' to context 'cpu' on or off,
 * based on 'enable'.
 */
void
plic_intr_route_grid(int irq, int enable, int cpu)
{
	struct plic_softc *sc = plic;
	uint32_t val, mask;

	if (irq == 0)
		return;

	KASSERT(cpu < MAXCPUS);

	mask = (1 << (irq % 32));
	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_ENABLE(sc, irq, cpu));
	if (enable == IRQ_ENABLE)
		val |= mask;
	else
		val &= ~mask;

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_ENABLE(sc, irq, cpu), val);
}

/*
 * Enable intr source 'irq' on cpu 'cpu' by setting:
 * - priority
 * - threshold
 * - enable bit
 */
void
plic_intr_enable_with_pri(int irq, uint32_t min_pri, int cpu)
{
	plic_set_priority(irq, min_pri);
	plic_set_threshold(cpu, min_pri - 1);
	plic_intr_route_grid(irq, IRQ_ENABLE, cpu);
}

void
plic_intr_disable(int irq, int cpu)
{
	plic_set_priority(irq, 0);
	plic_set_threshold(cpu, IPL_HIGH);
	plic_intr_route_grid(irq, IRQ_DISABLE, cpu);
}
/***************** end of helper functions *****************/