/*	$OpenBSD: plic.c,v 1.11 2022/08/09 04:49:08 cheloha Exp $	*/

/*
 * Copyright (c) 2020, Mars Li <mengshi.li.mars@gmail.com>
 * Copyright (c) 2020, Brian Bamsch <bbamsch@google.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/evcount.h>

#include <machine/bus.h>
#include <machine/fdt.h>
#include <machine/cpu.h>
#include <machine/sbi.h>
#include "riscv64/dev/riscv_cpu_intc.h"

#include <dev/ofw/openfirm.h>
#include <dev/ofw/fdt.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * Devices marked as 'sifive,plic-1.0.0' support at most 1024 interrupt
 * sources, of which source 0 is defined as non-existent by the RISC-V
 * Privileged Spec.
 */

#define	PLIC_MAX_IRQS		1024

#define	PLIC_PRIORITY_BASE	0x000000U

#define	PLIC_ENABLE_BASE	0x002000U
#define	PLIC_ENABLE_STRIDE	0x80U
#define	IRQ_ENABLE		1
#define	IRQ_DISABLE		0

#define	PLIC_CONTEXT_BASE	0x200000U
#define	PLIC_CONTEXT_STRIDE	0x1000U
#define	PLIC_CONTEXT_THRESHOLD	0x0U
#define	PLIC_CONTEXT_CLAIM	0x4U

#define	PLIC_PRIORITY(n)	(PLIC_PRIORITY_BASE + (n) * sizeof(uint32_t))
#define	PLIC_ENABLE(sc, n, h)						\
    (sc->sc_contexts[h].enable_offset + ((n) / 32) * sizeof(uint32_t))
#define	PLIC_THRESHOLD(sc, h)						\
    (sc->sc_contexts[h].context_offset + PLIC_CONTEXT_THRESHOLD)
#define	PLIC_CLAIM(sc, h)						\
    (sc->sc_contexts[h].context_offset + PLIC_CONTEXT_CLAIM)

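/*
 * Worked example, derived purely from the macros above: for context 1,
 * enable_offset is 0x2000 + 1 * 0x80 = 0x2080, so the enable bit for
 * source 10 lives at 0x2080 + (10 / 32) * 4 = 0x2080, bit (10 % 32).
 * The matching claim/complete register sits at 0x200000 + 1 * 0x1000 +
 * 0x4 = 0x201004.
 */
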
struct plic_intrhand {
	TAILQ_ENTRY(plic_intrhand) ih_list;	/* link on intrq list */
	int (*ih_func)(void *);		/* handler */
	void *ih_arg;			/* arg for handler */
	int ih_ipl;			/* IPL_* */
	int ih_flags;
	int ih_irq;			/* IRQ number */
	struct evcount ih_count;
	char *ih_name;
	struct cpu_info *ih_ci;
};

/*
 * One interrupt source can have multiple handlers attached, each
 * potentially at a different priority level; we track the max and min
 * levels per source.
 */
struct plic_irqsrc {
	TAILQ_HEAD(, plic_intrhand) is_list;	/* handler list */
	int	is_irq_max;	/* IRQ to mask while handling */
	int	is_irq_min;	/* lowest IRQ when shared */
};

struct plic_context {
	bus_size_t enable_offset;
	bus_size_t context_offset;
};

struct plic_softc {
	struct device		sc_dev;
	int			sc_node;
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	struct plic_irqsrc	*sc_isrcs;
	struct plic_context	sc_contexts[MAXCPUS];
	int			sc_ndev;
	struct interrupt_controller sc_intc;
};
struct plic_softc *plic = NULL;

int	plic_match(struct device *, void *, void *);
void	plic_attach(struct device *, struct device *, void *);
int	plic_irq_handler(void *);
int	plic_irq_dispatch(uint32_t, void *);
void	*plic_intr_establish(int, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	*plic_intr_establish_fdt(void *, int *, int, struct cpu_info *,
	    int (*)(void *), void *, char *);
void	plic_intr_disestablish(void *);
void	plic_intr_route(void *, int, struct cpu_info *);
void	plic_intr_barrier(void *);

void	plic_splx(int);
int	plic_spllower(int);
int	plic_splraise(int);
void	plic_setipl(int);
void	plic_calc_mask(void);

/* helper functions */
int	plic_get_cpuid(int);
void	plic_set_priority(int, uint32_t);
void	plic_set_threshold(int, uint32_t);
void	plic_intr_route_grid(int, int, int);
void	plic_intr_enable_with_pri(int, uint32_t, int);
void	plic_intr_disable(int, int);

const struct cfattach plic_ca = {
	sizeof(struct plic_softc), plic_match, plic_attach,
};

struct cfdriver plic_cd = {
	NULL, "plic", DV_DULL
};

int plic_attached = 0;

int
plic_match(struct device *parent, void *cfdata, void *aux)
{
	struct fdt_attach_args *faa = aux;

	if (plic_attached)
		return 0;	/* only expect one instance of PLIC */

	return (OF_is_compatible(faa->fa_node, "riscv,plic0") ||
	    OF_is_compatible(faa->fa_node, "sifive,plic-1.0.0"));
}

void
plic_attach(struct device *parent, struct device *dev, void *aux)
{
	struct plic_softc *sc;
	struct fdt_attach_args *faa;
	uint32_t *cells;
	uint32_t irq;
	int cpu;
	int node;
	int len;
	int ncell;
	int context;
	int i;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	if (plic_attached)
		return;

	plic = sc = (struct plic_softc *)dev;
	faa = (struct fdt_attach_args *)aux;

	if (faa->fa_nreg < 1)
		return;

	sc->sc_node = node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;

	/* determine the number of devices sending intrs to this ic */
	sc->sc_ndev = OF_getpropint(faa->fa_node, "riscv,ndev", -1);
	if (sc->sc_ndev < 0) {
		printf(": unable to resolve number of devices\n");
		return;
	}

	if (sc->sc_ndev >= PLIC_MAX_IRQS) {
		printf(": invalid ndev (%d)\n", sc->sc_ndev);
		return;
	}

	/* map the interrupt controller into va space */
	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh))
		panic("%s: bus_space_map failed!", __func__);

	sc->sc_isrcs = mallocarray(PLIC_MAX_IRQS, sizeof(struct plic_irqsrc),
	    M_DEVBUF, M_ZERO | M_NOWAIT);
	if (sc->sc_isrcs == NULL)
		panic("%s: could not allocate interrupt sources", __func__);

	for (irq = 1; irq <= sc->sc_ndev; irq++) {
		TAILQ_INIT(&sc->sc_isrcs[irq].is_list);
		plic_set_priority(irq, 0);	/* mask interrupt */
	}

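	/*
	 * Example "interrupts-extended" layout for a two-hart system
	 * (illustrative values; 11 and 9 are the standard RISC-V causes
	 * for machine and supervisor external interrupts):
	 *
	 *	interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
	 *	                      <&cpu1_intc 11>, <&cpu1_intc 9>;
	 *
	 * Contexts are numbered in the order the <phandle, irq> pairs
	 * appear, so here cpu0 S-mode is context 1 and cpu1 S-mode is
	 * context 3.
	 */
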
	/*
	 * Calculate the per-cpu enable and context register offsets.
	 *
	 * This is tricky for a few reasons. The PLIC divides the interrupt
	 * enable, threshold, and claim bits by "context".
	 *
	 * The tricky part is that the PLIC spec imposes no restrictions on
	 * how these contexts are laid out. So, for example, there is no
	 * guarantee that each CPU will have both a machine mode and a
	 * supervisor context, or that different PLIC implementations will
	 * organize the context registers in the same way. On top of this,
	 * we must handle the fact that cpuid != hartid, as they may have
	 * been renumbered during boot. We perform the following steps:
	 *
	 * 1. Examine the PLIC's "interrupts-extended" property and skip any
	 *    entries that are not for supervisor external interrupts.
	 *
	 * 2. Walk up the device tree to find the corresponding CPU, and use
	 *    its node to look up the cpuid.
	 *
	 * 3. Calculate the register offsets based on the context number.
	 */
	len = OF_getproplen(node, "interrupts-extended");
	if (len <= 0) {
		printf(": could not find interrupts-extended\n");
		return;
	}

	cells = malloc(len, M_TEMP, M_WAITOK);
	ncell = len / sizeof(*cells);
	if (OF_getpropintarray(node, "interrupts-extended", cells, len) < 0) {
		printf(": failed to read interrupts-extended\n");
		free(cells, M_TEMP, len);
		return;
	}

	for (i = 0, context = 0; i < ncell; i += 2, context++) {
		/* Skip anything but S-mode external interrupts. */
		if (cells[i + 1] != IRQ_EXTERNAL_SUPERVISOR)
			continue;

		/* Get the corresponding cpuid. */
		cpu = plic_get_cpuid(OF_getnodebyphandle(cells[i]));
		if (cpu < 0)
			continue;

		/*
		 * Set the enable and context register offsets for the CPU.
		 *
		 * We assume the S-mode handler always comes later than the
		 * M-mode handler, but this might be a little fragile.
		 *
		 * XXX
		 * The sifive spec doesn't list hart0 S-mode enable/contexts
		 * in its memory map, but QEMU emulates hart0 S-mode
		 * enable/contexts anyway. Otherwise the following offset
		 * calculation would point at hart1 M-mode enable/contexts.
		 */
		sc->sc_contexts[cpu].enable_offset = PLIC_ENABLE_BASE +
		    context * PLIC_ENABLE_STRIDE;
		sc->sc_contexts[cpu].context_offset = PLIC_CONTEXT_BASE +
		    context * PLIC_CONTEXT_STRIDE;
	}

	free(cells, M_TEMP, len);

	/* Set CPU interrupt priority thresholds to minimum */
	CPU_INFO_FOREACH(cii, ci) {
		plic_set_threshold(ci->ci_cpuid, 0);
	}

	plic_setipl(IPL_HIGH);	/* XXX ??? */
	plic_calc_mask();

	/*
	 * Insert self into the external interrupt handler entry in
	 * the global interrupt handler vector.
	 */
	riscv_intc_intr_establish(IRQ_EXTERNAL_SUPERVISOR, 0,
	    plic_irq_handler, NULL, "plic0");

	/*
	 * From now on, spl updates must be enforced through the plic,
	 * so redirect the spl* routines here.
	 */
	riscv_set_intr_func(plic_splraise, plic_spllower,
	    plic_splx, plic_setipl);

	plic_attached = 1;

	/* enable external interrupts */
	csr_set(sie, SIE_SEIE);

	sc->sc_intc.ic_node = faa->fa_node;
	sc->sc_intc.ic_cookie = sc;
	sc->sc_intc.ic_establish = plic_intr_establish_fdt;
	sc->sc_intc.ic_disestablish = plic_intr_disestablish;
	sc->sc_intc.ic_route = plic_intr_route;
	// sc->sc_intc.ic_cpu_enable = XXX Per-CPU Initialization?
	sc->sc_intc.ic_barrier = plic_intr_barrier;

	riscv_intr_register_fdt(&sc->sc_intc);

	printf("\n");
}

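/*
 * External interrupt entry point, established on the CPU intc above.
 *
 * Claim/complete protocol (summarizing the PLIC spec): reading a
 * context's claim register returns the ID of the highest-priority
 * pending source and atomically claims it; a read of 0 means nothing
 * was pending. Writing the same ID back completes the interrupt and
 * lets that source fire again.
 */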
int
plic_irq_handler(void *frame)
{
	struct plic_softc *sc;
	uint32_t pending;
	uint32_t cpu;
	int handled = 0;

	sc = plic;
	cpu = cpu_number();

	pending = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_CLAIM(sc, cpu));

	/* valid claimed IDs run from 1 to sc_ndev inclusive */
	if (pending > sc->sc_ndev) {
		printf("plic0: pending %x\n", pending);
		return 0;
	}

	if (pending) {
		handled = plic_irq_dispatch(pending, frame);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    PLIC_CLAIM(sc, cpu), pending);

//#define DEBUG_INTC
#ifdef DEBUG_INTC
		if (handled == 0) {
			printf("plic handled == 0 on pending %d\n", pending);
		}
#endif /* DEBUG_INTC */
	}

	return handled;
}

int
plic_irq_dispatch(uint32_t irq, void *frame)
{
	int pri, s;
	int handled = 0;
	struct plic_softc *sc;
	struct plic_intrhand *ih;
	void *arg;

#ifdef DEBUG_INTC
	printf("plic irq %d fired\n", irq);
#endif

	sc = plic;
	pri = sc->sc_isrcs[irq].is_irq_max;
	s = plic_splraise(pri);
	TAILQ_FOREACH(ih, &sc->sc_isrcs[irq].is_list, ih_list) {
#ifdef MULTIPROCESSOR
		int need_lock;

		if (ih->ih_flags & IPL_MPSAFE)
			need_lock = 0;
		else
			need_lock = s < IPL_SCHED;

		if (need_lock)
			KERNEL_LOCK();
#endif

		if (ih->ih_arg)
			arg = ih->ih_arg;
		else
			arg = frame;

		intr_enable();
		handled = ih->ih_func(arg);
		intr_disable();
		if (handled)
			ih->ih_count.ec_count++;

#ifdef MULTIPROCESSOR
		if (need_lock)
			KERNEL_UNLOCK();
#endif
	}

	plic_splx(s);
	return handled;
}

void *
plic_intr_establish(int irqno, int level, struct cpu_info *ci,
    int (*func)(void *), void *arg, char *name)
{
	struct plic_softc *sc = plic;
	struct plic_intrhand *ih;
	u_long sie;

	if (irqno < 0 || irqno >= PLIC_MAX_IRQS)
		panic("plic_intr_establish: bogus irqnumber %d: %s",
		    irqno, name);

	if (ci == NULL)
		ci = &cpu_info_primary;

	ih = malloc(sizeof *ih, M_DEVBUF, M_WAITOK);
	ih->ih_func = func;
	ih->ih_arg = arg;
	ih->ih_ipl = level & IPL_IRQMASK;
	ih->ih_flags = level & IPL_FLAGMASK;
	ih->ih_irq = irqno;
	ih->ih_name = name;
	ih->ih_ci = ci;

	sie = intr_disable();

	TAILQ_INSERT_TAIL(&sc->sc_isrcs[irqno].is_list, ih, ih_list);

	if (name != NULL)
		evcount_attach(&ih->ih_count, name, &ih->ih_irq);

#ifdef DEBUG_INTC
	printf("%s irq %d level %d [%s]\n", __func__, irqno, level,
	    name);
#endif

	plic_calc_mask();

	intr_restore(sie);
	return (ih);
}

void *
plic_intr_establish_fdt(void *cookie, int *cell, int level,
    struct cpu_info *ci, int (*func)(void *), void *arg, char *name)
{
	return plic_intr_establish(cell[0], level, ci, func, arg, name);
}

void
plic_intr_disestablish(void *cookie)
{
	struct plic_softc *sc = plic;
	struct plic_intrhand *ih = cookie;
	int irqno = ih->ih_irq;
	u_long sie;

	sie = intr_disable();

	TAILQ_REMOVE(&sc->sc_isrcs[irqno].is_list, ih, ih_list);
	if (ih->ih_name != NULL)
		evcount_detach(&ih->ih_count);

	intr_restore(sie);

	free(ih, M_DEVBUF, sizeof(*ih));
}

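/*
 * A minimal usage sketch from a leaf driver's attach routine ("myintr"
 * and "sc" are illustrative, not part of this file):
 *
 *	void *ih = fdt_intr_establish(faa->fa_node, IPL_BIO,
 *	    myintr, sc, sc->sc_dev.dv_xname);
 *
 * The generic fdt_intr_establish() path lands in
 * plic_intr_establish_fdt() via the interrupt controller registered
 * with riscv_intr_register_fdt() in plic_attach().
 */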
void
plic_intr_route(void *cookie, int enable, struct cpu_info *ci)
{
	struct plic_softc *sc = plic;
	struct plic_intrhand *ih = cookie;
	int irq = ih->ih_irq;
	int cpu = ci->ci_cpuid;
	uint32_t min_pri = sc->sc_isrcs[irq].is_irq_min;

	if (enable == IRQ_ENABLE) {
		plic_intr_enable_with_pri(irq, min_pri, cpu);
	} else {
		plic_intr_route_grid(irq, IRQ_DISABLE, cpu);
	}
}

void
plic_intr_barrier(void *cookie)
{
	struct plic_intrhand *ih = cookie;

	sched_barrier(ih->ih_ci);
}

void
plic_splx(int new)
{
	/*
	 * XXX
	 * Pending external interrupts need no special handling here:
	 * after the new threshold is set, any pending external
	 * interrupt whose priority is now greater than the threshold
	 * is passed through the plic to the cpu and triggers a new
	 * claim/complete cycle.
	 */
	struct cpu_info *ci = curcpu();

	/* Pending software interrupts are handled here. */
	if (ci->ci_ipending & riscv_smask[new])
		riscv_do_pending_intr(new);

	plic_setipl(new);
}

int
plic_spllower(int new)
{
	struct cpu_info *ci = curcpu();
	int old = ci->ci_cpl;
	plic_splx(new);
	return (old);
}

int
plic_splraise(int new)
{
	struct cpu_info *ci = curcpu();
	int old;
	old = ci->ci_cpl;

	/*
	 * setipl must always be called because there is a race window
	 * where the variable is updated before the mask is set.
	 * If an interrupt occurs in that window and the mask is not
	 * set, the hardware might not get updated on the next
	 * splraise, completely messing up spl protection.
	 */
	if (old > new)
		new = old;

	plic_setipl(new);

	return (old);
}

void
plic_setipl(int new)
{
	struct cpu_info *ci = curcpu();
	u_long sie;

	/* disabling here is only to keep hardware in sync with ci->ci_cpl */
	sie = intr_disable();
	ci->ci_cpl = new;

	/* higher values are higher priority */
	plic_set_threshold(ci->ci_cpuid, new);

	/* trigger deferred timer interrupt if cpl is now low enough */
	if (ci->ci_timer_deferred && new < IPL_CLOCK)
		sbi_set_timer(0);

	intr_restore(sie);
}

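/*
 * With the functions above installed by riscv_set_intr_func(), the
 * usual spl pattern, e.g. (sketch):
 *
 *	int s = splbio();	-> plic_splraise(IPL_BIO)
 *	...
 *	splx(s);		-> plic_splx(s)
 *
 * is backed by the per-context PLIC threshold register rather than
 * by masking individual sources.
 */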
/*
 * Update the max/min priority levels for an interrupt source and
 * enforce the updated priority in the plic. This should be called
 * whenever a handler is attached or detached.
 */
void
plic_calc_mask(void)
{
	struct cpu_info *ci = curcpu();
	struct plic_softc *sc = plic;
	struct plic_intrhand *ih;
	int irq;

	/* PLIC irq 0 is reserved, thus we start from 1 */
	for (irq = 1; irq <= sc->sc_ndev; irq++) {
		int max = IPL_NONE;
		int min = IPL_HIGH;
		TAILQ_FOREACH(ih, &sc->sc_isrcs[irq].is_list, ih_list) {
			if (ih->ih_ipl > max)
				max = ih->ih_ipl;

			if (ih->ih_ipl < min)
				min = ih->ih_ipl;
		}

		if (max == IPL_NONE)
			min = IPL_NONE;

		if (sc->sc_isrcs[irq].is_irq_max == max &&
		    sc->sc_isrcs[irq].is_irq_min == min)
			continue;

		sc->sc_isrcs[irq].is_irq_max = max;
		sc->sc_isrcs[irq].is_irq_min = min;

		/* Enable interrupts at lower levels, clear -> enable */
		/* Set interrupt priority/enable */
		if (min != IPL_NONE) {
			plic_intr_enable_with_pri(irq, min, ci->ci_cpuid);
		} else {
			plic_intr_disable(irq, ci->ci_cpuid);
		}
	}

	plic_setipl(ci->ci_cpl);
}

/***************** helper functions *****************/

/*
 * OpenBSD saves the cpu node info in the ci struct, so we can look up
 * the cpuid by matching nodes.
 */
int
plic_get_cpuid(int intc)
{
	uint32_t hart;
	int parent_node;
	struct cpu_info *ci;
	CPU_INFO_ITERATOR cii;

	/* Check the interrupt controller layout. */
	if (OF_getpropintarray(intc, "#interrupt-cells", &hart,
	    sizeof(hart)) < 0) {
		printf(": could not find #interrupt-cells for phandle %u\n",
		    intc);
		return (-1);
	}

	/*
	 * The parent of the interrupt-controller is the CPU we are
	 * interested in, so search for its OF node index.
	 */
	parent_node = OF_parent(intc);
	CPU_INFO_FOREACH(cii, ci) {
		if (ci->ci_node == parent_node)
			return ci->ci_cpuid;
	}
	return -1;
}

/* update priority for intr src 'irq' */
void
plic_set_priority(int irq, uint32_t pri)
{
	struct plic_softc *sc = plic;
	uint32_t prival;

	/*
	 * The sifive plic only has priority levels 0 - 7, yet OpenBSD
	 * defines priority levels 0 - 12 (levels 1 - 4 are for SOFT*,
	 * level 12 is for IPI); those must NEVER be passed to the plic.
	 * So we translate the plic priority as follows:
	 */
	if (pri <= 4 || pri >= 12)	/* invalid input */
		prival = 0;	/* effectively disable this intr source */
	else
		prival = pri - 4;

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_PRIORITY(irq), prival);
}

/* update threshold for 'cpu' */
void
plic_set_threshold(int cpu, uint32_t threshold)
{
	struct plic_softc *sc = plic;
	uint32_t prival;

	if (threshold < 4)	/* enable everything (as far as plic is concerned) */
		prival = 0;
	else if (threshold >= 12)	/* invalid priority level ? */
		prival = IPL_HIGH - 4;	/* XXX Device-specific high threshold */
	else	/* everything else */
		prival = threshold - 4;	/* XXX Device-specific threshold offset */

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_THRESHOLD(sc, cpu), prival);
}

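/*
 * Worked example of the two translations above (pure arithmetic):
 * an IPL in the range 5 - 11 is written to the plic as priority
 * 1 - 7, and a threshold of 4 or below is written as 0. A source
 * at IPL n is therefore delivered only to a cpu whose current spl
 * is strictly below n.
 */
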
/*
 * Turn on/off the route from intr source 'irq' to context 'cpu'
 * based on 'enable'.
 */
void
plic_intr_route_grid(int irq, int enable, int cpu)
{
	struct plic_softc *sc = plic;
	uint32_t val, mask;

	if (irq == 0)
		return;

	KASSERT(cpu < MAXCPUS);

	mask = (1 << (irq % 32));
	val = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_ENABLE(sc, irq, cpu));
	if (enable == IRQ_ENABLE)
		val |= mask;
	else
		val &= ~mask;

	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
	    PLIC_ENABLE(sc, irq, cpu), val);
}

/*
 * Enable intr src 'irq' to cpu 'cpu' by setting:
 * - priority
 * - threshold
 * - enable bit
 */
void
plic_intr_enable_with_pri(int irq, uint32_t min_pri, int cpu)
{
	plic_set_priority(irq, min_pri);
	plic_set_threshold(cpu, min_pri - 1);
	plic_intr_route_grid(irq, IRQ_ENABLE, cpu);
}

void
plic_intr_disable(int irq, int cpu)
{
	plic_set_priority(irq, 0);
	plic_set_threshold(cpu, IPL_HIGH);
	plic_intr_route_grid(irq, IRQ_DISABLE, cpu);
}
/***************** end of helper functions *****************/
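
/*
 * Worked example for plic_intr_route_grid() (pure arithmetic): for
 * source 38 in a context whose enable_offset is 0x2080, the enable
 * word lives at 0x2080 + (38 / 32) * 4 = 0x2084 and the bit within
 * that word is 1 << (38 % 32), i.e. bit 6.
 */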