/* $OpenBSD: interrupt.c,v 1.42 2024/10/25 08:08:24 mpi Exp $ */
/* $NetBSD: interrupt.c,v 1.46 2000/06/03 20:47:36 thorpej Exp $ */

/*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Keith Bostic, Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*
 * Additional Copyright (c) 1997 by Matthew Jacob for NASA/Ames Research Center.
 * Redistribute and modify at will, leaving only this additional copyright
 * notice.
 */
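/*
 * Interrupt handling for OpenBSD/alpha: management of the I/O portion
 * of the System Control Block (SCB) vector table, the interrupt()
 * handler invoked for hardware interrupts, default machine check
 * handling, badaddr() probing, and the machine-dependent implementation
 * of the softintr_*() soft interrupt interface.
 */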
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/evcount.h>

#include <uvm/uvm_extern.h>

#include <machine/atomic.h>
#include <machine/autoconf.h>
#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/rpb.h>
#include <machine/frame.h>
#include <machine/cpuconf.h>

#include "apecs.h"
#include "cia.h"
#include "lca.h"
#include "tcasic.h"

extern struct evcount clk_count;

struct scbvec scb_iovectab[SCB_VECTOIDX(SCB_SIZE - SCB_IOVECBASE)];

void	scb_stray(void *, u_long);

/*
 * True if the system has any non-level interrupts which are shared
 * on the same pin.
 */
int	intr_shared_edge;

void
scb_init(void)
{
	u_long i;

	for (i = 0; i < SCB_NIOVECS; i++) {
		scb_iovectab[i].scb_func = scb_stray;
		scb_iovectab[i].scb_arg = NULL;
	}
}

void
scb_stray(void *arg, u_long vec)
{

	printf("WARNING: stray interrupt, vector 0x%lx\n", vec);
}

void
scb_set(u_long vec, void (*func)(void *, u_long), void *arg)
{
	u_long idx;
	int s;

	s = splhigh();

	if (vec < SCB_IOVECBASE || vec >= SCB_SIZE ||
	    (vec & (SCB_VECSIZE - 1)) != 0)
		panic("scb_set: bad vector 0x%lx", vec);

	idx = SCB_VECTOIDX(vec - SCB_IOVECBASE);

	if (scb_iovectab[idx].scb_func != scb_stray)
		panic("scb_set: vector 0x%lx already occupied", vec);

	scb_iovectab[idx].scb_func = func;
	scb_iovectab[idx].scb_arg = arg;

	splx(s);
}

#ifdef unused
u_long
scb_alloc(void (*func)(void *, u_long), void *arg)
{
	u_long vec, idx;
	int s;

	s = splhigh();

	/*
	 * Allocate "downwards", to avoid bumping into
	 * interrupts which are likely to be at the lower
	 * vector numbers.
	 */
	for (vec = SCB_SIZE - SCB_VECSIZE;
	     vec >= SCB_IOVECBASE; vec -= SCB_VECSIZE) {
		idx = SCB_VECTOIDX(vec - SCB_IOVECBASE);
		if (scb_iovectab[idx].scb_func == scb_stray) {
			scb_iovectab[idx].scb_func = func;
			scb_iovectab[idx].scb_arg = arg;
			splx(s);
			return (vec);
		}
	}

	splx(s);

	return (SCB_ALLOC_FAILED);
}
#endif

void
scb_free(u_long vec)
{
	u_long idx;
	int s;

	s = splhigh();

	if (vec < SCB_IOVECBASE || vec >= SCB_SIZE ||
	    (vec & (SCB_VECSIZE - 1)) != 0)
		panic("scb_free: bad vector 0x%lx", vec);

	idx = SCB_VECTOIDX(vec - SCB_IOVECBASE);

	if (scb_iovectab[idx].scb_func == scb_stray)
		panic("scb_free: vector 0x%lx is empty", vec);

	scb_iovectab[idx].scb_func = scb_stray;
	scb_iovectab[idx].scb_arg = (void *) vec;

	splx(s);
}
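/*
 * interrupt:
 *
 *	Called from the low-level interrupt entry for every hardware
 *	interrupt; `a0' carries the interrupt class reported by PALcode.
 *	I/O device interrupts are dispatched through the scb_iovectab[]
 *	entries installed with scb_set() above, so a (hypothetical)
 *	chipset driver that registered
 *
 *		scb_set(vec, mychip_iointr, sc);
 *
 *	has mychip_iointr(sc, vec) invoked from here for that vector.
 */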
void
interrupt(unsigned long a0, unsigned long a1, unsigned long a2,
    struct trapframe *framep)
{
	struct cpu_info *ci = curcpu();

	switch (a0) {
	case ALPHA_INTR_XPROC:	/* interprocessor interrupt */
#if defined(MULTIPROCESSOR)
		atomic_add_ulong(&ci->ci_idepth, 1);

		alpha_ipi_process(ci, framep);

		/*
		 * Handle inter-console messages if we're the primary
		 * CPU.
		 */
		if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id &&
		    hwrpb->rpb_txrdy != 0)
			cpu_iccb_receive();

		atomic_sub_ulong(&ci->ci_idepth, 1);
#else
		printf("WARNING: received interprocessor interrupt!\n");
#endif /* MULTIPROCESSOR */
		break;

	case ALPHA_INTR_CLOCK:	/* clock interrupt */
		atomic_add_int(&uvmexp.intrs, 1);
		if (CPU_IS_PRIMARY(ci))
			clk_count.ec_count++;
		if (platform.clockintr)
			(*platform.clockintr)(framep);
		break;

	case ALPHA_INTR_ERROR:	/* Machine Check or Correctable Error */
		atomic_add_ulong(&ci->ci_idepth, 1);
		a0 = alpha_pal_rdmces();
		if (platform.mcheck_handler)
			(*platform.mcheck_handler)(a0, framep, a1, a2);
		else
			machine_check(a0, framep, a1, a2);
		atomic_sub_ulong(&ci->ci_idepth, 1);
		break;

	case ALPHA_INTR_DEVICE:	/* I/O device interrupt */
	    {
		struct scbvec *scb;

		KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE);

		atomic_add_ulong(&ci->ci_idepth, 1);
		atomic_add_int(&uvmexp.intrs, 1);
		scb = &scb_iovectab[SCB_VECTOIDX(a1 - SCB_IOVECBASE)];
		(*scb->scb_func)(scb->scb_arg, a1);
		atomic_sub_ulong(&ci->ci_idepth, 1);
		break;
	    }

	case ALPHA_INTR_PERF:	/* performance counter interrupt */
		printf("WARNING: received performance counter interrupt!\n");
		break;

	case ALPHA_INTR_PASSIVE:
#if 0
		printf("WARNING: received passive release interrupt vec "
		    "0x%lx\n", a1);
#endif
		break;

	default:
		printf("unexpected interrupt: type 0x%lx vec 0x%lx "
		    "a2 0x%lx"
#if defined(MULTIPROCESSOR)
		    " cpu %lu"
#endif
		    "\n", a0, a1, a2
#if defined(MULTIPROCESSOR)
		    , ci->ci_cpuid
#endif
		    );
		panic("interrupt");
		/* NOTREACHED */
	}
}
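/*
 * machine_check:
 *
 *	Default machine check / correctable error handler, used when the
 *	platform does not provide its own mcheck_handler.  Machine checks
 *	that were announced via the mc_expected flag (see badaddr_read()
 *	below) are absorbed; unexpected ones are fatal.
 */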
void
machine_check(unsigned long mces, struct trapframe *framep,
    unsigned long vector, unsigned long param)
{
	const char *type;
	struct mchkinfo *mcp;

	mcp = &curcpu()->ci_mcinfo;
	/* Make sure it's an error we know about. */
	if ((mces & (ALPHA_MCES_MIP|ALPHA_MCES_SCE|ALPHA_MCES_PCE)) == 0) {
		type = "fatal machine check or error (unknown type)";
		goto fatal;
	}

	/* Machine checks. */
	if (mces & ALPHA_MCES_MIP) {
		/* If we weren't expecting it, then we punt. */
		if (!mcp->mc_expected) {
			type = "unexpected machine check";
			goto fatal;
		}
		mcp->mc_expected = 0;
		mcp->mc_received = 1;
	}

	/* System correctable errors. */
	if (mces & ALPHA_MCES_SCE)
		printf("Warning: received system correctable error.\n");

	/* Processor correctable errors. */
	if (mces & ALPHA_MCES_PCE)
		printf("Warning: received processor correctable error.\n");

	/* Clear pending machine checks and correctable errors */
	alpha_pal_wrmces(mces);
	return;

fatal:
	/* Clear pending machine checks and correctable errors */
	alpha_pal_wrmces(mces);

	printf("\n");
	printf("%s:\n", type);
	printf("\n");
	printf("    mces    = 0x%lx\n", mces);
	printf("    vector  = 0x%lx\n", vector);
	printf("    param   = 0x%lx\n", param);
	printf("    pc      = 0x%lx\n", framep->tf_regs[FRAME_PC]);
	printf("    ra      = 0x%lx\n", framep->tf_regs[FRAME_RA]);
	printf("    curproc = %p\n", curproc);
	if (curproc != NULL)
		printf("        pid = %d, comm = %s\n", curproc->p_p->ps_pid,
		    curproc->p_p->ps_comm);
	printf("\n");
	panic("machine check");
}

#if NAPECS > 0 || NCIA > 0 || NLCA > 0 || NTCASIC > 0

int
badaddr(void *addr, size_t size)
{
	return(badaddr_read(addr, size, NULL));
}

int
badaddr_read(void *addr, size_t size, void *rptr)
{
	struct mchkinfo *mcp = &curcpu()->ci_mcinfo;
	long rcpt;
	int rv;

	/* Get rid of any stale machine checks that have been waiting. */
	alpha_pal_draina();

	/* Tell the trap code to expect a machine check. */
	mcp->mc_received = 0;
	mcp->mc_expected = 1;

	/* Read from the test address, and make sure the read happens. */
	alpha_mb();
	switch (size) {
	case sizeof (u_int8_t):
		rcpt = *(volatile u_int8_t *)addr;
		break;

	case sizeof (u_int16_t):
		rcpt = *(volatile u_int16_t *)addr;
		break;

	case sizeof (u_int32_t):
		rcpt = *(volatile u_int32_t *)addr;
		break;

	case sizeof (u_int64_t):
		rcpt = *(volatile u_int64_t *)addr;
		break;

	default:
		panic("badaddr: invalid size (%ld)", size);
	}
	alpha_mb();
	alpha_mb();	/* MAGIC ON SOME SYSTEMS */

	/* Make sure we took the machine check, if we caused one. */
	alpha_pal_draina();

	/* disallow further machine checks */
	mcp->mc_expected = 0;

	rv = mcp->mc_received;
	mcp->mc_received = 0;

	/*
	 * And copy back read results (if no fault occurred).
	 */
	if (rptr && rv == 0) {
		switch (size) {
		case sizeof (u_int8_t):
			*(volatile u_int8_t *)rptr = rcpt;
			break;

		case sizeof (u_int16_t):
			*(volatile u_int16_t *)rptr = rcpt;
			break;

		case sizeof (u_int32_t):
			*(volatile u_int32_t *)rptr = rcpt;
			break;

		case sizeof (u_int64_t):
			*(volatile u_int64_t *)rptr = rcpt;
			break;
		}
	}
	/* Return non-zero (i.e. true) if it's a bad address. */
	return (rv);
}

#endif /* NAPECS > 0 || NCIA > 0 || NLCA > 0 || NTCASIC > 0 */

struct alpha_soft_intr alpha_soft_intrs[SI_NSOFT];

/*
 * softintr_init:
 *
 *	Initialize the software interrupt system.
 */
void
softintr_init()
{
	struct alpha_soft_intr *asi;
	int i;

	for (i = 0; i < SI_NSOFT; i++) {
		asi = &alpha_soft_intrs[i];
		TAILQ_INIT(&asi->softintr_q);
		mtx_init(&asi->softintr_mtx, IPL_HIGH);
		asi->softintr_siq = i;
	}
}
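/*
 * Typical use of the soft interrupt interface by a (hypothetical)
 * driver, for illustration only:
 *
 *	sc->sc_sih = softintr_establish(IPL_SOFTNET, mydrv_soft, sc);
 *	...
 *	softintr_schedule(sc->sc_sih);	(from the hard interrupt handler)
 *
 * The scheduled handler then runs from softintr_dispatch() below, at
 * software interrupt time rather than at the hardware IPL.
 */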
/*
 * softintr_dispatch:
 *
 *	Process pending software interrupts.
 */
void
softintr_dispatch()
{
	struct alpha_soft_intr *asi;
	struct alpha_soft_intrhand *sih;
	u_int64_t n, i;

#if defined(MULTIPROCESSOR)
	__mp_lock(&kernel_lock);
#endif

	while ((n = atomic_loadlatch_ulong(&ssir, 0)) != 0) {
		for (i = 0; i < SI_NSOFT; i++) {
			if ((n & (1 << i)) == 0)
				continue;

			asi = &alpha_soft_intrs[i];

			for (;;) {
				mtx_enter(&asi->softintr_mtx);

				sih = TAILQ_FIRST(&asi->softintr_q);
				if (sih == NULL) {
					mtx_leave(&asi->softintr_mtx);
					break;
				}
				TAILQ_REMOVE(&asi->softintr_q, sih, sih_q);
				sih->sih_pending = 0;

				atomic_add_int(&uvmexp.softs, 1);

				mtx_leave(&asi->softintr_mtx);

				(*sih->sih_fn)(sih->sih_arg);
			}
		}
	}

#if defined(MULTIPROCESSOR)
	__mp_unlock(&kernel_lock);
#endif
}

static int
ipl2si(int ipl)
{
	int si;

	switch (ipl) {
	case IPL_TTY:		/* XXX */
	case IPL_SOFTSERIAL:
		si = SI_SOFTSERIAL;
		break;
	case IPL_SOFTNET:
		si = SI_SOFTNET;
		break;
	case IPL_SOFTCLOCK:
		si = SI_SOFTCLOCK;
		break;
	case IPL_SOFT:
		si = SI_SOFT;
		break;
	default:
		panic("ipl2si: %d", ipl);
	}
	return si;
}

/*
 * softintr_establish:		[interface]
 *
 *	Register a software interrupt handler.
 */
void *
softintr_establish(int ipl, void (*func)(void *), void *arg)
{
	struct alpha_soft_intr *asi;
	struct alpha_soft_intrhand *sih;
	int si;

	si = ipl2si(ipl);
	asi = &alpha_soft_intrs[si];

	sih = malloc(sizeof(*sih), M_DEVBUF, M_NOWAIT);
	if (__predict_true(sih != NULL)) {
		sih->sih_intrhead = asi;
		sih->sih_fn = func;
		sih->sih_arg = arg;
		sih->sih_pending = 0;
	}
	return (sih);
}

/*
 * softintr_disestablish:	[interface]
 *
 *	Unregister a software interrupt handler.
 */
void
softintr_disestablish(void *arg)
{
	struct alpha_soft_intrhand *sih = arg;
	struct alpha_soft_intr *asi = sih->sih_intrhead;

	mtx_enter(&asi->softintr_mtx);
	if (sih->sih_pending) {
		TAILQ_REMOVE(&asi->softintr_q, sih, sih_q);
		sih->sih_pending = 0;
	}
	mtx_leave(&asi->softintr_mtx);

	free(sih, M_DEVBUF, sizeof *sih);
}

/*
 * Schedule a software interrupt.
 */
void
softintr_schedule(void *arg)
{
	struct alpha_soft_intrhand *sih = arg;
	struct alpha_soft_intr *si = sih->sih_intrhead;

	mtx_enter(&si->softintr_mtx);
	if (sih->sih_pending == 0) {
		TAILQ_INSERT_TAIL(&si->softintr_q, sih, sih_q);
		sih->sih_pending = 1;
		setsoft(si->softintr_siq);
	}
	mtx_leave(&si->softintr_mtx);
}

void
intr_barrier(void *cookie)
{
	sched_barrier(NULL);
}
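/*
 * splraise:
 *
 *	Raise the processor IPL to `s' if that is above the current
 *	level and return the previous level; never lowers the IPL.
 */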
int
splraise(int s)
{
	int cur = alpha_pal_rdps() & ALPHA_PSL_IPL_MASK;
	return (s > cur ? alpha_pal_swpipl(s) : cur);
}

#ifdef DIAGNOSTIC
void
splassert_check(int wantipl, const char *func)
{
	int curipl = alpha_pal_rdps() & ALPHA_PSL_IPL_MASK;

	/*
	 * Depending on the system, hardware interrupts may occur either
	 * at level 3 or level 4.  Avoid false positives in the former case.
	 */
	if (curipl == ALPHA_PSL_IPL_IO - 1)
		curipl = ALPHA_PSL_IPL_IO;

	if (curipl < wantipl) {
		splassert_fail(wantipl, curipl, func);
		/*
		 * If splassert_ctl is set to not panic, raise the ipl
		 * in a feeble attempt to reduce damage.
		 */
		alpha_pal_swpipl(wantipl);
	}
}
#endif
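/*
 * Code that relies on running at a minimum IPL can assert it with
 * splassert(9), e.g. splassert(IPL_BIO); with splassert_ctl enabled
 * such checks end up in splassert_check() above.
 */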