/* $NetBSD: shared_intr.c,v 1.30 2023/11/21 17:52:51 thorpej Exp $ */

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Common shared-interrupt-line functionality.
 */
#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: shared_intr.c,v 1.30 2023/11/21 17:52:51 thorpej Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/atomic.h>
#include <sys/intr.h>
#include <sys/xcall.h>

static const char *
intr_typename(int type)
{

	switch (type) {
	case IST_UNUSABLE:
		return ("disabled");
	case IST_NONE:
		return ("none");
	case IST_PULSE:
		return ("pulsed");
	case IST_EDGE:
		return ("edge-triggered");
	case IST_LEVEL:
		return ("level-triggered");
	}
	panic("intr_typename: unknown type %d", type);
}

struct alpha_shared_intr *
alpha_shared_intr_alloc(unsigned int n)
{
	struct alpha_shared_intr *intr;
	unsigned int i;

	KASSERT(n != 0);

	intr = kmem_alloc(n * sizeof(*intr), KM_SLEEP);
	for (i = 0; i < n; i++) {
		TAILQ_INIT(&intr[i].intr_q);
		intr[i].intr_sharetype = IST_NONE;
		intr[i].intr_dfltsharetype = IST_NONE;
		intr[i].intr_nstrays = 0;
		intr[i].intr_maxstrays = 0;
		intr[i].intr_private = NULL;
		intr[i].intr_cpu = NULL;
		intr[i].intr_string = kmem_asprintf("irq %u", i);
	}

	return (intr);
}

int
alpha_shared_intr_dispatch(struct alpha_shared_intr *intr, unsigned int num)
{
	struct alpha_shared_intrhand *ih;
	int rv = 0;

	atomic_add_long(&intr[num].intr_evcnt.ev_count, 1);

	TAILQ_FOREACH(ih, &intr[num].intr_q, ih_q) {
		/*
		 * The handler returns one of three values:
		 *    0: This interrupt wasn't for me.
		 *    1: This interrupt was for me.
		 *   -1: This interrupt might have been for me, but
		 *       I can't say for sure.
		 */
		rv |= (*ih->ih_fn)(ih->ih_arg);
	}

	return (rv ? 1 : 0);
}

static int
alpha_shared_intr_wrapper(void * const arg)
{
	struct alpha_shared_intrhand * const ih = arg;
	int rv;

	KERNEL_LOCK(1, NULL);
	rv = (*ih->ih_real_fn)(ih->ih_real_arg);
	KERNEL_UNLOCK_ONE(NULL);

	return rv;
}

struct alpha_shared_intrhand *
alpha_shared_intr_alloc_intrhand(struct alpha_shared_intr *intr,
    unsigned int num, int type, int level, int flags,
    int (*fn)(void *), void *arg, const char *basename)
{
	struct alpha_shared_intrhand *ih;

	if (intr[num].intr_sharetype == IST_UNUSABLE) {
		printf("%s: %s irq %u: unusable\n", __func__,
		    basename, num);
		return NULL;
	}

	KASSERT(type != IST_NONE);

	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);

	ih->ih_intrhead = intr;
	ih->ih_fn = ih->ih_real_fn = fn;
	ih->ih_arg = ih->ih_real_arg = arg;
	ih->ih_level = level;
	ih->ih_type = type;
	ih->ih_num = num;

	/*
	 * Non-MPSAFE interrupts get a wrapper that takes the
	 * KERNEL_LOCK.
	 */
	if ((flags & ALPHA_INTR_MPSAFE) == 0) {
		ih->ih_fn = alpha_shared_intr_wrapper;
		ih->ih_arg = ih;
	}

	return (ih);
}
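/*
 * Illustrative sketch (not part of the original file): how a driver
 * or bus front-end might consume the routines above.  "hypo_softc",
 * "hypo_intr", "hypo_irq_table", and the helper calls inside the
 * handler are hypothetical names; only the alpha_shared_intr_*()
 * calls and the 0/1/-1 return convention come from this file.
 *
 *	static int
 *	hypo_intr(void *arg)
 *	{
 *		struct hypo_softc *sc = arg;
 *
 *		if (!hypo_device_asserting(sc))	// hypothetical check
 *			return 0;		// not ours
 *		hypo_service(sc);		// hypothetical service
 *		return 1;			// handled
 *	}
 *
 *	// At attach time; alpha_shared_intr_link() asserts that
 *	// cpu_lock is held.
 *	struct alpha_shared_intrhand *ih =
 *	    alpha_shared_intr_alloc_intrhand(hypo_irq_table, irq,
 *		IST_LEVEL, IPL_BIO, 0, hypo_intr, sc, "hypo");
 *	if (ih != NULL) {
 *		mutex_enter(&cpu_lock);
 *		if (!alpha_shared_intr_link(hypo_irq_table, ih, "hypo")) {
 *			mutex_exit(&cpu_lock);
 *			alpha_shared_intr_free_intrhand(ih);
 *		} else
 *			mutex_exit(&cpu_lock);
 *	}
 */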
void
alpha_shared_intr_free_intrhand(struct alpha_shared_intrhand *ih)
{

	kmem_free(ih, sizeof(*ih));
}

static void
alpha_shared_intr_link_unlink_xcall(void *arg1, void *arg2)
{
	struct alpha_shared_intrhand *ih = arg1;
	struct alpha_shared_intr *intr = ih->ih_intrhead;
	unsigned int num = ih->ih_num;

	struct cpu_info *ci = intr[num].intr_cpu;

	KASSERT(ci != NULL);
	KASSERT(ci == curcpu() || !mp_online);
	KASSERT(!cpu_intr_p());

	const unsigned long psl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);

	/* A non-NULL arg2 means "link"; NULL means "unlink". */
	if (arg2 != NULL) {
		TAILQ_INSERT_TAIL(&intr[num].intr_q, ih, ih_q);
		ci->ci_nintrhand++;
	} else {
		TAILQ_REMOVE(&intr[num].intr_q, ih, ih_q);
		ci->ci_nintrhand--;
	}

	alpha_pal_swpipl(psl);
}

bool
alpha_shared_intr_link(struct alpha_shared_intr *intr,
    struct alpha_shared_intrhand *ih, const char *basename)
{
	int type = ih->ih_type;
	unsigned int num = ih->ih_num;

	KASSERT(mutex_owned(&cpu_lock));
	KASSERT(ih->ih_intrhead == intr);

	switch (intr[num].intr_sharetype) {
	case IST_EDGE:
	case IST_LEVEL:
		if (type == intr[num].intr_sharetype)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE) {
			if (TAILQ_FIRST(&intr[num].intr_q) == NULL) {
				printf("%s: %s irq %u: warning: using %s on %s\n",
				    __func__, basename, num,
				    intr_typename(type),
				    intr_typename(intr[num].intr_sharetype));
				type = intr[num].intr_sharetype;
			} else {
				printf("%s: %s irq %u: can't share %s with %s\n",
				    __func__, basename, num,
				    intr_typename(type),
				    intr_typename(intr[num].intr_sharetype));
				return (false);
			}
		}
		break;

	case IST_NONE:
		/* not currently used; safe */
		break;
	}

	intr[num].intr_sharetype = type;

	/*
	 * If a CPU hasn't been assigned yet, just give it to the
	 * primary.
	 */
	if (intr[num].intr_cpu == NULL) {
		intr[num].intr_cpu = &cpu_info_primary;
	}

	kpreempt_disable();
	if (intr[num].intr_cpu == curcpu() || !mp_online) {
		alpha_shared_intr_link_unlink_xcall(ih, ih);
	} else {
		uint64_t where = xc_unicast(XC_HIGHPRI,
		    alpha_shared_intr_link_unlink_xcall, ih, ih,
		    intr[num].intr_cpu);
		xc_wait(where);
	}
	kpreempt_enable();

	return (true);
}
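/*
 * Companion teardown sketch (hypothetical driver code, not part of
 * the original file): detach reverses the attach sequence shown
 * earlier -- unlink the handler on the owning CPU via the cross-call
 * machinery (alpha_shared_intr_unlink() below), then free it once no
 * CPU can still be running it.  "hypo_irq_table" and "ih" follow the
 * hypothetical attach sketch above.
 *
 *	mutex_enter(&cpu_lock);
 *	alpha_shared_intr_unlink(hypo_irq_table, ih, "hypo");
 *	mutex_exit(&cpu_lock);
 *	alpha_shared_intr_free_intrhand(ih);
 */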
void
alpha_shared_intr_unlink(struct alpha_shared_intr *intr,
    struct alpha_shared_intrhand *ih, const char *basename)
{
	unsigned int num = ih->ih_num;

	KASSERT(mutex_owned(&cpu_lock));

	kpreempt_disable();
	if (intr[num].intr_cpu == curcpu() || !mp_online) {
		alpha_shared_intr_link_unlink_xcall(ih, NULL);
	} else {
		uint64_t where = xc_unicast(XC_HIGHPRI,
		    alpha_shared_intr_link_unlink_xcall, ih, NULL,
		    intr[num].intr_cpu);
		xc_wait(where);
	}
	kpreempt_enable();
}

int
alpha_shared_intr_get_sharetype(struct alpha_shared_intr *intr,
    unsigned int num)
{

	return (intr[num].intr_sharetype);
}

int
alpha_shared_intr_isactive(struct alpha_shared_intr *intr, unsigned int num)
{

	return TAILQ_FIRST(&intr[num].intr_q) != NULL;
}

int
alpha_shared_intr_firstactive(struct alpha_shared_intr *intr, unsigned int num)
{
	struct alpha_shared_intrhand *ih;

	return (ih = TAILQ_FIRST(&intr[num].intr_q)) != NULL &&
	    TAILQ_NEXT(ih, ih_q) == NULL;
}

void
alpha_shared_intr_set_dfltsharetype(struct alpha_shared_intr *intr,
    unsigned int num, int newdfltsharetype)
{

#ifdef DIAGNOSTIC
	if (alpha_shared_intr_isactive(intr, num))
		panic("alpha_shared_intr_set_dfltsharetype on active intr");
#endif

	intr[num].intr_dfltsharetype = newdfltsharetype;
	intr[num].intr_sharetype = intr[num].intr_dfltsharetype;
}

void
alpha_shared_intr_set_maxstrays(struct alpha_shared_intr *intr,
    unsigned int num, int newmaxstrays)
{
	int s = splhigh();
	intr[num].intr_maxstrays = newmaxstrays;
	intr[num].intr_nstrays = 0;
	splx(s);
}

void
alpha_shared_intr_reset_strays(struct alpha_shared_intr *intr,
    unsigned int num)
{

	/*
	 * Don't bother blocking interrupts; this doesn't have to be
	 * precise, but it does need to be fast.
	 */
	intr[num].intr_nstrays = 0;
}

void
alpha_shared_intr_stray(struct alpha_shared_intr *intr, unsigned int num,
    const char *basename)
{

	intr[num].intr_nstrays++;

	if (intr[num].intr_maxstrays == 0)
		return;

	if (intr[num].intr_nstrays <= intr[num].intr_maxstrays)
		log(LOG_ERR, "stray %s irq %u%s\n", basename, num,
		    intr[num].intr_nstrays >= intr[num].intr_maxstrays ?
		    "; stopped logging" : "");
}
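/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * how a platform interrupt dispatcher might use the stray accounting
 * above.  "hypo_iointr", "hypo_irq_table", and "vector_to_irq" are
 * assumptions; the dispatch-or-stray pattern itself follows from
 * alpha_shared_intr_dispatch() returning 0 when no handler claimed
 * the interrupt.
 *
 *	void
 *	hypo_iointr(void *arg, unsigned long vec)
 *	{
 *		unsigned int irq = vector_to_irq(vec);	// hypothetical
 *
 *		if (!alpha_shared_intr_dispatch(hypo_irq_table, irq))
 *			alpha_shared_intr_stray(hypo_irq_table, irq,
 *			    "hypo");
 *		else
 *			alpha_shared_intr_reset_strays(hypo_irq_table,
 *			    irq);
 *	}
 */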
378 "; stopped logging" : ""); 379 } 380 381 void 382 alpha_shared_intr_set_private(struct alpha_shared_intr *intr, 383 unsigned int num, void *v) 384 { 385 386 intr[num].intr_private = v; 387 } 388 389 void * 390 alpha_shared_intr_get_private(struct alpha_shared_intr *intr, 391 unsigned int num) 392 { 393 394 return (intr[num].intr_private); 395 } 396 397 static unsigned int 398 alpha_shared_intr_q_count_handlers(struct alpha_shared_intr *intr_q) 399 { 400 unsigned int cnt = 0; 401 struct alpha_shared_intrhand *ih; 402 403 TAILQ_FOREACH(ih, &intr_q->intr_q, ih_q) { 404 cnt++; 405 } 406 407 return cnt; 408 } 409 410 static void 411 alpha_shared_intr_set_cpu_xcall(void *arg1, void *arg2) 412 { 413 struct alpha_shared_intr *intr_q = arg1; 414 struct cpu_info *ci = arg2; 415 unsigned int cnt = alpha_shared_intr_q_count_handlers(intr_q); 416 417 KASSERT(ci == curcpu() || !mp_online); 418 419 ci->ci_nintrhand += cnt; 420 KASSERT(cnt <= ci->ci_nintrhand); 421 } 422 423 static void 424 alpha_shared_intr_unset_cpu_xcall(void *arg1, void *arg2) 425 { 426 struct alpha_shared_intr *intr_q = arg1; 427 struct cpu_info *ci = arg2; 428 unsigned int cnt = alpha_shared_intr_q_count_handlers(intr_q); 429 430 KASSERT(ci == curcpu() || !mp_online); 431 432 KASSERT(cnt <= ci->ci_nintrhand); 433 ci->ci_nintrhand -= cnt; 434 } 435 436 void 437 alpha_shared_intr_set_cpu(struct alpha_shared_intr *intr, unsigned int num, 438 struct cpu_info *ci) 439 { 440 struct cpu_info *old_ci; 441 442 KASSERT(mutex_owned(&cpu_lock)); 443 444 old_ci = intr[num].intr_cpu; 445 intr[num].intr_cpu = ci; 446 447 if (old_ci != NULL && old_ci != ci) { 448 kpreempt_disable(); 449 450 if (ci == curcpu() || !mp_online) { 451 alpha_shared_intr_set_cpu_xcall(&intr[num], ci); 452 } else { 453 uint64_t where = xc_unicast(XC_HIGHPRI, 454 alpha_shared_intr_set_cpu_xcall, &intr[num], 455 ci, ci); 456 xc_wait(where); 457 } 458 459 if (old_ci == curcpu() || !mp_online) { 460 alpha_shared_intr_unset_cpu_xcall(&intr[num], old_ci); 461 } else { 462 uint64_t where = xc_unicast(XC_HIGHPRI, 463 alpha_shared_intr_unset_cpu_xcall, &intr[num], 464 old_ci, old_ci); 465 xc_wait(where); 466 } 467 468 kpreempt_enable(); 469 } 470 } 471 472 struct cpu_info * 473 alpha_shared_intr_get_cpu(struct alpha_shared_intr *intr, unsigned int num) 474 { 475 476 return (intr[num].intr_cpu); 477 } 478 479 struct evcnt * 480 alpha_shared_intr_evcnt(struct alpha_shared_intr *intr, 481 unsigned int num) 482 { 483 484 return (&intr[num].intr_evcnt); 485 } 486 487 void 488 alpha_shared_intr_set_string(struct alpha_shared_intr *intr, 489 unsigned int num, char *str) 490 { 491 char *ostr = intr[num].intr_string; 492 intr[num].intr_string = str; 493 kmem_strfree(ostr); 494 } 495 496 const char * 497 alpha_shared_intr_string(struct alpha_shared_intr *intr, 498 unsigned int num) 499 { 500 501 return (intr[num].intr_string); 502 } 503