/* $NetBSD: shared_intr.c,v 1.27 2021/05/07 16:58:33 thorpej Exp $ */

/*
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Common shared-interrupt-line functionality.
 */
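/*
 * Illustrative usage sketch; "NLINES", "line", "sc", and the
 * "example_*" names are hypothetical, not part of this API.
 * Machine-dependent code allocates a table of shared lines once,
 * then registers handlers on a given line (flags 0 requests a
 * non-MPSAFE handler):
 *
 *	struct alpha_shared_intr *tab =
 *	    alpha_shared_intr_alloc(NLINES, 16);
 *
 *	struct alpha_shared_intrhand *ih =
 *	    alpha_shared_intr_alloc_intrhand(tab, line, IST_LEVEL,
 *	        IPL_BIO, 0, example_handler, sc, "example irq");
 *	if (ih != NULL) {
 *		mutex_enter(&cpu_lock);
 *		(void) alpha_shared_intr_link(tab, ih, "example irq");
 *		mutex_exit(&cpu_lock);
 *	}
 *
 * The machine-dependent interrupt dispatcher then calls
 * alpha_shared_intr_dispatch(tab, line) each time the line asserts.
 */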
#include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */

__KERNEL_RCSID(0, "$NetBSD: shared_intr.c,v 1.27 2021/05/07 16:58:33 thorpej Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/queue.h>
#include <sys/atomic.h>
#include <sys/intr.h>
#include <sys/xcall.h>

static const char *
intr_typename(int type)
{

	switch (type) {
	case IST_UNUSABLE:
		return ("disabled");
	case IST_NONE:
		return ("none");
	case IST_PULSE:
		return ("pulsed");
	case IST_EDGE:
		return ("edge-triggered");
	case IST_LEVEL:
		return ("level-triggered");
	}
	panic("intr_typename: unknown type %d", type);
}

struct alpha_shared_intr *
alpha_shared_intr_alloc(unsigned int n, unsigned int namesize)
{
	struct alpha_shared_intr *intr;
	unsigned int i;

	intr = kmem_alloc(n * sizeof(*intr), KM_SLEEP);
	for (i = 0; i < n; i++) {
		TAILQ_INIT(&intr[i].intr_q);
		intr[i].intr_sharetype = IST_NONE;
		intr[i].intr_dfltsharetype = IST_NONE;
		intr[i].intr_nstrays = 0;
		intr[i].intr_maxstrays = 5;
		intr[i].intr_private = NULL;
		intr[i].intr_cpu = NULL;
		if (namesize != 0) {
			intr[i].intr_string = kmem_zalloc(namesize, KM_SLEEP);
		} else {
			intr[i].intr_string = NULL;
		}
	}

	return (intr);
}

int
alpha_shared_intr_dispatch(struct alpha_shared_intr *intr, unsigned int num)
{
	struct alpha_shared_intrhand *ih;
	int rv, handled;

	atomic_add_long(&intr[num].intr_evcnt.ev_count, 1);

	ih = intr[num].intr_q.tqh_first;
	handled = 0;
	while (ih != NULL) {

		/*
		 * The handler returns one of three values:
		 *   0: This interrupt wasn't for me.
		 *   1: This interrupt was for me.
		 *  -1: This interrupt might have been for me, but
		 *	I can't say for sure.
		 */

		rv = (*ih->ih_fn)(ih->ih_arg);

		handled = handled || (rv != 0);
		ih = ih->ih_q.tqe_next;
	}

	return (handled);
}
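/*
 * An illustrative handler obeying the dispatch contract above; all of
 * the "example_*" names are hypothetical.  Returning 0 means "not
 * ours", 1 means "handled", and a device that cannot cheaply decide
 * whether it interrupted would return -1 instead of 0.
 *
 *	static int
 *	example_handler(void *arg)
 *	{
 *		struct example_softc *sc = arg;
 *
 *		if (!example_device_interrupted(sc))
 *			return (0);
 *		example_service(sc);
 *		return (1);
 *	}
 */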
static int
alpha_shared_intr_wrapper(void * const arg)
{
	struct alpha_shared_intrhand * const ih = arg;
	int rv;

	KERNEL_LOCK(1, NULL);
	rv = (*ih->ih_real_fn)(ih->ih_real_arg);
	KERNEL_UNLOCK_ONE(NULL);

	return rv;
}

struct alpha_shared_intrhand *
alpha_shared_intr_alloc_intrhand(struct alpha_shared_intr *intr,
    unsigned int num, int type, int level, int flags,
    int (*fn)(void *), void *arg, const char *basename)
{
	struct alpha_shared_intrhand *ih;

	if (intr[num].intr_sharetype == IST_UNUSABLE) {
		printf("%s: %s %d: unusable\n", __func__,
		    basename, num);
		return NULL;
	}

	KASSERT(type != IST_NONE);

	ih = kmem_alloc(sizeof(*ih), KM_SLEEP);

	ih->ih_intrhead = intr;
	ih->ih_fn = ih->ih_real_fn = fn;
	ih->ih_arg = ih->ih_real_arg = arg;
	ih->ih_level = level;
	ih->ih_type = type;
	ih->ih_num = num;

	/*
	 * Non-MPSAFE interrupts get a wrapper that takes the
	 * KERNEL_LOCK.
	 */
	if ((flags & ALPHA_INTR_MPSAFE) == 0) {
		ih->ih_fn = alpha_shared_intr_wrapper;
		ih->ih_arg = ih;
	}

	return (ih);
}

void
alpha_shared_intr_free_intrhand(struct alpha_shared_intrhand *ih)
{

	kmem_free(ih, sizeof(*ih));
}
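/*
 * Handler lists are walked by alpha_shared_intr_dispatch() without
 * taking any locks, so (as the KASSERTs below insist) they may only
 * be modified on the CPU the line is routed to, with the IPL raised
 * to fend off that line's interrupts; other CPUs reach that context
 * via a high-priority cross-call.
 */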
static void
alpha_shared_intr_link_unlink_xcall(void *arg1, void *arg2)
{
	struct alpha_shared_intrhand *ih = arg1;
	struct alpha_shared_intr *intr = ih->ih_intrhead;
	unsigned int num = ih->ih_num;

	struct cpu_info *ci = intr[num].intr_cpu;

	KASSERT(ci != NULL);
	KASSERT(ci == curcpu() || !mp_online);
	KASSERT(!cpu_intr_p());

	const unsigned long psl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);

	if (arg2 != NULL) {
		TAILQ_INSERT_TAIL(&intr[num].intr_q, ih, ih_q);
		ci->ci_nintrhand++;
	} else {
		TAILQ_REMOVE(&intr[num].intr_q, ih, ih_q);
		ci->ci_nintrhand--;
	}

	alpha_pal_swpipl(psl);
}

bool
alpha_shared_intr_link(struct alpha_shared_intr *intr,
    struct alpha_shared_intrhand *ih, const char *basename)
{
	int type = ih->ih_type;
	unsigned int num = ih->ih_num;

	KASSERT(mutex_owned(&cpu_lock));
	KASSERT(ih->ih_intrhead == intr);

	switch (intr[num].intr_sharetype) {
	case IST_EDGE:
	case IST_LEVEL:
		if (type == intr[num].intr_sharetype)
			break;
		/* FALLTHROUGH */
	case IST_PULSE:
		if (type != IST_NONE) {
			if (intr[num].intr_q.tqh_first == NULL) {
				printf("alpha_shared_intr_establish: "
				    "%s %d: warning: using %s on %s\n",
				    basename, num, intr_typename(type),
				    intr_typename(intr[num].intr_sharetype));
				type = intr[num].intr_sharetype;
			} else {
				printf("alpha_shared_intr_establish: "
				    "%s %d: can't share %s with %s\n",
				    basename, num, intr_typename(type),
				    intr_typename(intr[num].intr_sharetype));
				return (false);
			}
		}
		break;

	case IST_NONE:
		/* not currently used; safe */
		break;
	}

	intr[num].intr_sharetype = type;

	/*
	 * If a CPU hasn't been assigned yet, just give it to the
	 * primary.
	 */
	if (intr[num].intr_cpu == NULL) {
		intr[num].intr_cpu = &cpu_info_primary;
	}

	kpreempt_disable();
	if (intr[num].intr_cpu == curcpu() || !mp_online) {
		alpha_shared_intr_link_unlink_xcall(ih, ih);
	} else {
		uint64_t where = xc_unicast(XC_HIGHPRI,
		    alpha_shared_intr_link_unlink_xcall, ih, ih,
		    intr[num].intr_cpu);
		xc_wait(where);
	}
	kpreempt_enable();

	return (true);
}

void
alpha_shared_intr_unlink(struct alpha_shared_intr *intr,
    struct alpha_shared_intrhand *ih, const char *basename)
{
	unsigned int num = ih->ih_num;

	KASSERT(mutex_owned(&cpu_lock));

	kpreempt_disable();
	if (intr[num].intr_cpu == curcpu() || !mp_online) {
		alpha_shared_intr_link_unlink_xcall(ih, NULL);
	} else {
		uint64_t where = xc_unicast(XC_HIGHPRI,
		    alpha_shared_intr_link_unlink_xcall, ih, NULL,
		    intr[num].intr_cpu);
		xc_wait(where);
	}
	kpreempt_enable();
}

int
alpha_shared_intr_get_sharetype(struct alpha_shared_intr *intr,
    unsigned int num)
{

	return (intr[num].intr_sharetype);
}

int
alpha_shared_intr_isactive(struct alpha_shared_intr *intr, unsigned int num)
{

	return (intr[num].intr_q.tqh_first != NULL);
}

int
alpha_shared_intr_firstactive(struct alpha_shared_intr *intr, unsigned int num)
{

	return (intr[num].intr_q.tqh_first != NULL &&
	    intr[num].intr_q.tqh_first->ih_q.tqe_next == NULL);
}

void
alpha_shared_intr_set_dfltsharetype(struct alpha_shared_intr *intr,
    unsigned int num, int newdfltsharetype)
{

#ifdef DIAGNOSTIC
	if (alpha_shared_intr_isactive(intr, num))
		panic("alpha_shared_intr_set_dfltsharetype on active intr");
#endif

	intr[num].intr_dfltsharetype = newdfltsharetype;
	intr[num].intr_sharetype = intr[num].intr_dfltsharetype;
}

void
alpha_shared_intr_set_maxstrays(struct alpha_shared_intr *intr,
    unsigned int num, int newmaxstrays)
{
	int s = splhigh();
	intr[num].intr_maxstrays = newmaxstrays;
	intr[num].intr_nstrays = 0;
	splx(s);
}

void
alpha_shared_intr_reset_strays(struct alpha_shared_intr *intr,
    unsigned int num)
{

	/*
	 * Don't bother blocking interrupts; this doesn't have to be
	 * precise, but it does need to be fast.
	 */
	intr[num].intr_nstrays = 0;
}

void
alpha_shared_intr_stray(struct alpha_shared_intr *intr, unsigned int num,
    const char *basename)
{

	intr[num].intr_nstrays++;

	if (intr[num].intr_maxstrays == 0)
		return;

	if (intr[num].intr_nstrays <= intr[num].intr_maxstrays)
		log(LOG_ERR, "stray %s %d%s\n", basename, num,
		    intr[num].intr_nstrays >= intr[num].intr_maxstrays ?
		    "; stopped logging" : "");
}
386 "; stopped logging" : ""); 387 } 388 389 void 390 alpha_shared_intr_set_private(struct alpha_shared_intr *intr, 391 unsigned int num, void *v) 392 { 393 394 intr[num].intr_private = v; 395 } 396 397 void * 398 alpha_shared_intr_get_private(struct alpha_shared_intr *intr, 399 unsigned int num) 400 { 401 402 return (intr[num].intr_private); 403 } 404 405 static unsigned int 406 alpha_shared_intr_q_count_handlers(struct alpha_shared_intr *intr_q) 407 { 408 unsigned int cnt = 0; 409 struct alpha_shared_intrhand *ih; 410 411 TAILQ_FOREACH(ih, &intr_q->intr_q, ih_q) { 412 cnt++; 413 } 414 415 return cnt; 416 } 417 418 static void 419 alpha_shared_intr_set_cpu_xcall(void *arg1, void *arg2) 420 { 421 struct alpha_shared_intr *intr_q = arg1; 422 struct cpu_info *ci = arg2; 423 unsigned int cnt = alpha_shared_intr_q_count_handlers(intr_q); 424 425 KASSERT(ci == curcpu() || !mp_online); 426 427 ci->ci_nintrhand += cnt; 428 KASSERT(cnt <= ci->ci_nintrhand); 429 } 430 431 static void 432 alpha_shared_intr_unset_cpu_xcall(void *arg1, void *arg2) 433 { 434 struct alpha_shared_intr *intr_q = arg1; 435 struct cpu_info *ci = arg2; 436 unsigned int cnt = alpha_shared_intr_q_count_handlers(intr_q); 437 438 KASSERT(ci == curcpu() || !mp_online); 439 440 KASSERT(cnt <= ci->ci_nintrhand); 441 ci->ci_nintrhand -= cnt; 442 } 443 444 void 445 alpha_shared_intr_set_cpu(struct alpha_shared_intr *intr, unsigned int num, 446 struct cpu_info *ci) 447 { 448 struct cpu_info *old_ci; 449 450 KASSERT(mutex_owned(&cpu_lock)); 451 452 old_ci = intr[num].intr_cpu; 453 intr[num].intr_cpu = ci; 454 455 if (old_ci != NULL && old_ci != ci) { 456 kpreempt_disable(); 457 458 if (ci == curcpu() || !mp_online) { 459 alpha_shared_intr_set_cpu_xcall(&intr[num], ci); 460 } else { 461 uint64_t where = xc_unicast(XC_HIGHPRI, 462 alpha_shared_intr_set_cpu_xcall, &intr[num], 463 ci, ci); 464 xc_wait(where); 465 } 466 467 if (old_ci == curcpu() || !mp_online) { 468 alpha_shared_intr_unset_cpu_xcall(&intr[num], old_ci); 469 } else { 470 uint64_t where = xc_unicast(XC_HIGHPRI, 471 alpha_shared_intr_unset_cpu_xcall, &intr[num], 472 old_ci, old_ci); 473 xc_wait(where); 474 } 475 476 kpreempt_enable(); 477 } 478 } 479 480 struct cpu_info * 481 alpha_shared_intr_get_cpu(struct alpha_shared_intr *intr, unsigned int num) 482 { 483 484 return (intr[num].intr_cpu); 485 } 486 487 struct evcnt * 488 alpha_shared_intr_evcnt(struct alpha_shared_intr *intr, 489 unsigned int num) 490 { 491 492 return (&intr[num].intr_evcnt); 493 } 494 495 char * 496 alpha_shared_intr_string(struct alpha_shared_intr *intr, 497 unsigned int num) 498 { 499 500 return (intr[num].intr_string); 501 } 502