10Sstevel@tonic-gate /* 20Sstevel@tonic-gate * CDDL HEADER START 30Sstevel@tonic-gate * 40Sstevel@tonic-gate * The contents of this file are subject to the terms of the 54652Scwb * Common Development and Distribution License (the "License"). 64652Scwb * You may not use this file except in compliance with the License. 70Sstevel@tonic-gate * 80Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 90Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 100Sstevel@tonic-gate * See the License for the specific language governing permissions 110Sstevel@tonic-gate * and limitations under the License. 120Sstevel@tonic-gate * 130Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 140Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 150Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 160Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 170Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 180Sstevel@tonic-gate * 190Sstevel@tonic-gate * CDDL HEADER END 200Sstevel@tonic-gate */ 210Sstevel@tonic-gate /* 224652Scwb * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 230Sstevel@tonic-gate * Use is subject to license terms. 
240Sstevel@tonic-gate */ 250Sstevel@tonic-gate 260Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 270Sstevel@tonic-gate 280Sstevel@tonic-gate #include <sys/types.h> 290Sstevel@tonic-gate #include <sys/param.h> 300Sstevel@tonic-gate #include <sys/t_lock.h> 310Sstevel@tonic-gate #include <sys/systm.h> 320Sstevel@tonic-gate #include <sys/spl.h> 330Sstevel@tonic-gate #include <sys/cmn_err.h> 340Sstevel@tonic-gate #include <sys/debug.h> 350Sstevel@tonic-gate #include <sys/kdi_impl.h> 365076Smishra #include <sys/cpuvar.h> 375076Smishra #include <sys/cpuvar.h> 385076Smishra #include <sys/archsystm.h> 390Sstevel@tonic-gate 400Sstevel@tonic-gate /* 410Sstevel@tonic-gate * Handle software interrupts through 'softcall' mechanism 42522Ssudheer * 43522Ssudheer * At present softcall mechanism uses a global list headed by softhead. 44522Ssudheer * Entries are added to tail and removed from head so as to preserve FIFO 45522Ssudheer * nature of entries in the softcall list. softcall() takes care of adding 46522Ssudheer * entries to the softtail. 47522Ssudheer * 48522Ssudheer * softint must take care of executing the entries in the FIFO 49522Ssudheer * order. It could be called simultaneously from multiple cpus, however only 505076Smishra * one instance of softint should process the softcall list with the exception 515076Smishra * when CPU is stuck due to high interrupt load and can't execute callbacks. 525076Smishra * State diagram is as follows :- 535076Smishra * 545076Smishra * - Upper half which is same as old state machine 555076Smishra * (IDLE->PEND->DRAIN->IDLE) 565076Smishra * 575076Smishra * - Lower half which steals the entries from softcall queue and execute 585076Smishra * in the context of softint interrupt handler. The interrupt handler 595076Smishra * is fired on a different CPU by sending a cross-call. 605076Smishra * 615076Smishra * Starting state is IDLE. 
 *
 * softint()
 *
 *
 *                              (c)
 *  ____________________________________________________
 * |                        ^                            ^
 * v          (a)           |             (b)            |
 * IDLE--------------------->PEND--------------------->DRAIN
 * ^                         |                           |
 * |                         |                           |
 * |                         |                           |
 * |                         |                           |
 * |                         |                           |
 * |                        (d)                         (d)
 * |                         |                           |
 * |                         v                           v
 * |                        PEND                       DRAIN
 * |          (e)            &                           &
 * |<-----------------------STEAL                      STEAL
 * ^                                                     |
 * |                                                     |
 * |                        (e)                          v
 * |_________________________<___________________________|
 *
 *
 *
 * Edges (a), (b) and (c) are the same as in the old state machine and
 * these states are mutually exclusive.
 *
 * a - When an entry is enqueued on the softcall queue, the state
 *     moves from IDLE to PEND.
 *
 * b - When the interrupt handler has started processing the softcall
 *     queue.
 *
 * c - When the interrupt handler has finished processing the softcall
 *     queue, the state machine goes back to IDLE.
 *
 * d - softcall() generates another softlevel1 iff the interrupt
 *     handler hasn't run recently.
 *
 * e - Either PEND|STEAL or DRAIN|STEAL is set. We let the softlevel1
 *     handler exit because we have processed all the entries.
 *
 * When a CPU is being pinned by higher-level interrupts for more than
 * softcall_delay clock ticks, SOFT_STEAL is OR'ed in so that the
 * softlevel1 handler on another CPU can drain the queue.
 *
 * These states are needed for the softcall mechanism since Solaris has
 * only one interface (ie.
siron ) as of now for :
 *
 * - raising a soft interrupt architecture independently (ie not through
 *   setsoftint(..) )
 * - to process the softcall queue.
 */

/*
 * Size of the fixed pool of softcall entries carved out at init time;
 * softcall() panics with "too many softcalls" if the pool is exhausted.
 */
#define	NSOFTCALLS	200

/*
 * Defined states for softcall processing.
 */
#define	SOFT_IDLE	0x01	/* no processing is needed */
#define	SOFT_PEND	0x02	/* softcall list needs processing */
#define	SOFT_DRAIN	0x04	/* list is being processed */
#define	SOFT_STEAL	0x08	/* list is being stolen for draining */

/*
 * A single deferred call: the function to invoke, its argument, and
 * the link used both for the FIFO queue and for the free list.
 */
typedef struct softcall {
	void (*sc_func)(void *);	/* function to call */
	void *sc_arg;			/* arg to pass to func */
	struct softcall *sc_next;	/* next in list */
} softcall_t;

/*
 * softcall list and state variables.
 *
 * softcalls is the backing array; softhead/softtail form the FIFO
 * (remove at head, append at tail) and softfree links unused entries.
 * softcall_state holds the SOFT_* flags; softcall_tick is the lbolt
 * value at the last point the queue made progress.  All are protected
 * by softcall_lock.
 */
static softcall_t *softcalls;
static softcall_t *softhead, *softtail, *softfree;
static uint_t	softcall_state;
static clock_t	softcall_tick;

/*
 * This ensures that softcall entries don't get stuck for long. It's expressed
 * in 10 milliseconds as 1 unit. When hires_tick is set or other clock frequency
 * is used, softcall_init() ensures that it's still expressed as 1 = 10 milli
 * seconds.
 */
static int softcall_delay = 1;

/*
 * The last CPU which will drain softcall queue.
 */
static int softcall_latest_cpuid = -1;

/*
 * CPUSET to hold the CPU which is processing softcall queue
 * currently. There can be more than one CPU having bit set
 * but it will happen only when they are stuck.
 */
static cpuset_t *softcall_cpuset = NULL;

/*
 * protects softcall lists and control variable softcall_state.
 */
static kmutex_t softcall_lock;

/* one-shot callback installed by the kernel debugger, run from softint() */
static void (*kdi_softcall_func)(void);
extern void siron_poke_cpu(cpuset_t);

extern void siron(void);
extern void kdi_siron(void);

/*
 * One-time initialization of the softcall machinery: allocate the
 * fixed pool of NSOFTCALLS entries and thread them onto the free
 * list, create the spin lock that guards the queue and state, and
 * convert softcall_delay from 10-millisecond units into clock ticks
 * for the running hz.
 */
void
softcall_init(void)
{
	softcall_t *sc;

	softcalls = kmem_zalloc(sizeof (softcall_t) * NSOFTCALLS, KM_SLEEP);
	softcall_cpuset = kmem_zalloc(sizeof (cpuset_t), KM_SLEEP);
	for (sc = softcalls; sc < &softcalls[NSOFTCALLS]; sc++) {
		sc->sc_next = softfree;
		softfree = sc;
	}
	/* spin lock at SPL8 so it can be taken from interrupt context */
	mutex_init(&softcall_lock, NULL, MUTEX_SPIN,
	    (void *)ipltospl(SPL8));
	softcall_state = SOFT_IDLE;
	softcall_tick = lbolt;

	/* guard against a negative tunable override */
	if (softcall_delay < 0)
		softcall_delay = 1;

	/*
	 * Since softcall_delay is expressed as 1 = 10 milliseconds.
	 */
	softcall_delay = softcall_delay * (hz/100);
	CPUSET_ZERO(*softcall_cpuset);
}

/*
 * Gets called when softcall queue is not moving forward.
We choose
 * a CPU and poke except the ones which are already poked.
 *
 * Returns 1 if a CPU was poked, 0 if no usable candidate was found
 * (all already poked, disabled, or being offlined).  In either case
 * softcall_lock, held on entry by the caller, is released here (on
 * the success path it is released before the cross-call is sent).
 */
static int
softcall_choose_cpu()
{
	cpu_t *cplist = CPU;		/* search hint: start at current CPU */
	cpu_t *cp;
	int intr_load = INT_MAX;	/* lowest interrupt load seen so far */
	int cpuid = -1;			/* best single candidate, if any */
	cpuset_t poke;			/* set of CPUs to cross-call */
	int s;

	ASSERT(getpil() >= DISP_LEVEL);
	ASSERT(ncpus > 1);
	ASSERT(MUTEX_HELD(&softcall_lock));

	CPUSET_ZERO(poke);

	/*
	 * The hint is to start from current CPU.
	 */
	cp = cplist;
	do {
		/*
		 * Don't select this CPU if :
		 *   - in cpuset already
		 *   - CPU is not accepting interrupts
		 *   - CPU is being offlined
		 */
		if (CPU_IN_SET(*softcall_cpuset, cp->cpu_id) ||
		    (cp->cpu_flags & CPU_ENABLE) == 0 ||
		    (cp == cpu_inmotion))
			continue;

		/* if CPU is not busy */
		if (cp->cpu_intrload == 0) {
			cpuid = cp->cpu_id;
			break;
		}

		if (cp->cpu_intrload < intr_load) {
			cpuid = cp->cpu_id;
			intr_load = cp->cpu_intrload;
		} else if (cp->cpu_intrload == intr_load) {
			/*
			 * We want to poke CPUs having similar
			 * load because we don't know which CPU
			 * can acknowledge level1 interrupt. The
			 * list of such CPUs should not be large.
			 */
			if (cpuid != -1) {
				/*
				 * Put the last CPU chosen because
				 * it also has same interrupt load.
				 */
				CPUSET_ADD(poke, cpuid);
				cpuid = -1;
			}

			CPUSET_ADD(poke, cp->cpu_id);
		}
	} while ((cp = cp->cpu_next_onln) != cplist);

	/* if we found a CPU which suits best to poke */
	if (cpuid != -1) {
		CPUSET_ZERO(poke);
		CPUSET_ADD(poke, cpuid);
	}

	if (CPUSET_ISNULL(poke)) {
		mutex_exit(&softcall_lock);
		return (0);
	}

	/*
	 * We first set the bit in cpuset and then poke.
	 */
	CPUSET_XOR(*softcall_cpuset, poke);
	mutex_exit(&softcall_lock);

	/*
	 * If softcall() was called at low pil then we may
	 * get preempted before we raise PIL. It should be okay
	 * because we are just going to poke CPUs now or at most
	 * another thread may start choosing CPUs in this routine.
	 */
	s = splhigh();
	siron_poke_cpu(poke);
	splx(s);
	return (1);
}

/*
 * Call function func with argument arg
 * at some later time at software interrupt priority
 *
 * An identical (func, arg) pair already on the queue is coalesced
 * rather than enqueued twice.  If the queue has not made progress
 * for more than softcall_delay ticks on an MP system, another CPU
 * is poked to steal and drain the queue.
 */
void
softcall(void (*func)(void *), void *arg)
{
	softcall_t *sc;
	clock_t w;	/* ticks since the queue last made progress */

	/*
	 * protect against cross-calls
	 */
	mutex_enter(&softcall_lock);
	/* coalesce identical softcalls */
	for (sc = softhead; sc != 0; sc = sc->sc_next) {
		if (sc->sc_func == func && sc->sc_arg == arg) {
			goto intr;
		}
	}

	if ((sc = softfree) == 0)
		panic("too many softcalls");

	/* take an entry off the free list and append it at the tail */
	softfree = sc->sc_next;
	sc->sc_func = func;
	sc->sc_arg = arg;
	sc->sc_next = 0;

	if (softhead) {
		softtail->sc_next = sc;
		softtail = sc;
	} else
		softhead = softtail = sc;

intr:
	if (softcall_state & SOFT_IDLE) {
		/* first pending entry: raise a softlevel1 interrupt */
		softcall_state = SOFT_PEND;
		softcall_tick = lbolt;
		mutex_exit(&softcall_lock);
		siron();
	} else if (softcall_state & (SOFT_DRAIN|SOFT_PEND)) {
		/* a handler is already due; check whether it is stuck */
		w = lbolt - softcall_tick;
		if (w <= softcall_delay || ncpus == 1) {
			mutex_exit(&softcall_lock);
			return;
		}

		/* queue looks stuck: mark it stealable once, then poke */
		if (!(softcall_state & SOFT_STEAL)) {
			softcall_state |= SOFT_STEAL;

			/*
			 * We want to give some more chance before
			 * fishing around again.
			 */
			softcall_tick = lbolt;
		}

		/* softcall_lock will be released by this routine */
		(void) softcall_choose_cpu();
	}
}

/*
 * Register a one-shot callback for the kernel debugger, run from
 * softint().  A soft interrupt is raised only when the softcall
 * queue is empty; otherwise softint() is already due to run.
 */
void
kdi_softcall(void (*func)(void))
{
	kdi_softcall_func = func;

	if (softhead == NULL)
		kdi_siron();
}

/*
 * Called to process software interrupts take one off queue, call it,
 * repeat.
 *
 * Note queue may change during call; softcall_lock, state variables
 * softcall_state and softcall_latest_cpuid ensures that -
 * - we don't have multiple cpus pulling from the list (thus causing
 *   a violation of FIFO order with an exception when we are stuck).
 * - we don't miss a new entry having been added to the head.
 * - we don't miss a wakeup.
 */

void
softint(void)
{
	softcall_t *sc = NULL;
	void (*func)();
	caddr_t arg;
	int cpu_id = CPU->cpu_id;

	/*
	 * Don't process softcall queue if current CPU is quiesced or
	 * offlined. This can happen when a CPU is running pause
	 * thread but softcall already sent a xcall.
	 */
	if (CPU->cpu_flags & (CPU_QUIESCED|CPU_OFFLINE)) {
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id)) {
			CPUSET_DEL(*softcall_cpuset, cpu_id);
			goto out;
		}
	}

	mutex_enter(&softcall_lock);

	if (softcall_state & (SOFT_STEAL|SOFT_PEND)) {
		/* there is work pending: this handler becomes the drainer */
		softcall_state = SOFT_DRAIN;
	} else {
		/*
		 * The check for softcall_cpuset being
		 * NULL is required because it may get
		 * called very early during boot.
		 */
		if (softcall_cpuset != NULL &&
		    CPU_IN_SET(*softcall_cpuset, cpu_id))
			CPUSET_DEL(*softcall_cpuset, cpu_id);
		mutex_exit(&softcall_lock);
		goto out;
	}

	/*
	 * Setting softcall_latest_cpuid to current CPU ensures
	 * that there is only one active softlevel1 handler to
	 * process softcall queues.
	 *
	 * Since softcall_lock lock is dropped before calling
	 * func (callback), we need softcall_latest_cpuid
	 * to prevent two softlevel1 handlers working on the
	 * queue when the first softlevel1 handler gets
	 * stuck due to high interrupt load.
	 */
	softcall_latest_cpuid = cpu_id;

	/* add ourself to the cpuset */
	if (!CPU_IN_SET(*softcall_cpuset, cpu_id))
		CPUSET_ADD(*softcall_cpuset, cpu_id);

	for (;;) {
		softcall_tick = lbolt;	/* record progress for softcall() */
		if ((sc = softhead) != NULL) {
			/* pop the head entry; recycle it onto the free list */
			func = sc->sc_func;
			arg = sc->sc_arg;
			softhead = sc->sc_next;
			sc->sc_next = softfree;
			softfree = sc;
		}

		if (sc == NULL) {
			/* queue drained: return to IDLE and stand down */
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			softcall_state = SOFT_IDLE;
			ASSERT(softcall_latest_cpuid == cpu_id);
			softcall_latest_cpuid = -1;

			mutex_exit(&softcall_lock);
			break;
		}

		/* the callback must run without the spin lock held */
		mutex_exit(&softcall_lock);
		func(arg);
		mutex_enter(&softcall_lock);

		/*
		 * No longer need softcall processing from current
		 * interrupt handler because either
		 *  (a) softcall is in SOFT_IDLE state or
		 *  (b) There is a CPU already draining softcall
		 *	queue and the current softlevel1 is no
		 *	longer required.
		 */
		if (softcall_latest_cpuid != cpu_id) {
			if (CPU_IN_SET(*softcall_cpuset, cpu_id))
				CPUSET_DEL(*softcall_cpuset, cpu_id);

			mutex_exit(&softcall_lock);
			break;
		}
	}

out:
	/* run any pending kernel-debugger callback regardless of state */
	if ((func = kdi_softcall_func) != NULL) {
		kdi_softcall_func = NULL;
		func();
	}
}